Overview
Comment: Predictor's compressor and decompressor structures now implement io.Writer/io.Reader in order to do away with function indirection, but they do not follow the required semantics. Those are provided by the SizedWriter/SizedReader wrappers returned by the constructor functions.
SHA1: 4dfcff962c763b560a5af8101c2f3f3a
User & Date: spaskalev on 2014-12-25 00:55:43
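The design described in the comment separates block handling from io semantics: the inner Write/Read methods only accept whole 8-byte blocks, while the iou wrappers buffer arbitrary-length calls into such blocks. The iou package itself is not part of this check-in; the following is a minimal sketch, with a hypothetical sizedWriter type, of how such a wrapper can present standard io.Writer semantics over a block-only writer, including the flush-on-empty-write behavior that Compressor documents.

```go
package iou // hypothetical sketch; not the actual 0dev.org/iou source

import "io"

// sizedWriter buffers incoming data and forwards it to the underlying
// writer only in whole blocks of the configured size.
type sizedWriter struct {
	target io.Writer
	buffer []byte
	filled int
}

// SizedWriter returns an io.Writer that writes to w in size-byte blocks.
func SizedWriter(w io.Writer, size int) io.Writer {
	return &sizedWriter{target: w, buffer: make([]byte, size)}
}

func (s *sizedWriter) Write(data []byte) (int, error) {
	// An empty write flushes any buffered partial block.
	if len(data) == 0 && s.filled > 0 {
		_, err := s.target.Write(s.buffer[:s.filled])
		s.filled = 0
		return 0, err
	}

	var total int
	for len(data) > 0 {
		// Fill the internal buffer from the input.
		n := copy(s.buffer[s.filled:], data)
		s.filled += n
		data = data[n:]
		total += n

		// Forward a complete block to the underlying writer.
		if s.filled == len(s.buffer) {
			if _, err := s.target.Write(s.buffer); err != nil {
				return total, err
			}
			s.filled = 0
		}
	}
	return total, nil
}
```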
Context
2014-12-25
01:25 | Removed pdc's output buffer when decompressing as io.Copy uses a sufficiently-large buffer internally. check-in: 2b1ed8e45e user: spaskalev tags: trunk
00:55 | Predictor's compressor and decompressor structures now implement io.Writer/io.Reader in order to do away with function indirection, but they do not follow the required semantics. Those are provided by the SizedWriter/SizedReader wrappers returned by the constructor functions. check-in: 4dfcff962c user: spaskalev tags: trunk
00:43 | Extracted predictor's compressor and decompressor code into separate structs that embed Sized{Writer,Reader} check-in: 50507bd510 user: spaskalev tags: trunk
Changes
Modified src/0dev.org/predictor/predictor.go from [cb877636af] to [3161320c11]. The changed hunks are shown below as they read in the new version of the file.
︙
// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It can buffer data as the predictor mandates 8-byte blocks with a header.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	var cmp compressor
	cmp.target = writer
	return iou.SizedWriter(&cmp, 8)
}

type compressor struct {
	context
	target io.Writer
}

// Note: this method does not implement the full io.Writer's Write() semantics
func (ctx *compressor) Write(data []byte) (int, error) {
	var (
		blockSize  int = 8
		datalength int = len(data)
	)

	if datalength == 0 {
		return 0, nil
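Both structs embed a context, which this diff does not show. In the classic predictor scheme (RFC 1978) that state is a guess table indexed by a rolling hash over recent bytes; the following is a speculative sketch of such a type, with names chosen only to match the update call seen later in the diff, not taken from this repository.

```go
// Speculative sketch of the embedded predictor state; the real
// context type is defined elsewhere in predictor.go.
type context struct {
	table [1 << 16]byte // guess table: hash of recent bytes -> predicted next byte
	hash  uint16        // rolling hash over the bytes seen so far
}

// update records b as the prediction for the current hash value and
// then folds b into the hash, as in the RFC 1978 predictor scheme.
func (ctx *context) update(b byte) {
	ctx.table[ctx.hash] = b
	ctx.hash = (ctx.hash << 4) ^ uint16(b)
}
```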
︙
	return datalength, nil
}

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(reader io.Reader) io.Reader {
	var dcmp decompressor
	dcmp.source = reader
	dcmp.input = make([]byte, 8)
	return iou.SizedReader(&dcmp, 8)
}

type decompressor struct {
	context
	source io.Reader
	input  []byte
}

// Note: this method does not implement the full io.Reader's Read() semantics
func (ctx *decompressor) Read(output []byte) (int, error) {
	var (
		err               error
		flags, predicted  byte
		rc, total, copied int
	)

	// Read the next prediction header
readHeader:
	rc, err = ctx.source.Read(ctx.input[:1])

	// Fail on error unless it is EOF
	if err != nil && err != io.EOF {
		return total, err
	} else if rc == 0 {
		return total, err
	}

	// Copy the prediction header and calculate the number of subsequent bytes to read
	flags = ctx.input[0]
	predicted = bits.Hamming(flags)

	// Read the non-predicted bytes and place them in the end of the buffer
	rc, err = ctx.source.Read(ctx.input[predicted:])
retryData:
	if rc < int(8-predicted) && err == nil {
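Read sizes its second read with bits.Hamming(flags): each set bit in the header byte marks a byte of the 8-byte block that was predicted correctly and therefore omitted from the compressed stream, so only 8-predicted bytes remain to be read. The bits package is outside this diff; a minimal sketch of the population count such a function computes might look like this.

```go
// Hamming returns the number of set bits in b (its Hamming weight),
// using Kernighan's trick of clearing the lowest set bit per pass.
func Hamming(b byte) byte {
	var count byte
	for ; b > 0; b &= b - 1 {
		count++
	}
	return count
}
```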
︙
		ctx.update(ctx.input[i])
	}

	// Copy the decompressed data to the output and accumulate the count
	copied = copy(output, ctx.input[:rc])
	total += copied

	// Loop for another pass if there is available space in the output
	output = output[copied:]
	if len(output) > 0 && err == nil {
		goto readHeader
	}

	return total, err
}
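Since the constructors return plain io.Writer/io.Reader values, the pair composes with the standard library as usual. A round-trip might look like the sketch below; the import path is assumed from the file's location under src/0dev.org/predictor and is not confirmed by this page.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	predictor "0dev.org/predictor" // assumed import path
)

func main() {
	var compressed bytes.Buffer

	// Compress: writes are buffered into 8-byte predictor blocks.
	w := predictor.Compressor(&compressed)
	if _, err := w.Write([]byte("the quick brown fox jumps over the lazy dog")); err != nil {
		panic(err)
	}
	// An empty write forces the SizedWriter wrapper to flush the final partial block.
	if _, err := w.Write(nil); err != nil {
		panic(err)
	}

	// Decompress: io.Copy drives Read through the SizedReader wrapper until EOF.
	var plain bytes.Buffer
	if _, err := io.Copy(&plain, predictor.Decompressor(&compressed)); err != nil {
		panic(err)
	}
	fmt.Println(plain.String())
}
```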