Index: src/0dev.org/predictor/predictor.go
==================================================================
--- src/0dev.org/predictor/predictor.go
+++ src/0dev.org/predictor/predictor.go
@@ -29,22 +29,21 @@
 //
 // It can buffer data as the predictor mandates 8-byte blocks with a header.
 // A call with no data will force a flush.
 func Compressor(writer io.Writer) io.Writer {
 	var cmp compressor
-	cmp.Writer = iou.SizedWriter(iou.WriterFunc(cmp.compress), 8)
 	cmp.target = writer
-	return &cmp
+	return iou.SizedWriter(&cmp, 8)
 }
 
 type compressor struct {
 	context
-	io.Writer
 	target io.Writer
 }
 
-func (ctx *compressor) compress(data []byte) (int, error) {
+// Note: this method does not implement the full io.Writer's Write() semantics
+func (ctx *compressor) Write(data []byte) (int, error) {
 	var (
 		blockSize  int = 8
 		datalength int = len(data)
 	)
 
@@ -84,24 +83,23 @@
 
 // Returns an io.Reader implementation that wraps the provided io.Reader
 // and decompresses data according to the predictor algorithm
 func Decompressor(reader io.Reader) io.Reader {
 	var dcmp decompressor
-	dcmp.Reader = iou.SizedReader(iou.ReaderFunc(dcmp.decompress), 8)
 	dcmp.source = reader
-	dcmp.input = make([]byte, 0, 8)
-	return &dcmp
+	dcmp.input = make([]byte, 8)
+	return iou.SizedReader(&dcmp, 8)
 }
 
 type decompressor struct {
 	context
-	io.Reader
 	source io.Reader
 	input  []byte
 }
 
-func (ctx *decompressor) decompress(output []byte) (int, error) {
+// Note: this method does not implement the full io.Reader's Read() semantics
+func (ctx *decompressor) Read(output []byte) (int, error) {
 	var (
 		err               error
 		flags, predicted  byte
 		rc, total, copied int
 	)
@@ -114,13 +112,11 @@
 		return total, err
 	} else if rc == 0 {
 		return total, err
 	}
 
-	// Extend the buffer, copy the prediction header
-	// and calculate the number of subsequent bytes to read
-	ctx.input = ctx.input[:8]
+	// Copy the prediction header and calculate the number of subsequent bytes to read
 	flags = ctx.input[0]
 	predicted = bits.Hamming(flags)
 
 	// Read the non-predicted bytes and place them in the end of the buffer
 	rc, err = ctx.source.Read(ctx.input[predicted:])
@@ -154,16 +150,13 @@
 
 	// Copy the decompressed data to the output and accumulate the count
 	copied = copy(output, ctx.input[:rc])
 	total += copied
 
-	// Clear the buffer
-	ctx.input = ctx.input[:0]
-
 	// Loop for another pass if there is available space in the output
 	output = output[copied:]
 	if len(output) > 0 && err == nil {
 		goto readHeader
 	}
 
 	return total, err
 }
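
As a rough illustration of how the refactored package is meant to be used, here is a minimal round-trip sketch. It is not taken from the repository: only the Compressor/Decompressor signatures, the 0dev.org/predictor import path and the documented behaviour that a write with no data forces a flush come from the diff above; the sample input and the ioutil.ReadAll-based read loop are assumptions for the example.

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"io/ioutil"

		"0dev.org/predictor"
	)

	func main() {
		var compressed bytes.Buffer

		// Compressor returns an io.Writer that buffers input into 8-byte blocks
		// and writes the predictor-compressed stream to the wrapped writer.
		w := predictor.Compressor(&compressed)
		io.WriteString(w, "the quick brown fox jumps over the lazy dog")

		// Per the package comment, a call with no data forces the remaining
		// buffered block to be flushed.
		w.Write(nil)

		// Decompressor wraps the compressed stream and yields the original bytes.
		r := predictor.Decompressor(&compressed)
		restored, err := ioutil.ReadAll(r)
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("%s\n", restored)
	}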