Index: src/0dev.org/predictor/predictor.go
==================================================================
--- src/0dev.org/predictor/predictor.go
+++ src/0dev.org/predictor/predictor.go
@@ -21,16 +21,10 @@
 // at the expense of time.
 func (ctx *context) update(val byte) {
 	ctx.hash = (ctx.hash << 4) ^ uint16(val)
 }
 
-type compressor func([]byte) error
-
-func (w compressor) Write(data []byte) (int, error) {
-	return len(data), w(data)
-}
-
 // Returns an io.Writer implementation that wraps the provided io.Writer
 // and compresses data according to the predictor algorithm
 //
 // It can buffer data as the predictor mandates 8-byte blocks with a header.
 // A call with no data will force a flush.
@@ -37,56 +31,58 @@
 func Compressor(writer io.Writer) io.Writer {
 	var ctx context
 	ctx.input = make([]byte, 0, 8)
 
 	// Forward declaration as it is required for recursion
-	var write compressor
+	var write iou.WriterFunc
 
-	write = func(data []byte) error {
+	write = func(data []byte) (int, error) {
 		var (
 			blockSize    int = 8
 			bufferLength int = len(ctx.input)
+			datalength   int = len(data)
 		)
 
 		// Force a flush if we are called with no data to write
-		if len(data) == 0 {
+		if datalength == 0 {
 			// Nothing to flush if the buffer is empty though
 			if len(ctx.input) == 0 {
-				return nil
+				return 0, nil
 			}
 
 			// We can't have more than 7 bytes in the buffer so this is safe
-			data, blockSize, bufferLength = ctx.input, len(ctx.input), 0
+			data, datalength = ctx.input, len(ctx.input)
+			blockSize, bufferLength = datalength, 0
 		}
 
 		// Check if there are pending bytes in the buffer
-		if len(data) < blockSize || bufferLength > 0 {
+		if datalength < blockSize || bufferLength > 0 {
 			// If the current buffer + new data can fit into a block
-			if (len(data) + bufferLength) <= blockSize {
+			if (datalength + bufferLength) <= blockSize {
 				ctx.input = append(ctx.input, data...)
 
 				// Flush the block if the buffer fills it
 				if len(ctx.input) == blockSize {
					return write(nil)
 				}
 
 				// ... otherwise just return
-				return nil
+				return datalength, nil
 			}
 
 			// The current buffer + new data overflow the block size
 			// Complete the block, flush it ...
 			ctx.input = append(ctx.input, data[:blockSize-bufferLength]...)
-			if err := write(nil); err != nil {
-				return err
+			if c, err := write(nil); err != nil {
+				return c, err
 			}
 
 			// ... and stage the rest of the data in the buffer
 			ctx.input = append(ctx.input, data[blockSize-bufferLength:]...)
-			return nil
+			return datalength, nil
 		}
 
 		var buf []byte = make([]byte, 1, blockSize+1)
 
-		for block := 0; block < len(data)/blockSize; block++ {
+		for block := 0; block < datalength/blockSize; block++ {
 			for i := 0; i < blockSize; i++ {
 				var current byte = data[(block*blockSize)+i]
 				if ctx.table[ctx.hash] == current {
 					// Guess was right - don't output
 					buf[0] |= 1 << uint(i)
@@ -96,26 +92,26 @@
 					buf = append(buf, current)
 				}
 				ctx.update(current)
 			}
 
-			if _, err := writer.Write(buf); err != nil {
-				return err
+			if c, err := writer.Write(buf); err != nil {
+				return (block * blockSize) + c, err
 			}
 
 			// Reset the flags and buffer for the next iteration
 			buf, buf[0] = buf[:1], 0
 		}
 
-		if remaining := len(data) % blockSize; remaining > 0 {
+		if remaining := datalength % blockSize; remaining > 0 {
 			ctx.input = ctx.input[:remaining]
-			copy(ctx.input, data[len(data)-remaining:])
+			copy(ctx.input, data[datalength-remaining:])
 		} else {
 			ctx.input = ctx.input[:0]
 		}
 
-		return nil
+		return datalength, nil
 	}
 
 	return write
 }
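For reference, the change above swaps the package-local compressor adapter for iou.WriterFunc. The snippet below is a minimal sketch of the shape that type presumably has; the iou package is not part of this diff, so its import path and exact declarations are assumptions. The relevant difference from the removed compressor type is that the closure's own (int, error) result is forwarded as-is, instead of the adapter unconditionally reporting len(data) as the byte count.

```go
// Sketch of the assumed iou.WriterFunc adapter; not taken from this diff.
package iou

import "io"

// WriterFunc adapts a plain function to the io.Writer interface,
// much like the removed compressor type, but it passes through the
// function's own byte count rather than always reporting len(data).
type WriterFunc func([]byte) (int, error)

// Write satisfies io.Writer by delegating to the wrapped function.
func (w WriterFunc) Write(data []byte) (int, error) {
	return w(data)
}

// Compile-time assertion that WriterFunc implements io.Writer.
var _ io.Writer = WriterFunc(nil)
```

With such an adapter in place, Compressor can return the write closure directly as an io.Writer, and short-write accounting (as in the `(block * blockSize) + c` return) reaches the caller unchanged.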