Check-in [c62581c2a6]
Overview
Comment: Removed the compressor alias type from predictor, use ioutil.WriterFunc
SHA1: c62581c2a644cab7abdb5e728c73cd013f9fd454
User & Date: spaskalev on 2014-12-23 09:32:00
Context
2014-12-23 10:38  Added MinReader to ioutils, CC at 100%  check-in: 47b221d5b4 user: spaskalev tags: trunk
2014-12-23 09:32  Removed the compressor alias type from predictor, use ioutil.WriterFunc  check-in: c62581c2a6 user: spaskalev tags: trunk
2014-12-23 08:15  Removed the decompressor alias type from predictor, use ioutil.ReaderFunc  check-in: 2b049247ed user: spaskalev tags: trunk
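This check-in and the previous one (2b049247ed) replace local adapter types with function types from the ioutils package, imported in the diff below under the alias iou. The pattern mirrors http.HandlerFunc: a named function type satisfies io.Writer or io.Reader by calling itself from its Write or Read method. The ioutils package is not part of this diff, so the following is only a sketch of what WriterFunc and ReaderFunc are presumably defined as, inferred from how iou.WriterFunc is assigned a func([]byte) (int, error) and returned as an io.Writer in the change; the actual package (and its name under 0dev.org) may differ.

package ioutil // assumed name; the real package lives under 0dev.org and is aliased as iou below

// WriterFunc adapts an ordinary function to the io.Writer interface:
// Write simply calls the function itself.
type WriterFunc func([]byte) (int, error)

func (f WriterFunc) Write(p []byte) (int, error) {
	return f(p)
}

// ReaderFunc is the io.Reader counterpart used by the decompressor side.
type ReaderFunc func([]byte) (int, error)

func (f ReaderFunc) Read(p []byte) (int, error) {
	return f(p)
}

With such a type available, Compressor can return its internal write closure directly instead of wrapping it in a dedicated compressor type, which is exactly what the diff below does.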
Changes

Modified src/0dev.org/predictor/predictor.go from [4bb1d52139] to [7a1990432a].

    19     19   // characters which will be used to index the guess table.
    20     20   // A better hash function would result in additional compression,
    21     21   // at the expense of time.
    22     22   func (ctx *context) update(val byte) {
    23     23   	ctx.hash = (ctx.hash << 4) ^ uint16(val)
    24     24   }
    25     25   
    26         -type compressor func([]byte) error
    27         -
    28         -func (w compressor) Write(data []byte) (int, error) {
    29         -	return len(data), w(data)
    30         -}
    31         -
    32     26   // Returns an io.Writer implementation that wraps the provided io.Writer
    33     27   // and compresses data according to the predictor algorithm
    34     28   //
    35     29   // It can buffer data as the predictor mandates 8-byte blocks with a header.
    36     30   // A call with no data will force a flush.
    37     31   func Compressor(writer io.Writer) io.Writer {
    38     32   	var ctx context
    39     33   	ctx.input = make([]byte, 0, 8)
    40     34   
    41     35   	// Forward declaration as it is required for recursion
    42         -	var write compressor
           36  +	var write iou.WriterFunc
    43     37   
    44         -	write = func(data []byte) error {
           38  +	write = func(data []byte) (int, error) {
    45     39   		var (
    46     40   			blockSize    int = 8
    47     41   			bufferLength int = len(ctx.input)
           42  +			datalength   int = len(data)
    48     43   		)
    49     44   
    50     45   		// Force a flush if we are called with no data to write
    51         -		if len(data) == 0 {
           46  +		if datalength == 0 {
    52     47   			// Nothing to flush if the buffer is empty though
    53     48   			if len(ctx.input) == 0 {
    54         -				return nil
           49  +				return 0, nil
    55     50   			}
    56     51   			// We can't have more than 7 bytes in the buffer so this is safe
    57         -			data, blockSize, bufferLength = ctx.input, len(ctx.input), 0
           52  +			data, datalength = ctx.input, len(ctx.input)
           53  +			blockSize, bufferLength = datalength, 0
    58     54   		}
    59     55   
    60     56   		// Check if there are pending bytes in the buffer
    61         -		if len(data) < blockSize || bufferLength > 0 {
           57  +		if datalength < blockSize || bufferLength > 0 {
    62     58   
    63     59   			// If the current buffer + new data can fit into a block
    64         -			if (len(data) + bufferLength) <= blockSize {
           60  +			if (datalength + bufferLength) <= blockSize {
    65     61   				ctx.input = append(ctx.input, data...)
    66     62   
    67     63   				// Flush the block if the buffer fills it
    68     64   				if len(ctx.input) == blockSize {
    69     65   					return write(nil)
    70     66   				}
    71     67   				// ... otherwise just return
    72         -				return nil
           68  +				return datalength, nil
    73     69   			}
    74     70   
    75     71   			// The current buffer + new data overflow the block size
    76     72   			// Complete the block, flush it ...
    77     73   			ctx.input = append(ctx.input, data[:blockSize-bufferLength]...)
    78         -			if err := write(nil); err != nil {
    79         -				return err
           74  +			if c, err := write(nil); err != nil {
           75  +				return c, err
    80     76   			}
    81     77   			// ... and stage the rest of the data in the buffer
    82     78   			ctx.input = append(ctx.input, data[blockSize-bufferLength:]...)
    83         -			return nil
           79  +			return datalength, nil
    84     80   		}
    85     81   
    86     82   		var buf []byte = make([]byte, 1, blockSize+1)
    87         -		for block := 0; block < len(data)/blockSize; block++ {
           83  +		for block := 0; block < datalength/blockSize; block++ {
    88     84   			for i := 0; i < blockSize; i++ {
    89     85   				var current byte = data[(block*blockSize)+i]
    90     86   				if ctx.table[ctx.hash] == current {
    91     87   					// Guess was right - don't output
    92     88   					buf[0] |= 1 << uint(i)
    93     89   				} else {
    94     90   					// Guess was wrong, output char
    95     91   					ctx.table[ctx.hash] = current
    96     92   					buf = append(buf, current)
    97     93   				}
    98     94   				ctx.update(current)
    99     95   			}
   100     96   
   101         -			if _, err := writer.Write(buf); err != nil {
   102         -				return err
           97  +			if c, err := writer.Write(buf); err != nil {
           98  +				return (block * blockSize) + c, err
   103     99   			}
   104    100   
   105    101   			// Reset the flags and buffer for the next iteration
   106    102   			buf, buf[0] = buf[:1], 0
   107    103   		}
   108    104   
   109         -		if remaining := len(data) % blockSize; remaining > 0 {
          105  +		if remaining := datalength % blockSize; remaining > 0 {
   110    106   			ctx.input = ctx.input[:remaining]
   111         -			copy(ctx.input, data[len(data)-remaining:])
          107  +			copy(ctx.input, data[datalength-remaining:])
   112    108   		} else {
   113    109   			ctx.input = ctx.input[:0]
   114    110   		}
   115    111   
   116         -		return nil
          112  +		return datalength, nil
   117    113   	}
   118    114   
   119    115   	return write
   120    116   }
   121    117   
   122    118   // Returns an io.Reader implementation that wraps the provided io.Reader
   123    119   // and decompresses data according to the predictor algorithm
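For context, a hedged usage sketch of Compressor after this change: the returned io.Writer buffers partial input until a full 8-byte block (plus its header byte) can be emitted, and a Write call with no data flushes whatever remains, as the doc comment above states. The import path 0dev.org/predictor is an assumption inferred from the file location src/0dev.org/predictor/predictor.go.

package main

import (
	"bytes"
	"fmt"

	"0dev.org/predictor" // path inferred from src/0dev.org/predictor/predictor.go
)

func main() {
	var compressed bytes.Buffer

	// Wrap the destination; data is buffered internally until a full
	// 8-byte block (plus header) can be written through.
	w := predictor.Compressor(&compressed)

	if _, err := w.Write([]byte("hello, predictor")); err != nil {
		fmt.Println("write error:", err)
		return
	}

	// A call with no data forces the remaining partial block to be flushed.
	if _, err := w.Write(nil); err != nil {
		fmt.Println("flush error:", err)
		return
	}

	fmt.Printf("compressed output: %d bytes\n", compressed.Len())
}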