Check-in [723ffeb1fd]
Overview
Comment:[predictor] Removed the buffer from the context struct, allocate the input slice buffer on creation with make.
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | trunk
Files: files | file ages | folders
SHA1: 723ffeb1fd5d01e982acb9a401e720f5c07843ab
User & Date: spaskalev on 2014-12-20 11:52:46
Other Links: manifest | tags
Context
2014-12-20
13:04
Decompressor might lose part of the underlying buffer array by reslicing, fixed by copy check-in: d516e7425d user: spaskalev tags: trunk
11:52
[predictor] Removed the buffer from the context struct, allocate the input slice buffer on creation with make. check-in: 723ffeb1fd user: spaskalev tags: trunk
2014-12-19
21:54
Implemented commands/pdc using predictor. Made predictor's Compressor(...) return an io.Writer. check-in: c9f3a59cb6 user: spaskalev tags: trunk
Changes

Modified src/0dev.org/predictor/predictor.go from [c65cc5c256] to [b8ee77a9c4].

     3      3   package predictor
     4      4   
     5      5   import (
     6      6   	"io"
     7      7   )
     8      8   
     9      9   type context struct {
    10         -	table  [1 << 16]byte
    11         -	buffer [1 << 3]byte
    12         -	input  []byte
    13         -	hash   uint16
           10  +	table [1 << 16]byte
           11  +	input []byte
           12  +	hash  uint16
    14     13   }
    15     14   
    16     15   type compressor func([]byte) error
    17     16   
    18     17   func (w compressor) Write(data []byte) (int, error) {
    19     18   	return len(data), w(data)
    20     19   }
................................................................................
    22     21   // Returns an io.Writer implementation that wraps the provided io.Writer
    23     22   // and compresses data according to the predictor algorithm
    24     23   //
    25     24   // It can buffer data as the predictor mandates 8-byte blocks with a header.
    26     25   // A call with no data will force a flush.
    27     26   func Compressor(writer io.Writer) io.Writer {
    28     27   	var ctx context
    29         -	ctx.input = ctx.buffer[:0]
           28  +	ctx.input = make([]byte, 0, 8)
    30     29   
    31     30   	// Forward declaration as it is required for recursion
    32     31   	var write compressor
    33     32   
    34     33   	write = func(data []byte) error {
    35     34   		var (
    36     35   			blockSize    int = 8
................................................................................
    94     93   			}
    95     94   
    96     95   			// Reset the flags and buffer for the next iteration
    97     96   			buf, buf[0] = buf[:1], 0
    98     97   		}
    99     98   
   100     99   		if remaining := len(data) % blockSize; remaining > 0 {
   101         -			ctx.input = ctx.buffer[:remaining]
          100  +			ctx.input = ctx.input[:remaining]
   102    101   			copy(ctx.input, data[len(data)-remaining:])
   103    102   		} else {
   104         -			ctx.input = ctx.buffer[:0]
          103  +			ctx.input = ctx.input[:0]
   105    104   		}
   106    105   
   107    106   		return nil
   108    107   	}
   109    108   
   110    109   	return write
   111    110   }
................................................................................
   118    117   	return r(output)
   119    118   }
   120    119   
   121    120   // Returns an io.Reader implementation that wraps the provided io.Reader
   122    121   // and decompresses data according to the predictor algorithm
   123    122   func Decompressor(wrapped io.Reader) io.Reader {
   124    123   	var ctx context
   125         -	ctx.input = ctx.buffer[:0]
          124  +	ctx.input = make([]byte, 0, 8)
   126    125   
   127    126   	return decompressor(func(output []byte) (int, error) {
   128    127   		var (
   129    128   			err       error
   130    129   			flags     byte
   131    130   			readCount int
   132    131   		)
................................................................................
   144    143   		}
   145    144   
   146    145   		// This is single-iteration only but it is fine according to io.Reader's contract ?!
   147    146   		// TODO - read all bytes from a block based on the hamming weight of the flag
   148    147   		// and just shuffle them for predictions instead of bite-sized reads ;)
   149    148   
   150    149   		// Read the flags
   151         -		ctx.input = ctx.buffer[:1]
          150  +		ctx.input = ctx.input[:1]
   152    151   		readCount, err = wrapped.Read(ctx.input)
   153    152   		if readCount == 0 || err != nil {
   154    153   			return readCount, err
   155    154   		}
   156    155   
   157    156   		flags = ctx.input[0]
   158         -		ctx.input = ctx.buffer[:8]
          157  +		ctx.input = ctx.input[:8]
   159    158   
   160    159   		var i uint = 0
   161    160   		for ; i < 8; i++ {
   162    161   			if flags&(1<<i) > 0 {
   163    162   				// Guess was right
   164    163   				ctx.input[i] = ctx.table[ctx.hash]
   165    164   			} else {
................................................................................
   185    184   
   186    185   		readCount = copy(output, ctx.input[:i])
   187    186   
   188    187   		// Place any remaining bytes in the buffer
   189    188   		if uint(readCount) < i {
   190    189   			ctx.input = ctx.input[readCount:i]
   191    190   		} else {
   192         -			ctx.input = ctx.buffer[:0]
          191  +			ctx.input = ctx.input[:0]
   193    192   		}
   194    193   
   195    194   		return readCount, nil
   196    195   	})
   197    196   }