Check-in [723ffeb1fd]
Overview
Comment:[predictor] Removed the buffer array from the context struct; the input slice is now allocated on creation with make.
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | trunk
Files: files | file ages | folders
SHA1: 723ffeb1fd5d01e982acb9a401e720f5c07843ab
User & Date: spaskalev on 2014-12-20 11:52:46
Other Links: manifest | tags
Context
2014-12-20
13:04
Decompressor might lose part of the underlying buffer array by reslicing; fixed by copy. check-in: d516e7425d user: spaskalev tags: trunk
11:52
[predictor] Removed the buffer array from the context struct; the input slice is now allocated on creation with make. check-in: 723ffeb1fd user: spaskalev tags: trunk
2014-12-19
21:54
Implemented commands/pdc using predictor. Made predictor's Compressor(...) return an io.Writer. check-in: c9f3a59cb6 user: spaskalev tags: trunk
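
The 13:04 entry above addresses a pitfall that is easy to miss: reslicing a buffer from an offset discards the front of the backing array, so later reslices cannot grow back over it. A minimal illustrative sketch of the hazard and the copy-based remedy (not the actual fix from that check-in):

package main

import "fmt"

func main() {
	backing := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	s := backing[:]

	// Reslicing from an offset moves the slice start forward; the
	// first three elements are unreachable through s, and cap(s)
	// shrinks, so s can no longer be regrown to the full 8 bytes.
	s = s[3:]
	fmt.Println(s, cap(s)) // [4 5 6 7 8] 5

	// Copying the tail back to the front keeps the whole backing
	// array usable for the next read.
	n := copy(backing[:], s)
	s = backing[:n]
	fmt.Println(s, cap(s)) // [4 5 6 7 8] 8
}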
Changes

Modified src/0dev.org/predictor/predictor.go from [c65cc5c256] to [b8ee77a9c4].
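
In outline, the change drops the fixed array field and instead gives the input slice its own backing store, allocated once. A minimal sketch of the two layouts (hypothetical type names):

package sketch

// Before: a fixed array field plus a slice that always points into it.
type withArray struct {
	buffer [1 << 3]byte
	input  []byte
}

// After: the slice alone, allocated with make([]byte, 0, 8).
type withSlice struct {
	input []byte
}

func newWithSlice() withSlice {
	// len == 0 and cap == 8, so input[:n] stays within the original
	// allocation for any n <= 8; the separate array field is redundant.
	return withSlice{input: make([]byte, 0, 8)}
}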

Old version [c65cc5c256]:

package predictor

import (
	"io"
)

type context struct {
	table  [1 << 16]byte
	buffer [1 << 3]byte
	input  []byte
	hash   uint16
}

type compressor func([]byte) error

func (w compressor) Write(data []byte) (int, error) {
	return len(data), w(data)
}
................................................................................
// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It may buffer data, since the predictor format mandates 8-byte blocks preceded by a header byte.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	var ctx context
	ctx.input = ctx.buffer[:0]

	// Forward declaration as it is required for recursion
	var write compressor

	write = func(data []byte) error {
		var (
			blockSize    int = 8
................................................................................
			}

			// Reset the flags and buffer for the next iteration
			buf, buf[0] = buf[:1], 0
		}

		if remaining := len(data) % blockSize; remaining > 0 {
			ctx.input = ctx.buffer[:remaining]
			copy(ctx.input, data[len(data)-remaining:])
		} else {
			ctx.input = ctx.buffer[:0]
		}

		return nil
	}

	return write
}
................................................................................
	return r(output)
}

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(wrapped io.Reader) io.Reader {
	var ctx context
	ctx.input = ctx.buffer[:0]

	return decompressor(func(output []byte) (int, error) {
		var (
			err       error
			flags     byte
			readCount int
		)
................................................................................
		}

		// Reads a single block per call; returning fewer bytes than requested
		// is allowed by io.Reader's contract.
		// TODO - read all bytes from a block based on the Hamming weight of the
		// flags byte and just shuffle them for predictions instead of bite-sized reads ;)

		// Read the flags
		ctx.input = ctx.buffer[:1]
		readCount, err = wrapped.Read(ctx.input)
		if readCount == 0 || err != nil {
			return readCount, err
		}

		flags = ctx.input[0]
		ctx.input = ctx.buffer[:8]

		var i uint = 0
		for ; i < 8; i++ {
			if flags&(1<<i) > 0 {
				// Guess was right
				ctx.input[i] = ctx.table[ctx.hash]
			} else {
................................................................................

		readCount = copy(output, ctx.input[:i])

		// Place any remaining bytes in the buffer
		if uint(readCount) < i {
			ctx.input = ctx.input[readCount:i]
		} else {
			ctx.input = ctx.buffer[:0]
		}

		return readCount, nil
	})
}

New version [b8ee77a9c4]:

package predictor

import (
	"io"
)

type context struct {
	table [1 << 16]byte

	input []byte
	hash  uint16
}

type compressor func([]byte) error

func (w compressor) Write(data []byte) (int, error) {
	return len(data), w(data)
}
................................................................................
// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It may buffer data, since the predictor format mandates 8-byte blocks preceded by a header byte.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	var ctx context
	ctx.input = make([]byte, 0, 8)

	// Forward declaration as it is required for recursion
	var write compressor

	write = func(data []byte) error {
		var (
			blockSize    int = 8
................................................................................
			}

			// Reset the flags and buffer for the next iteration
			buf, buf[0] = buf[:1], 0
		}

		if remaining := len(data) % blockSize; remaining > 0 {
			ctx.input = ctx.input[:remaining]
			copy(ctx.input, data[len(data)-remaining:])
		} else {
			ctx.input = ctx.input[:0]
		}

		return nil
	}

	return write
}
................................................................................
	return r(output)
}

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(wrapped io.Reader) io.Reader {
	var ctx context
	ctx.input = make([]byte, 0, 8)

	return decompressor(func(output []byte) (int, error) {
		var (
			err       error
			flags     byte
			readCount int
		)
................................................................................
		}

		// Reads a single block per call; returning fewer bytes than requested
		// is allowed by io.Reader's contract.
		// TODO - read all bytes from a block based on the Hamming weight of the
		// flags byte and just shuffle them for predictions instead of bite-sized reads ;)

		// Read the flags
		ctx.input = ctx.input[:1]
		readCount, err = wrapped.Read(ctx.input)
		if readCount == 0 || err != nil {
			return readCount, err
		}

		flags = ctx.input[0]
		ctx.input = ctx.input[:8]

		var i uint = 0
		for ; i < 8; i++ {
			if flags&(1<<i) > 0 {
				// Guess was right
				ctx.input[i] = ctx.table[ctx.hash]
			} else {
................................................................................
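
To make the flag decoding above concrete (values chosen for illustration): a flags byte of 0x05, binary 00000101, has bits 0 and 2 set, so bytes 0 and 2 of the 8-byte block were predicted correctly and are taken from the guess table; the remaining six bytes are read from the wrapped stream in the elided else branch.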

		readCount = copy(output, ctx.input[:i])

		// Place any remaining bytes in the buffer
		if uint(readCount) < i {
			ctx.input = ctx.input[readCount:i]
		} else {
			ctx.input = ctx.input[:0]
		}

		return readCount, nil
	})
}
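
For context, a hypothetical round trip through the two constructors, assuming the package imports under the path shown in the modified file name; per the doc comment on Compressor, a call with no data forces a flush:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"0dev.org/predictor"
)

func main() {
	var stream bytes.Buffer

	// Compress: output is written in 8-byte blocks, each preceded
	// by a flags byte marking the correctly predicted positions.
	w := predictor.Compressor(&stream)
	w.Write([]byte("hello, predictor"))
	w.Write(nil) // an empty write flushes any buffered partial block

	// Decompress the stream back into the original bytes.
	r := predictor.Decompressor(&stream)
	plain, err := ioutil.ReadAll(r)
	fmt.Printf("%q %v\n", plain, err)
}

A Write whose length is not a multiple of eight leaves the remainder buffered in ctx.input for the next call, which is why the explicit empty flush matters before the stream is read back.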