Check-in [4dfcff962c]
Overview
SHA1:4dfcff962c763b560a5af8101c2f3f3ad059ae8c
Date: 2014-12-25 00:55:43
User: spaskalev
Comment: Predictor's compressor and decompressor structures now implement io.Writer/io.Reader in order to do away with function indirection, but they do not follow the required semantics. Those are provided by the SizedWriter/SizedReader wrappers returned by the constructor functions.
Timelines: family | ancestors | descendants | both | trunk
Downloads: Tarball | ZIP archive
Other Links: files | file ages | folders | manifest
Tags And Properties
Context
2014-12-25
01:25
[2b1ed8e45e] Removed pdc's output buffer when decompressing as io.Copy uses a sufficiently-large buffer internally. (user: spaskalev, tags: trunk)
00:55
[4dfcff962c] Predictor's compressor and decompressor structures now implement io.Writer/io.Reader in order to do away with function indirection, but they do not follow the required semantics. Those are provided by the SizedWriter/SizedReader wrappers returned by the constructor functions. (user: spaskalev, tags: trunk)
00:43
[50507bd510] Extracted predictor's compressor and decompressor code into separate structs that embed Sized{Writer,Reader} (user: spaskalev, tags: trunk)
Changes

Modified src/0dev.org/predictor/predictor.go from [cb877636af] to [3161320c11].

27
28
29
30
31
32
33
34
35
36

37
38
39
40
41
42
43
44

45
46
47
48
49
50
51
52
..
82
83
84
85
86
87
88
89
90
91
92

93
94
95
96
97
98
99
100
101

102
103
104
105
106
107
108
109
...
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
...
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It can buffer data as the predictor mandates 8-byte blocks with a header.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	cmp := &compressor{target: writer}
	// The embedded Writer buffers into 8-byte blocks before
	// delegating to the compress method.
	cmp.Writer = iou.SizedWriter(iou.WriterFunc(cmp.compress), 8)
	return cmp
}

// compressor holds the predictor state together with the buffering
// writer and the destination stream.
type compressor struct {
	context             // predictor state shared with the compress method
	io.Writer           // SizedWriter wrapper assigned in Compressor; provides the public Write
	target io.Writer    // underlying stream the compressed output is written to
}


func (ctx *compressor) compress(data []byte) (int, error) {
	var (
		blockSize  int = 8
		datalength int = len(data)
	)

	if datalength == 0 {
		return 0, nil
................................................................................
	return datalength, nil
}

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(reader io.Reader) io.Reader {
	dcmp := &decompressor{
		source: reader,
		// Scratch buffer for one block: starts empty, capacity 8.
		input: make([]byte, 0, 8),
	}
	// The embedded Reader delivers data in 8-byte units by
	// delegating to the decompress method.
	dcmp.Reader = iou.SizedReader(iou.ReaderFunc(dcmp.decompress), 8)
	return dcmp
}

// decompressor holds the predictor state together with the buffering
// reader, the compressed source stream and a per-block scratch buffer.
type decompressor struct {
	context             // predictor state, updated per byte during decompression
	io.Reader           // SizedReader wrapper assigned in Decompressor; provides the public Read
	source io.Reader    // underlying stream the compressed bytes are read from
	input  []byte       // scratch buffer (cap 8): header + block bytes; reset between blocks
}


func (ctx *decompressor) decompress(output []byte) (int, error) {
	var (
		err               error
		flags, predicted  byte
		rc, total, copied int
	)

	// Read the next prediction header
................................................................................
	// Fail on error unless it is EOF
	if err != nil && err != io.EOF {
		return total, err
	} else if rc == 0 {
		return total, err
	}

	// Extend the buffer, copy the prediction header
	//  and calculate the number of subsequent bytes to read
	ctx.input = ctx.input[:8]
	flags = ctx.input[0]
	predicted = bits.Hamming(flags)

	// Read the non-predicted bytes and place them in the end of the buffer
	rc, err = ctx.source.Read(ctx.input[predicted:])
retryData:
	if rc < int(8-predicted) && err == nil {
................................................................................
		ctx.update(ctx.input[i])
	}

	// Copy the decompressed data to the output and accumulate the count
	copied = copy(output, ctx.input[:rc])
	total += copied

	// Clear the buffer
	ctx.input = ctx.input[:0]

	// Loop for another pass if there is available space in the output
	output = output[copied:]
	if len(output) > 0 && err == nil {
		goto readHeader
	}

	return total, err
}







<

<
>




<



>
|







 







<

|
<
>




<




>
|







 







<
|
<







 







<
<
<








27
28
29
30
31
32
33

34

35
36
37
38
39

40
41
42
43
44
45
46
47
48
49
50
51
..
81
82
83
84
85
86
87

88
89

90
91
92
93
94

95
96
97
98
99
100
101
102
103
104
105
106
107
...
110
111
112
113
114
115
116

117

118
119
120
121
122
123
124
...
148
149
150
151
152
153
154



155
156
157
158
159
160
161
162
// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It can buffer data as the predictor mandates 8-byte blocks with a header.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	var cmp compressor

	cmp.target = writer

	return iou.SizedWriter(&cmp, 8)
}

// compressor holds the predictor state and the destination stream.
// Its Write method compresses; full io.Writer semantics come from the
// SizedWriter wrapper returned by Compressor.
type compressor struct {
	context // predictor state shared with the Write method

	target io.Writer // underlying stream the compressed output is written to
}

// Note: this method does not implement the full io.Writer's Write() semantics
func (ctx *compressor) Write(data []byte) (int, error) {
	var (
		blockSize  int = 8
		datalength int = len(data)
	)

	if datalength == 0 {
		return 0, nil
................................................................................
	return datalength, nil
}

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(reader io.Reader) io.Reader {
	dcmp := decompressor{
		source: reader,
		// Fixed-size scratch buffer for one header-prefixed block.
		input: make([]byte, 8),
	}
	// The decompressor's own Read lacks full io.Reader semantics;
	// SizedReader supplies the required 8-byte delivery on top of it.
	return iou.SizedReader(&dcmp, 8)
}

// decompressor holds the predictor state, the compressed source stream
// and a per-block scratch buffer. Its Read method decompresses; full
// io.Reader semantics come from the SizedReader wrapper returned by
// Decompressor.
type decompressor struct {
	context // predictor state, updated per byte during decompression

	source io.Reader // underlying stream the compressed bytes are read from
	input  []byte    // fixed 8-byte scratch buffer: header + block bytes
}

// Note: this method does not implement the full io.Reader's Read() semantics
func (ctx *decompressor) Read(output []byte) (int, error) {
	var (
		err               error
		flags, predicted  byte
		rc, total, copied int
	)

	// Read the next prediction header
................................................................................
	// Fail on error unless it is EOF
	if err != nil && err != io.EOF {
		return total, err
	} else if rc == 0 {
		return total, err
	}


	// Copy the prediction header and calculate the number of subsequent bytes to read

	flags = ctx.input[0]
	predicted = bits.Hamming(flags)

	// Read the non-predicted bytes and place them in the end of the buffer
	rc, err = ctx.source.Read(ctx.input[predicted:])
retryData:
	if rc < int(8-predicted) && err == nil {
................................................................................
		ctx.update(ctx.input[i])
	}

	// Copy the decompressed data to the output and accumulate the count
	copied = copy(output, ctx.input[:rc])
	total += copied




	// Loop for another pass if there is available space in the output
	output = output[copied:]
	if len(output) > 0 && err == nil {
		goto readHeader
	}

	return total, err
}