Overview
Comment: | Renamed MinReader to BlockReader. The latter is now used by predictor's decompressor to simplify the code and do away with the need for internal buffering. |
Downloads: | Tarball | ZIP archive | SQL archive |
Timelines: | family | ancestors | descendants | both | trunk |
Files: | files | file ages | folders |
SHA1: | 38f8e62c81fb3daca174e4a8e6c6a71b |
User & Date: | spaskalev on 2014-12-23 18:52:46 |
Other Links: | manifest | tags |
Context
2014-12-23
19:18 | Fixing ioutil tests to compile :) check-in: b0ff11dfcd user: spaskalev tags: trunk | |
18:52 | Renamed MinReader to BlockReader. The latter is now used by predictor's decompressor to simplify the code and do away with the need for internal buffering. check-in: 38f8e62c81 user: spaskalev tags: trunk | |
18:39 | Additional fixes and code simplification for MinReader check-in: 70896f73e9 user: spaskalev tags: trunk | |
Changes
Modified src/0dev.org/ioutil/ioutil.go from [3d22f83cbe] to [f63ac1dff8].
// Package ioutil contains various constructs for io operations
package ioutil

import (
	//"fmt"
	"io"
)

// A function alias type that implements io.Writer
type WriterFunc func([]byte) (int, error)

// Delegates the call to the WriterFunc while implementing io.Writer
................................................................................
// Delegates the call to the WriterFunc while implementing io.Reader
func (r ReaderFunc) Read(b []byte) (int, error) {
	return r(b)
}

// Returns a reader that will delegate calls to Read(...) while ensuring
// that the output buffer will never be smaller than the required size
func MinReader(reader io.Reader, size int) io.Reader {
	var buffer []byte = make([]byte, 0, size)

	return ReaderFunc(func(output []byte) (int, error) {
		var (
			readCount int
			err       error
		)
	start:
		//fmt.Println("Requesting read with length ", len(output), "buffer's length is ", len(buffer))
		// Reply with the buffered data if there is any
		if len(buffer) > 0 {
			readCount = copy(output, buffer)

			// Advance the data in the buffer
			buffer = buffer[:copy(buffer, buffer[readCount:])]
			//fmt.Println("After buffer read - buffer length is", len(buffer))
			if len(buffer) == 0 {
				return readCount, err
			}

			// Do not propagate an error until the buffer is exhausted
			return readCount, nil
		}

		// Delegate if the buffer is empty and the destination buffer is large enough
		if len(output) >= size {
			//fmt.Println("Delegating read for output length ", len(output), " and size ", size)
			return reader.Read(output[:(len(output)/size)*size])
		}

		// Perform a read into the buffer
		readCount, err = reader.Read(buffer[:size])

		// Size the buffer down to the read data size and restart
		buffer = buffer[:readCount]
		//fmt.Println("Read into buffer: ", len(buffer), "bytes")
		if len(buffer) > 0 {
			goto start
		}
		return 0, err
	})
}
// Package ioutil contains various constructs for io operations
package ioutil

import (
	"io"
)

// A function alias type that implements io.Writer
type WriterFunc func([]byte) (int, error)

// Delegates the call to the WriterFunc while implementing io.Writer
................................................................................
// Delegates the call to the WriterFunc while implementing io.Reader
func (r ReaderFunc) Read(b []byte) (int, error) {
	return r(b)
}

// Returns a reader that will delegate calls to Read(...) while ensuring
// that the output buffer will never be smaller than the required size
// and will be downsized to a multiple of the required size if larger
func BlockReader(reader io.Reader, size int) io.Reader {
	var buffer []byte = make([]byte, 0, size)

	return ReaderFunc(func(output []byte) (int, error) {
		var (
			readCount int
			err       error
		)
	start:
		// Reply with the buffered data if there is any
		if len(buffer) > 0 {
			readCount = copy(output, buffer)

			// Advance the data in the buffer
			buffer = buffer[:copy(buffer, buffer[readCount:])]

			// Return count and error if we have read the whole buffer
			if len(buffer) == 0 {
				return readCount, err
			}

			// Do not propagate an error until the buffer is exhausted
			return readCount, nil
		}

		// Delegate if the buffer is empty and the destination buffer is large enough
		if len(output) >= size {
			return reader.Read(output[:(len(output)/size)*size])
		}

		// Perform a read into the buffer
		readCount, err = reader.Read(buffer[:size])

		// Size the buffer down to the read data size
		// and restart if we have successfully read some bytes
		buffer = buffer[:readCount]
		if len(buffer) > 0 {
			goto start
		}

		// Returning on err/misbehaving noop reader
		return 0, err
	})
}
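
For context, a minimal usage sketch of the new BlockReader (not part of the check-in). It assumes the package is importable as 0dev.org/ioutil, mirroring src/0dev.org/ioutil in this repository; the block size, inner reader, and byte counts are arbitrary and only illustrate the guarantee the doc comment describes.

package main

import (
	"fmt"

	iou "0dev.org/ioutil"
)

func main() {
	// A toy inner reader that reports the size of the buffer it is handed
	// and fills it completely with 'x' bytes.
	source := iou.ReaderFunc(func(p []byte) (int, error) {
		fmt.Println("inner reader handed a buffer of", len(p), "bytes")
		for i := range p {
			p[i] = 'x'
		}
		return len(p), nil
	})

	// BlockReader guarantees the inner reader always sees a buffer whose
	// length is at least 8 and, when delegating, a multiple of 8.
	blocked := iou.BlockReader(source, 8)

	small := make([]byte, 3) // smaller than the block size
	n, _ := blocked.Read(small)
	fmt.Println("caller got", n, "bytes") // 3; five bytes stay buffered internally

	large := make([]byte, 20)
	n, _ = blocked.Read(large)
	fmt.Println("caller got", n, "bytes") // 5; drains the buffered remainder

	n, _ = blocked.Read(large)
	fmt.Println("caller got", n, "bytes") // 16; delegated, rounded down to a multiple of 8
}

This guarantee is what lets callers such as predictor's decompressor drop their own leftover-byte handling, as shown in the next file.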
Modified src/0dev.org/predictor/predictor.go from [7a1990432a] to [625ed7e8bc].
// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(reader io.Reader) io.Reader {
	var ctx context
	ctx.input = make([]byte, 0, 8)

	return iou.ReaderFunc(func(output []byte) (int, error) {
		var (
			err               error
			flags, predicted  byte
			rc, total, copied int
		)

		// Sanity check for space to read into
		if len(output) == 0 {
			return 0, nil
		}

		// Check whether we have leftover data in the buffer
		if len(ctx.input) > 0 {
			rc = copy(output, ctx.input)

			// Check whether we still have leftover data in the buffer :)
			if rc < len(ctx.input) {
				// Shift the remaining bytes at the start of the buffer
				// and resize the buffer accordingly
				ctx.input = ctx.input[:copy(ctx.input, ctx.input[rc:])]
			}
			return rc, nil
		}

		// Read the next prediction header
	readHeader:
		rc, err = reader.Read(ctx.input[:1])
		// Fail on error unless it is EOF
		if err != nil && err != io.EOF {
			return total, err
		} else if rc == 0 {
................................................................................
			ctx.update(ctx.input[i])
		}

		// Copy the decompressed data to the output and accumulate the count
		copied = copy(output, ctx.input[:rc])
		total += copied

		// Check for remaining bytes that don't fit in the output buffer
		if copied < rc {
			// Shift the remaining bytes at the start of the buffer
			// and resize the buffer accordingly
			ctx.input = ctx.input[:copy(ctx.input, ctx.input[copied:rc])]
		} else {
			// Clear the buffer
			ctx.input = ctx.input[:0]

			// Loop for another pass if there is available space in the output
			output = output[copied:]
			if len(output) > 0 && err == nil {
				goto readHeader
			}
		}

		return total, err
	})
}
// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm
func Decompressor(reader io.Reader) io.Reader {
	var ctx context
	ctx.input = make([]byte, 0, 8)

	return iou.BlockReader(iou.ReaderFunc(func(output []byte) (int, error) {
		var (
			err               error
			flags, predicted  byte
			rc, total, copied int
		)

		// Read the next prediction header
	readHeader:
		rc, err = reader.Read(ctx.input[:1])
		// Fail on error unless it is EOF
		if err != nil && err != io.EOF {
			return total, err
		} else if rc == 0 {
................................................................................
			ctx.update(ctx.input[i])
		}

		// Copy the decompressed data to the output and accumulate the count
		copied = copy(output, ctx.input[:rc])
		total += copied

		// Clear the buffer
		ctx.input = ctx.input[:0]

		// Loop for another pass if there is available space in the output
		output = output[copied:]
		if len(output) > 0 && err == nil {
			goto readHeader
		}

		return total, err
	}), 8)
}
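
For reference, a minimal sketch of how the simplified Decompressor might be consumed (not part of the check-in). It assumes the package is importable as 0dev.org/predictor, mirroring src/0dev.org/predictor in this repository, and the empty compressed buffer is only a stand-in for a real predictor-compressed stream.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"0dev.org/predictor"
)

func main() {
	// Stand-in for a stream produced by the predictor compressor.
	var compressed bytes.Buffer

	// Decompressor now returns its decompression closure wrapped in
	// BlockReader with a block size of 8, so the closure always receives
	// an output slice with room for a full 8-byte prediction block and no
	// longer needs to carry leftover bytes between calls. Callers can
	// read through it with buffers of any size.
	output, err := ioutil.ReadAll(predictor.Decompressor(&compressed))
	if err != nil {
		fmt.Println("decompression failed:", err)
		return
	}
	fmt.Printf("decompressed %d bytes\n", len(output))
}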