Overview
Comment: | Renamed MinReader to BlockReader. The latter is now used by predictor's decompressor to simplify the code and do away with the need for internal buffering. |
---|---|
Downloads: | Tarball | ZIP archive | SQL archive |
Timelines: | family | ancestors | descendants | both | trunk |
Files: | files | file ages | folders |
SHA1: |
38f8e62c81fb3daca174e4a8e6c6a71b |
User & Date: | spaskalev on 2014-12-23 18:52:46 |
Other Links: | manifest | tags |
Context
2014-12-23
| ||
19:18 | Fixing ioutil tests to compile :) check-in: b0ff11dfcd user: spaskalev tags: trunk | |
18:52 | Renamed MinReader to BlockReader. The latter is now used by predictor's decompressor to simplify the code and do away with the need for internal buffering. check-in: 38f8e62c81 user: spaskalev tags: trunk | |
18:39 | Additional fixes and code simplification for MinReader check-in: 70896f73e9 user: spaskalev tags: trunk | |
Changes
Modified src/0dev.org/ioutil/ioutil.go from [3d22f83cbe] to [f63ac1dff8].
1 2 3 4 | // Package ioutil contains various constructs for io operations package ioutil import ( | < | 1 2 3 4 5 6 7 8 9 10 11 | // Package ioutil contains various constructs for io operations package ioutil import ( "io" ) // An function alias type that implements io.Writer type WriterFunc func([]byte) (int, error) // Delegates the call to the WriterFunc while implementing io.Writer |
︙ | ︙ | |||
20 21 22 23 24 25 26 | // Delegates the call to the WriterFunc while implementing io.Reader func (r ReaderFunc) Read(b []byte) (int, error) { return r(b) } // Returns a reader that will delegate calls to Read(...) while ensuring // that the output buffer will never be smaller than the required size | > | < < < | < | > < < < > > | 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 | // Delegates the call to the WriterFunc while implementing io.Reader func (r ReaderFunc) Read(b []byte) (int, error) { return r(b) } // Returns a reader that will delegate calls to Read(...) while ensuring // that the output buffer will never be smaller than the required size // and will be downsized to a multiple of the required size if larger func BlockReader(reader io.Reader, size int) io.Reader { var buffer []byte = make([]byte, 0, size) return ReaderFunc(func(output []byte) (int, error) { var ( readCount int err error ) start: // Reply with the buffered data if there is any if len(buffer) > 0 { readCount = copy(output, buffer) // Advance the data in the buffer buffer = buffer[:copy(buffer, buffer[readCount:])] // Return count and error if we have read the whole buffer if len(buffer) == 0 { return readCount, err } // Do not propagate an error until the buffer is exhausted return readCount, nil } // Delegate if the buffer is empty and the destination buffer is large enough if len(output) >= size { return reader.Read(output[:(len(output)/size)*size]) } // Perform a read into the buffer readCount, err = reader.Read(buffer[:size]) // Size the buffer down to the read data size // and restart if we have successfully read some bytes buffer = buffer[:readCount] if len(buffer) > 0 { goto start } // Returning on err/misbehaving noop reader return 0, err }) } |
Modified src/0dev.org/predictor/predictor.go from [7a1990432a] to [625ed7e8bc].
︙ | ︙ | |||
117 118 119 120 121 122 123 | // Returns an io.Reader implementation that wraps the provided io.Reader // and decompresses data according to the predictor algorithm func Decompressor(reader io.Reader) io.Reader { var ctx context ctx.input = make([]byte, 0, 8) | | < < < < < < < < < < < < < < < < < < | 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 | // Returns an io.Reader implementation that wraps the provided io.Reader // and decompresses data according to the predictor algorithm func Decompressor(reader io.Reader) io.Reader { var ctx context ctx.input = make([]byte, 0, 8) return iou.BlockReader(iou.ReaderFunc(func(output []byte) (int, error) { var ( err error flags, predicted byte rc, total, copied int ) // Read the next prediction header readHeader: rc, err = reader.Read(ctx.input[:1]) // Fail on error unless it is EOF if err != nil && err != io.EOF { return total, err } else if rc == 0 { |
︙ | ︙ | |||
192 193 194 195 196 197 198 | ctx.update(ctx.input[i]) } // Copy the decompressed data to the output and accumulate the count copied = copy(output, ctx.input[:rc]) total += copied | < < < < < < | | | | | | < | | 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 | ctx.update(ctx.input[i]) } // Copy the decompressed data to the output and accumulate the count copied = copy(output, ctx.input[:rc]) total += copied // Clear the buffer ctx.input = ctx.input[:0] // Loop for another pass if there is available space in the output output = output[copied:] if len(output) > 0 && err == nil { goto readHeader } return total, err }), 8) } |