Check-in [42ba1f458d]
Overview
Comment: Decompressor - try to fill as much of the output buffer as possible in a single pass.
SHA1: 42ba1f458d7b62fd3cc81c27cf27ff4147cb5cdf
User & Date: spaskalev on 2014-12-22 16:35:06
Context
2014-12-22
16:41 Integrate the decompressor2 branch into trunk now that it is faster. (check-in: 6d10a1d28f, user: spaskalev, tags: trunk)
16:35 Decompressor - try to fill as much of the output buffer as possible in a single pass. (Closed-Leaf check-in: 42ba1f458d, user: spaskalev, tags: decompressor2)
15:34 Fixed a nasty variable shadowing bug :) (check-in: e9b80a705b, user: spaskalev, tags: decompressor2)
Changes

Modified src/0dev.org/predictor/predictor.go from [219e9c8a2d] to [71e92568a2].

@@ -121,42 +121,43 @@
 // and decompresses data according to the predictor algorithm
 func Decompressor(reader io.Reader) io.Reader {
 	var ctx context
 	ctx.input = make([]byte, 0, 8)
 
 	return decompressor(func(output []byte) (int, error) {
 		var (
-			err                      error
-			flags                    byte
-			rc, available, predicted int
+			err                             error
+			flags                           byte
+			rc, available, predicted, total int
 		)
 
 		// Sanity check for space to read into
 		if len(output) == 0 {
 			return 0, nil
 		}
 
 		// Check whether we have leftover data in the buffer
 		if len(ctx.input) > 0 {
 			rc = copy(output, ctx.input)
 
 			// Check whether we still have leftover data in the buffer :)
 			if rc < len(ctx.input) {
 				ctx.input = ctx.input[:copy(ctx.input, ctx.input[rc:])]
 			}
 			return rc, nil
 		}
 
 		// Read the next prediction header
+	readHeader:
 		rc, err = reader.Read(ctx.input[:1])
 		// Fail on error unless it is EOF
 		if err != nil && err != io.EOF {
-			return 0, err
+			return total, err
 		} else if rc == 0 {
-			return 0, err
+			return total, err
 		}
 
 		// Extend the buffer, copy the prediction header
 		//  and calculate the number of subsequent bytes to read
 		ctx.input = ctx.input[:8]
 		flags = ctx.input[0]
 		predicted = int(bits.Hamming(flags))
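This hunk is what turns the read callback into a single-pass fill: the new total counter accumulates output across blocks, and the readHeader label lets the second hunk below jump back here to decode another block instead of returning after the first one. The following is a minimal standalone sketch of that pattern, not the predictor code itself; fillingReader, blockSize, and the sample input are invented for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"
)

const blockSize = 8

// fillingReader decodes fixed-size blocks from src and keeps going until
// the caller's buffer cannot hold another block, mirroring the effect of
// the goto readHeader loop in this check-in.
type fillingReader struct {
	src io.Reader
}

func (f *fillingReader) Read(output []byte) (int, error) {
	var total int
	var block [blockSize]byte
	for len(output)-total >= blockSize {
		rc, err := io.ReadFull(f.src, block[:])
		total += copy(output[total:], block[:rc])
		if err == io.ErrUnexpectedEOF {
			err = io.EOF // a short final block still returns its data
		}
		if err != nil {
			return total, err
		}
	}
	return total, nil
}

func main() {
	src := bytes.NewReader(bytes.Repeat([]byte("abcdefgh"), 4))
	out := make([]byte, 32)
	n, err := (&fillingReader{src}).Read(out)
	fmt.Println(n, err) // 32 <nil>: four blocks filled by one Read call
}

The check-in itself expresses the loop with a labeled goto rather than a for block, which leaves the existing body unindented and keeps the diff small; the behavior is the same.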
@@ -189,19 +190,26 @@
 			// Update the hash
 			ctx.hash = (ctx.hash << 4) ^ uint16(ctx.input[i])
 		}
 
 		// rc now contains the precise amount of populated data
 		ctx.input = ctx.input[:rc]
 		available = copy(output, ctx.input)
 
+		total += available
+
 		// Check for remaining bytes that dont fit in the output buffer
 		if available < rc {
 			ctx.input = ctx.input[:copy(ctx.input, ctx.input[available:])]
 		} else {
 			// Clear the buffer
 			ctx.input = ctx.input[:0]
+
+			output = output[available:]
+			if len(output) > 0 && err == nil {
+				goto readHeader
+			}
 		}
 
-		return available, err
+		return total, err
 	})
 }
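Because the callback now accumulates into total across passes, a single Read may return n > 0 together with io.EOF, which the io.Reader contract permits; callers should consume the returned bytes before acting on the error. A hedged usage sketch follows, assuming the package resolves as 0dev.org/predictor (an import path guessed from the file's location under src/; adjust for your checkout):

package main

import (
	"bytes"
	"fmt"
	"io"

	predictor "0dev.org/predictor"
)

func main() {
	// Stand-in for a stream of predictor-compressed data.
	compressed := bytes.NewReader(nil)
	r := predictor.Decompressor(compressed)

	// A large buffer can now be filled by a single Read, where the
	// previous code returned after at most one 8-byte block.
	buf := make([]byte, 4096)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			fmt.Printf("decoded %d bytes\n", n) // handle data before err
		}
		if err != nil {
			if err != io.EOF {
				fmt.Println("read error:", err)
			}
			break
		}
	}
}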