Check-in [c62581c2a6]
Overview
Comment: Removed the compressor alias type from predictor, use ioutil.WriterFunc
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | trunk
Files: files | file ages | folders
SHA1: c62581c2a644cab7abdb5e728c73cd013f9fd454
User & Date: spaskalev on 2014-12-23 09:32:00
Other Links: manifest | tags
Context
2014-12-23
10:38
Added MinReader to ioutils, CC at 100% check-in: 47b221d5b4 user: spaskalev tags: trunk
09:32
Removed the compressor alias type from predictor, use ioutil.WriterFunc check-in: c62581c2a6 user: spaskalev tags: trunk
08:15
Removed the decompressor alias type from predictor, use ioutil.ReaderFunc check-in: 2b049247ed user: spaskalev tags: trunk
Changes

Modified src/0dev.org/predictor/predictor.go from [4bb1d52139] to [7a1990432a].


// characters which will be used to index the guess table.
// A better hash function would result in additional compression,
// at the expense of time.
func (ctx *context) update(val byte) {
	ctx.hash = (ctx.hash << 4) ^ uint16(val)
}
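
For intuition, the update rule keys the guess table on recent input only: each update shifts the previous state left by four bits before mixing in the next byte, so a byte's bits have fully left the 16-bit hash after four further updates. A minimal standalone sketch of the same rule:

	// rollingHash mirrors context.update: shift the previous state
	// left by four bits, then mix in the new byte.
	func rollingHash(data []byte) uint16 {
		var hash uint16
		for _, b := range data {
			hash = (hash << 4) ^ uint16(b)
		}
		return hash
	}

After five bytes, rollingHash([]byte("abcde")) no longer depends on 'a' at all, so the guess table (indexed by this 16-bit hash) adapts quickly to local patterns.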

-type compressor func([]byte) error
-
-func (w compressor) Write(data []byte) (int, error) {
-	return len(data), w(data)
-}
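
The removed compressor type was a single-method adapter in the style of http.HandlerFunc, but it always reported len(data) as the byte count regardless of what the wrapped function did. Its replacement, iou.WriterFunc, is defined elsewhere (this diff does not show the ioutil package); judging from its use below, it is presumably the same adapter generalized to the full io.Writer signature, along the lines of:

	// Presumed shape of iou.WriterFunc; the actual definition lives in
	// the (unshown) ioutil package and may differ in detail.
	type WriterFunc func([]byte) (int, error)

	// Write satisfies io.Writer by delegating to the function itself,
	// letting it report how many bytes were actually consumed.
	func (f WriterFunc) Write(data []byte) (int, error) {
		return f(data)
	}

Moving the byte count into the function's own return value is what drives the rest of this change: every return nil below becomes a (count, error) pair.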

// Returns an io.Writer implementation that wraps the provided io.Writer
// and compresses data according to the predictor algorithm
//
// It can buffer data as the predictor mandates 8-byte blocks with a header.
// A call with no data will force a flush.
func Compressor(writer io.Writer) io.Writer {
	var ctx context
	ctx.input = make([]byte, 0, 8)

	// Forward declaration as it is required for recursion
-	var write compressor
+	var write iou.WriterFunc

-	write = func(data []byte) error {
+	write = func(data []byte) (int, error) {
		var (
			blockSize    int = 8
			bufferLength int = len(ctx.input)
+			datalength   int = len(data)
		)

		// Force a flush if we are called with no data to write
-		if len(data) == 0 {
+		if datalength == 0 {
			// Nothing to flush if the buffer is empty though
			if len(ctx.input) == 0 {
-				return nil
+				return 0, nil
			}
			// We can't have more than 7 bytes in the buffer so this is safe
-			data, blockSize, bufferLength = ctx.input, len(ctx.input), 0
+			data, datalength = ctx.input, len(ctx.input)
+			blockSize, bufferLength = datalength, 0
		}

		// Check if there are pending bytes in the buffer
-		if len(data) < blockSize || bufferLength > 0 {
+		if datalength < blockSize || bufferLength > 0 {

			// If the current buffer + new data can fit into a block
-			if (len(data) + bufferLength) <= blockSize {
+			if (datalength + bufferLength) <= blockSize {
				ctx.input = append(ctx.input, data...)

				// Flush the block if the buffer fills it
				if len(ctx.input) == blockSize {
					return write(nil)
				}
				// ... otherwise just return
-				return nil
+				return datalength, nil
			}

			// The current buffer + new data overflow the block size
			// Complete the block, flush it ...
			ctx.input = append(ctx.input, data[:blockSize-bufferLength]...)
-			if err := write(nil); err != nil {
-				return err
+			if c, err := write(nil); err != nil {
+				return c, err
			}
			// ... and stage the rest of the data in the buffer
			ctx.input = append(ctx.input, data[blockSize-bufferLength:]...)
-			return nil
+			return datalength, nil
		}

		var buf []byte = make([]byte, 1, blockSize+1)
-		for block := 0; block < len(data)/blockSize; block++ {
+		for block := 0; block < datalength/blockSize; block++ {
			for i := 0; i < blockSize; i++ {
				var current byte = data[(block*blockSize)+i]
				if ctx.table[ctx.hash] == current {
					// Guess was right - don't output
					buf[0] |= 1 << uint(i)
				} else {
					// Guess was wrong, output char
					ctx.table[ctx.hash] = current
					buf = append(buf, current)
				}
				ctx.update(current)
			}

-			if _, err := writer.Write(buf); err != nil {
-				return err
+			if c, err := writer.Write(buf); err != nil {
+				return (block * blockSize) + c, err
			}

			// Reset the flags and buffer for the next iteration
			buf, buf[0] = buf[:1], 0
		}

-		if remaining := len(data) % blockSize; remaining > 0 {
+		if remaining := datalength % blockSize; remaining > 0 {
			ctx.input = ctx.input[:remaining]
-			copy(ctx.input, data[len(data)-remaining:])
+			copy(ctx.input, data[datalength-remaining:])
		} else {
			ctx.input = ctx.input[:0]
		}

-		return nil
+		return datalength, nil
	}

	return write
}
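
As the comment on Compressor notes, writes are buffered into 8-byte blocks and a call with no data forces a flush. A hypothetical usage sketch (the import path is assumed from the repository layout under src/; error handling elided):

	package main

	import (
		"bytes"
		"fmt"

		"0dev.org/predictor" // assumed import path, per src/0dev.org/predictor
	)

	func main() {
		var compressed bytes.Buffer
		w := predictor.Compressor(&compressed)

		w.Write([]byte("hello, predictor")) // 16 bytes: two full blocks, written through
		w.Write([]byte("tail"))             // 4 bytes: buffered until a block fills
		w.Write(nil)                        // empty write: flushes the partial block

		fmt.Printf("%d bytes in, %d bytes out\n", 20, compressed.Len())
	}

Each emitted block is a header byte, whose bit i is set when byte i matched the guess table and was omitted, followed by only the bytes that were missed, so repetitive input compresses while fresh data costs at most one extra byte per block.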

// Returns an io.Reader implementation that wraps the provided io.Reader
// and decompresses data according to the predictor algorithm