_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q8300
huffSort
train
// huffSort sorts the symbol counts into s.nodes, bucketing symbols by the
// bit length of their count and keeping each bucket ordered by descending
// count via insertion sort. The result feeds Huffman tree construction.
func (s *Scratch) huffSort() {
	type rankPos struct {
		base    uint32
		current uint32
	}

	// Clear nodes
	nodes := s.nodes[:huffNodesLen+1]
	s.nodes = nodes
	nodes = nodes[1 : huffNodesLen+1]

	// Sort into buckets based on length of symbol count.
	var rank [32]rankPos
	for _, v := range s.count[:s.symbolLen] {
		r := highBit32(v+1) & 31
		rank[r].base++
	}
	// Prefix-sum from the top so rank[r].base is the start of bucket r.
	for n := 30; n > 0; n-- {
		rank[n-1].base += rank[n].base
	}
	for n := range rank[:] {
		rank[n].current = rank[n].base
	}
	for n, c := range s.count[:s.symbolLen] {
		r := (highBit32(c+1) + 1) & 31
		pos := rank[r].current
		rank[r].current++
		// Insertion sort within the bucket, largest count first.
		// Masking with huffNodesMask keeps indices provably in range.
		prev := nodes[(pos-1)&huffNodesMask]
		for pos > rank[r].base && c > prev.count {
			nodes[pos&huffNodesMask] = prev
			pos--
			prev = nodes[(pos-1)&huffNodesMask]
		}
		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
	}
	return
}
go
{ "resource": "" }
q8301
NewWriter
train
func NewWriter(w io.Writer) *Writer { z, _ := NewWriterLevelDict(w, DefaultCompression, nil) return z }
go
{ "resource": "" }
q8302
NewWriterLevel
train
// NewWriterLevel is like NewWriter but allows choosing the compression
// level; the level is validated by NewWriterLevelDict.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	zw, err := NewWriterLevelDict(w, level, nil)
	return zw, err
}
go
{ "resource": "" }
q8303
NewWriterLevelDict
train
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { if level < HuffmanOnly || level > BestCompression { return nil, fmt.Errorf("zlib: invalid compression level: %d", level) } return &Writer{ w: w, level: level, dict: dict, }, nil }
go
{ "resource": "" }
q8304
Reset
train
func (z *Writer) Reset(w io.Writer) { z.w = w // z.level and z.dict left unchanged. if z.compressor != nil { z.compressor.Reset(w) } if z.digest != nil { z.digest.Reset() } z.err = nil z.scratch = [4]byte{} z.wroteHeader = false }
go
{ "resource": "" }
q8305
writeHeader
train
// writeHeader writes the two-byte zlib stream header (plus the optional
// dictionary checksum) and lazily creates the flate compressor and
// Adler-32 digest on first use.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case -2, 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5
	}
	// FCHECK: make the 16-bit header value a multiple of 31.
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(z.dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}
go
{ "resource": "" }
q8306
Write
train
// Write compresses p into the underlying stream, writing the zlib header
// first if it has not been written yet. It returns the number of
// uncompressed bytes consumed. Errors are sticky: once one occurs, all
// subsequent calls return the same error.
func (z *Writer) Write(p []byte) (n int, err error) {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return 0, z.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	n, err = z.compressor.Write(p)
	if err != nil {
		z.err = err
		return
	}
	// The Adler-32 checksum is computed over the uncompressed data.
	z.digest.Write(p)
	return
}
go
{ "resource": "" }
q8307
NewWriter
train
// NewWriter returns a Writer that writes a zip archive to w, buffering
// output and tracking the current byte offset via a countWriter.
func NewWriter(w io.Writer) *Writer {
	buffered := bufio.NewWriter(w)
	return &Writer{cw: &countWriter{w: buffered}}
}
go
{ "resource": "" }
q8308
SetOffset
train
func (w *Writer) SetOffset(n int64) { if w.cw.count != 0 { panic("zip: SetOffset called after data was written") } w.cw.count = n }
go
{ "resource": "" }
q8309
Flush
train
// Flush pushes any buffered archive data through to the underlying writer.
func (w *Writer) Flush() error {
	bw := w.cw.w.(*bufio.Writer)
	return bw.Flush()
}
go
{ "resource": "" }
q8310
RegisterCompressor
train
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { if w.compressors == nil { w.compressors = make(map[uint16]Compressor) } w.compressors[method] = comp }
go
{ "resource": "" }
q8311
estimateSize
train
func (c cTable) estimateSize(hist []uint32) int { nbBits := uint32(7) for i, v := range c[:len(hist)] { nbBits += uint32(v.nBits) * hist[i] } return int(nbBits >> 3) }
go
{ "resource": "" }
q8312
minSize
train
func (s *Scratch) minSize(total int) int { nbBits := float64(7) fTotal := float64(total) for _, v := range s.count[:s.symbolLen] { n := float64(v) if n > 0 { nbBits += math.Log2(fTotal/n) * n } } return int(nbBits) >> 3 }
go
{ "resource": "" }
q8313
decSymbolValue
train
func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { if int(symb) >= len(t) { return decSymbol{}, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) } lu := t[symb] return decSymbol{ addBits: lu.addBits, baseline: lu.baseLine, }, nil }
go
{ "resource": "" }
q8314
setRLE
train
// setRLE configures the decoder for an RLE (single-symbol) block: a
// one-entry table with zero table log, so every read yields symbol.
func (s *fseDecoder) setRLE(symbol decSymbol) {
	s.actualTableLog = 0
	s.maxBits = 0
	s.dt[0] = symbol
}
go
{ "resource": "" }
q8315
transform
train
// transform rewrites the decoding table entries with the baseline and
// extra-bit counts from t, keyed by the raw symbol currently stored in
// each entry's addBits field, and records the largest addBits in maxBits.
func (s *fseDecoder) transform(t []baseOffset) error {
	tableSize := uint16(1 << s.actualTableLog)
	s.maxBits = 0
	for i, v := range s.dt[:tableSize] {
		// At this point v.addBits still holds the raw symbol index.
		if int(v.addBits) >= len(t) {
			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits, len(t))
		}
		lu := t[v.addBits]
		if lu.addBits > s.maxBits {
			s.maxBits = lu.addBits
		}
		// Masking keeps the index provably in range (bounds-check elimination).
		s.dt[i&maxTableMask] = decSymbol{
			newState: v.newState,
			nbBits:   v.nbBits,
			addBits:  lu.addBits,
			baseline: lu.baseLine,
		}
	}
	return nil
}
go
{ "resource": "" }
q8316
init
train
// init prepares the state machine for decoding by reading the initial
// state (tableLog bits) from the bit stream.
func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
	s.dt = dt
	br.fill()
	s.state = dt[br.getBits(tableLog)]
}
go
{ "resource": "" }
q8317
next
train
// next transitions to the following state using nbBits bits from the
// stream. The caller must ensure enough bits are buffered.
func (s *fseState) next(br *bitReader) {
	lowBits := uint16(br.getBits(s.state.nbBits))
	s.state = s.dt[s.state.newState+lowBits]
}
go
{ "resource": "" }
q8318
final
train
// final returns the current state's baseline value and the number of
// additional bits to read for it.
func (s *fseState) final() (int, uint8) {
	st := s.state
	return int(st.baseline), st.addBits
}
go
{ "resource": "" }
q8319
NewReader
train
// NewReader creates a ReadCloser that decompresses from r with no preset
// dictionary.
func NewReader(r io.Reader) (io.ReadCloser, error) {
	rc, err := NewReaderDict(r, nil)
	return rc, err
}
go
{ "resource": "" }
q8320
NewReaderDict
train
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { z := new(reader) err := z.Reset(r, dict) if err != nil { return nil, err } return z, nil }
go
{ "resource": "" }
q8321
Close
train
// Close closes the underlying decompressor. io.EOF is treated as normal
// completion; any other sticky error is returned unchanged.
func (z *reader) Close() error {
	if z.err != nil && z.err != io.EOF {
		return z.err
	}
	z.err = z.decompressor.Close()
	return z.err
}
go
{ "resource": "" }
q8322
reset
train
// reset returns the history to its initial state, recycling any
// non-predefined FSE decoders and the Huffman table back to their pools.
func (h *history) reset() {
	h.b = h.b[:0]
	h.error = false
	h.recentOffsets = [3]int{1, 4, 8}
	// Only pooled (non-predefined) decoders may be returned for reuse.
	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
		fseDecoderPool.Put(f)
	}
	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
		fseDecoderPool.Put(f)
	}
	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
		fseDecoderPool.Put(f)
	}
	h.decoders = sequenceDecs{}
	if h.huffTree != nil {
		huffDecoderPool.Put(h.huffTree)
	}
	h.huffTree = nil
	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
go
{ "resource": "" }
q8323
append
train
// append adds b to the history buffer, keeping at most windowSize bytes.
func (h *history) append(b []byte) {
	if len(b) >= h.windowSize {
		// Discard all history by simply overwriting
		h.b = h.b[:h.windowSize]
		copy(h.b, b[len(b)-h.windowSize:])
		return
	}

	// If there is space, append it.
	if len(b) < cap(h.b)-len(h.b) {
		h.b = append(h.b, b...)
		return
	}

	// Move data down so we only have window size left.
	// We know we have less than window size in b at this point.
	discard := len(b) + len(h.b) - h.windowSize
	copy(h.b, h.b[discard:])
	h.b = h.b[:h.windowSize]
	copy(h.b[h.windowSize-len(b):], b)
}
go
{ "resource": "" }
q8324
appendKeep
train
// appendKeep adds b to the history without enforcing the window size;
// the caller is responsible for trimming later.
func (h *history) appendKeep(b []byte) {
	h.b = append(h.b, b...)
}
go
{ "resource": "" }
q8325
Decompress1X
train
// Decompress1X decompresses a single-stream Huffman-compressed buffer
// using the currently loaded table. Output is appended to s.Out (reset
// first) and returned.
func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
	if len(s.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	var br bitReader
	err = br.init(in)
	if err != nil {
		return nil, err
	}
	s.Out = s.Out[:0]

	// decode reads and consumes one symbol from the stream.
	decode := func() byte {
		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
		v := s.dt.single[val]
		br.bitsRead += v.nBits
		return v.byte
	}
	// hasDec consumes the bits of an already looked-up table entry.
	hasDec := func(v dEntrySingle) byte {
		br.bitsRead += v.nBits
		return v.byte
	}

	// Avoid bounds check by always having full sized table.
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	dt := s.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.huffWeight[:256]
	var off uint8

	// Fast path: decode 4 symbols per iteration while >= 8 bytes remain.
	for br.off >= 8 {
		br.fillFast()
		tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
		tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
		br.fillFast()
		tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
		tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
		off += 4
		// off (uint8) wraps to 0 exactly when tmp is full.
		if off == 0 {
			s.Out = append(s.Out, tmp...)
		}
	}
	s.Out = append(s.Out, tmp[:off]...)

	// Tail: decode remaining symbols one at a time with full refills.
	for !br.finished() {
		br.fill()
		s.Out = append(s.Out, decode())
	}
	return s.Out, br.close()
}
go
{ "resource": "" }
q8326
matches
train
func (s *Scratch) matches(ct cTable, w io.Writer) { if s == nil || len(s.dt.single) == 0 { return } dt := s.dt.single[:1<<s.actualTableLog] tablelog := s.actualTableLog ok := 0 broken := 0 for sym, enc := range ct { errs := 0 broken++ if enc.nBits == 0 { for _, dec := range dt { if dec.byte == byte(sym) { fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) errs++ break } } if errs == 0 { broken-- } continue } // Unused bits in input ub := tablelog - enc.nBits top := enc.val << ub // decoder looks at top bits. dec := dt[top] if dec.nBits != enc.nBits { fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits) errs++ } if dec.byte != uint8(sym) { fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte) errs++ } if errs > 0 { fmt.Fprintf(w, "%d errros in base, stopping\n", errs) continue } // Ensure that all combinations are covered. for i := uint16(0); i < (1 << ub); i++ { vval := top | i dec := dt[vval] if dec.nBits != enc.nBits { fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits) errs++ } if dec.byte != uint8(sym) { fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte) errs++ } if errs > 20 { fmt.Fprintf(w, "%d errros, stopping\n", errs) break } } if errs == 0 { ok++ broken-- } } if broken > 0 { fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) } }
go
{ "resource": "" }
q8327
Decompress
train
// Decompress decompresses the FSE-compressed input b, reusing s if
// provided (a nil s allocates a fresh Scratch). The decompressed output,
// also stored in s.Out, is returned.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
	s, err := s.prepare(b)
	if err != nil {
		return nil, err
	}
	s.Out = s.Out[:0]
	// Read the normalized counts, build the table, then decode.
	err = s.readNCount()
	if err != nil {
		return nil, err
	}
	err = s.buildDtable()
	if err != nil {
		return nil, err
	}
	err = s.decompress()
	if err != nil {
		return nil, err
	}
	return s.Out, nil
}
go
{ "resource": "" }
q8328
allocDtable
train
// allocDtable (re)sizes the decoding table for the current
// actualTableLog and the fixed 256-entry scratch tables, reusing
// existing capacity where possible.
func (s *Scratch) allocDtable() {
	tableSize := 1 << s.actualTableLog
	if cap(s.decTable) < int(tableSize) {
		s.decTable = make([]decSymbol, tableSize)
	}
	s.decTable = s.decTable[:tableSize]

	if cap(s.ct.tableSymbol) < 256 {
		s.ct.tableSymbol = make([]byte, 256)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:256]

	if cap(s.ct.stateTable) < 256 {
		s.ct.stateTable = make([]uint16, 256)
	}
	s.ct.stateTable = s.ct.stateTable[:256]
}
go
{ "resource": "" }
q8329
decompress
train
// decompress runs two interleaved FSE decoders over the bit stream,
// appending symbols to s.Out until the stream is exhausted or
// s.DecompressLimit is reached.
func (s *Scratch) decompress() error {
	br := &s.bits
	br.init(s.br.unread())

	var s1, s2 decoder
	// Initialize and decode first state and symbol.
	s1.init(br, s.decTable, s.actualTableLog)
	s2.init(br, s.decTable, s.actualTableLog)

	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.ct.tableSymbol[:256]
	var off uint8

	// Main part
	if !s.zeroBits {
		// No zero-probability symbols: the faster nextFast path is safe.
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.nextFast()
			tmp[off+1] = s2.nextFast()
			br.fillFast()
			tmp[off+2] = s1.nextFast()
			tmp[off+3] = s2.nextFast()
			off += 4
			// off (uint8) wraps to 0 exactly when tmp is full.
			if off == 0 {
				s.Out = append(s.Out, tmp...)
			}
		}
	} else {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.next()
			tmp[off+1] = s2.next()
			br.fillFast()
			tmp[off+2] = s1.next()
			tmp[off+3] = s2.next()
			off += 4
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				off = 0
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	}
	s.Out = append(s.Out, tmp[:off]...)

	// Final bits, a bit more expensive check
	for {
		if s1.finished() {
			s.Out = append(s.Out, s1.final(), s2.final())
			break
		}
		br.fill()
		s.Out = append(s.Out, s1.next())
		if s2.finished() {
			// Note the flipped emit order when s2 runs out first.
			s.Out = append(s.Out, s2.final(), s1.final())
			break
		}
		s.Out = append(s.Out, s2.next())
		if len(s.Out) >= s.DecompressLimit {
			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
		}
	}
	return br.close()
}
go
{ "resource": "" }
q8330
init
train
// init attaches the decoder to the bit stream and reads its initial
// state (tableLog bits).
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
	d.dt = dt
	d.br = in
	d.state = uint16(in.getBits(tableLog))
}
go
{ "resource": "" }
q8331
next
train
// next returns the next decoded symbol and advances the state machine by
// reading nbBits from the stream.
func (d *decoder) next() uint8 {
	n := &d.dt[d.state]
	lowBits := d.br.getBits(n.nbBits)
	d.state = n.newState + lowBits
	return n.symbol
}
go
{ "resource": "" }
q8332
next
train
// next reads the next block of the frame into block and hands it to the
// async decoder if one is running. It returns io.EOF once the last block
// of the frame has been dispatched.
func (d *frameDec) next(block *blockDec) error {
	println("decoding new block")
	err := block.reset(d.rawInput, d.WindowSize)
	if err != nil {
		println("block error:", err)
		// Signal the frame decoder we have a problem.
		d.sendErr(block, err)
		return err
	}
	block.input <- struct{}{}
	if debug {
		println("next block:", block)
	}
	d.asyncRunningMu.Lock()
	defer d.asyncRunningMu.Unlock()
	if !d.asyncRunning {
		return nil
	}
	if block.Last {
		// We indicate the frame is done by sending io.EOF
		d.decoding <- block
		return io.EOF
	}
	d.decoding <- block
	return nil
}
go
{ "resource": "" }
q8333
sendErr
train
// sendErr forwards err to the async decoder via block. It reports
// whether the error was delivered (false when no async decode is running).
func (d *frameDec) sendErr(block *blockDec, err error) bool {
	d.asyncRunningMu.Lock()
	defer d.asyncRunningMu.Unlock()
	if !d.asyncRunning {
		return false
	}
	println("sending error", err.Error())
	block.sendErr(err)
	d.decoding <- block
	return true
}
go
{ "resource": "" }
q8334
checkCRC
train
// checkCRC reads the 4-byte checksum from the input (if the frame has
// one) and compares it with the locally computed digest.
func (d *frameDec) checkCRC() error {
	if !d.HasCheckSum {
		return nil
	}
	var tmp [8]byte
	gotB := d.crc.Sum(tmp[:0])
	// Flip to match file order.
	gotB[0] = gotB[7]
	gotB[1] = gotB[6]
	gotB[2] = gotB[5]
	gotB[3] = gotB[4]
	// We can overwrite upper tmp now

	want := d.rawInput.readSmall(4)
	if want == nil {
		println("CRC missing?")
		return io.ErrUnexpectedEOF
	}

	if !bytes.Equal(gotB[:4], want) {
		println("CRC Check Failed:", gotB[:4], "!=", want)
		return ErrCRCMismatch
	}
	println("CRC ok")
	return nil
}
go
{ "resource": "" }
q8335
runDecoder
train
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // TODO: Init to dictionary d.history.reset() saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst // Store input length, so we only check new data. crcStart := len(dst) var err error for { err = dec.reset(d.rawInput, d.WindowSize) if err != nil { break } if debug { println("next block:", dec) } err = dec.decodeBuf(&d.history) if err != nil || dec.Last { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } } dst = d.history.b if err == nil { if d.HasCheckSum { var n int n, err = d.crc.Write(dst[crcStart:]) if err == nil { if n != len(dst)-crcStart { err = io.ErrShortWrite } } err = d.checkCRC() } } d.history.b = saved return dst, err }
go
{ "resource": "" }
q8336
dynamicSize
train
// dynamicSize returns the estimated size of the block, in bits, when
// encoded with dynamic Huffman tables, along with the number of codegen
// code lengths that must be transmitted.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	numCodegens = len(w.codegenFreq)
	// Trailing zero-frequency codegen codes need not be sent, but at
	// least 4 are always transmitted (per RFC 1951 HCLEN).
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	// Header: block bits + HLIT + HDIST + HCLEN + 3 bits per codegen
	// length, plus the repeat codes' extra bits: 16 -> 2, 17 -> 3, 18 -> 7.
	header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7
	size = header +
		litEnc.bitLength(w.literalFreq) +
		offEnc.bitLength(w.offsetFreq) +
		extraBits
	return size, numCodegens
}
go
{ "resource": "" }
q8337
fixedSize
train
// fixedSize returns the bit size of the block when encoded with the
// fixed Huffman tables, including the 3 header bits and extraBits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	size := 3 // block header bits
	size += fixedLiteralEncoding.bitLength(w.literalFreq)
	size += fixedOffsetEncoding.bitLength(w.offsetFreq)
	return size + extraBits
}
go
{ "resource": "" }
q8338
storedSize
train
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { if in == nil { return 0, false } if len(in) <= maxStoreBlockSize { return (len(in) + 5) * 8, true } return 0, false }
go
{ "resource": "" }
q8339
writeDynamicHeader
train
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { if w.err != nil { return } var firstBits int32 = 4 if isEof { firstBits = 5 } w.writeBits(firstBits, 3) w.writeBits(int32(numLiterals-257), 5) w.writeBits(int32(numOffsets-1), 5) w.writeBits(int32(numCodegens-4), 4) for i := 0; i < numCodegens; i++ { value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) w.writeBits(int32(value), 3) } i := 0 for { var codeWord int = int(w.codegen[i]) i++ if codeWord == badCode { break } w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) switch codeWord { case 16: w.writeBits(int32(w.codegen[i]), 2) i++ break case 17: w.writeBits(int32(w.codegen[i]), 3) i++ break case 18: w.writeBits(int32(w.codegen[i]), 7) i++ break } } }
go
{ "resource": "" }
q8340
writeBlock
train
// writeBlock writes a complete deflate block of tokens, choosing the
// cheapest of stored, fixed-Huffman, or dynamic-Huffman encoding.
// input is the uncompressed data; it must be non-nil for a stored block
// to be considered.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = w.fixedSize(extraBits)

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize < size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}
go
{ "resource": "" }
q8341
indexTokens
train
// indexTokens rebuilds the literal and offset histograms from tokens and
// generates both Huffman encodings. It returns the clipped table sizes
// (one past the last non-zero frequency).
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}
	for i := range w.offsetFreq {
		w.offsetFreq[i] = 0
	}

	for _, t := range tokens {
		if t < matchType {
			w.literalFreq[t.literal()]++
			continue
		}
		length := t.length()
		offset := t.offset()
		w.literalFreq[lengthCodesStart+lengthCode(length)]++
		w.offsetFreq[offsetCode(offset)]++
	}

	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)
	return
}
go
{ "resource": "" }
q8342
writeTokens
train
// writeTokens writes a slice of tokens using the given literal/length
// and offset code tables, emitting any extra bits each code requires.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	for _, t := range tokens {
		if t < matchType {
			w.writeCode(leCodes[t.literal()])
			continue
		}
		// Write the length
		length := t.length()
		lengthCode := lengthCode(length)
		w.writeCode(leCodes[lengthCode+lengthCodesStart])
		extraLengthBits := uint(lengthExtraBits[lengthCode])
		if extraLengthBits > 0 {
			extraLength := int32(length - lengthBase[lengthCode])
			w.writeBits(extraLength, extraLengthBits)
		}
		// Write the offset
		offset := t.offset()
		offsetCode := offsetCode(offset)
		w.writeCode(oeCodes[offsetCode])
		extraOffsetBits := uint(offsetExtraBits[offsetCode])
		if extraOffsetBits > 0 {
			extraOffset := int32(offset - offsetBase[offsetCode])
			w.writeBits(extraOffset, extraOffsetBits)
		}
	}
}
go
{ "resource": "" }
q8343
writeBlockHuff
train
// writeBlockHuff encodes a block of bytes as Huffman-coded literals only
// (no LZ77 matches), falling back to a stored block when Huffman coding
// does not give a reasonable improvement.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}

	// Add everything as literals
	histogram(input, w.literalFreq)

	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		w.write(w.bytes[:n])
		if w.err != nil {
			return // Return early in the event of write failures
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}
go
{ "resource": "" }
q8344
OpenReader
train
// OpenReader opens the named zip file for reading. The returned
// ReadCloser owns the file handle; closing it closes the file. The file
// is closed here on any initialization failure.
func OpenReader(name string) (*ReadCloser, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	r := new(ReadCloser)
	if err := r.init(f, fi.Size()); err != nil {
		f.Close()
		return nil, err
	}
	r.f = f
	return r, nil
}
go
{ "resource": "" }
q8345
NewReader
train
func NewReader(r io.ReaderAt, size int64) (*Reader, error) { zr := new(Reader) if err := zr.init(r, size); err != nil { return nil, err } return zr, nil }
go
{ "resource": "" }
q8346
RegisterDecompressor
train
func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { if z.decompressors == nil { z.decompressors = make(map[uint16]Decompressor) } z.decompressors[method] = dcomp }
go
{ "resource": "" }
q8347
DataOffset
train
func (f *File) DataOffset() (offset int64, err error) { bodyOffset, err := f.findBodyOffset() if err != nil { return } return f.headerOffset + bodyOffset, nil }
go
{ "resource": "" }
q8348
Open
train
// Open returns a ReadCloser providing access to the File's decompressed
// contents, wrapped with CRC-32 verification against the header or data
// descriptor.
func (f *File) Open() (rc io.ReadCloser, err error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		err = ErrAlgorithm
		return
	}
	rc = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		// The data descriptor follows immediately after the compressed data.
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	rc = &checksumReader{
		rc:   rc,
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return
}
go
{ "resource": "" }
q8349
findBodyOffset
train
// findBodyOffset reads the local file header and returns the offset of
// the file body relative to f.headerOffset (fixed header length plus
// the variable-length name and extra fields).
func (f *File) findBodyOffset() (int64, error) {
	var buf [fileHeaderLen]byte
	if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
		return 0, err
	}
	b := readBuf(buf[:])
	if sig := b.uint32(); sig != fileHeaderSignature {
		return 0, ErrFormat
	}
	b = b[22:] // skip over most of the header
	filenameLen := int(b.uint16())
	extraLen := int(b.uint16())
	return int64(fileHeaderLen + filenameLen + extraLen), nil
}
go
{ "resource": "" }
q8350
readDirectory64End
train
// readDirectory64End reads the zip64 end-of-central-directory record at
// the given offset into d.
func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
	buf := make([]byte, directory64EndLen)
	if _, err := r.ReadAt(buf, offset); err != nil {
		return err
	}

	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64EndSignature {
		return ErrFormat
	}

	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	d.diskNbr = b.uint32()            // number of this disk
	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
	d.directoryRecords = b.uint64()   // total number of entries in the central directory
	d.directorySize = b.uint64()      // size of the central directory
	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}
go
{ "resource": "" }
q8351
FileInfoHeader
train
// FileInfoHeader creates a partially-populated FileHeader from an
// os.FileInfo. Sizes that overflow the 32-bit field saturate at
// uint32max; the 64-bit field keeps the exact value.
func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) {
	size := fi.Size()
	fh := &FileHeader{
		Name:               fi.Name(),
		UncompressedSize64: uint64(size),
	}
	fh.SetModTime(fi.ModTime())
	fh.SetMode(fi.Mode())
	if fh.UncompressedSize64 > uint32max {
		fh.UncompressedSize = uint32max
	} else {
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}
	return fh, nil
}
go
{ "resource": "" }
q8352
ModTime
train
// ModTime returns the modification time decoded from the MS-DOS encoded
// date/time fields (2-second resolution).
func (h *FileHeader) ModTime() time.Time {
	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
}
go
{ "resource": "" }
q8353
SetModTime
train
// SetModTime stores t in the header's MS-DOS encoded date/time fields
// (2-second resolution).
func (h *FileHeader) SetModTime(t time.Time) {
	h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
go
{ "resource": "" }
q8354
Mode
train
// Mode returns the permission and mode bits, decoding the external
// attributes according to the OS that created the archive entry.
func (h *FileHeader) Mode() (mode os.FileMode) {
	switch h.CreatorVersion >> 8 {
	case creatorUnix, creatorMacOSX:
		// Unix permissions live in the high 16 bits.
		mode = unixModeToFileMode(h.ExternalAttrs >> 16)
	case creatorNTFS, creatorVFAT, creatorFAT:
		mode = msdosModeToFileMode(h.ExternalAttrs)
	}
	// A trailing slash in the name always marks a directory.
	if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
		mode |= os.ModeDir
	}
	return mode
}
go
{ "resource": "" }
q8355
SetMode
train
// SetMode changes the permission and mode bits, encoding them in both
// the Unix form (high 16 bits of the external attributes) and the
// MS-DOS attribute bits.
func (h *FileHeader) SetMode(mode os.FileMode) {
	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
	h.ExternalAttrs = fileModeToUnixMode(mode) << 16

	// set MSDOS attributes too, as the original zip does.
	if mode&os.ModeDir != 0 {
		h.ExternalAttrs |= msdosDir
	}
	if mode&0200 == 0 {
		h.ExternalAttrs |= msdosReadOnly
	}
}
go
{ "resource": "" }
q8356
isZip64
train
// isZip64 reports whether either size overflows the 32-bit header fields
// and therefore requires zip64 extra records.
func (fh *FileHeader) isZip64() bool {
	if fh.CompressedSize64 >= uint32max {
		return true
	}
	return fh.UncompressedSize64 >= uint32max
}
go
{ "resource": "" }
q8357
set
train
func (h *hcode) set(code uint16, length uint16) { h.len = length h.code = code }
go
{ "resource": "" }
q8358
generateFixedLiteralEncoding
train
func generateFixedLiteralEncoding() *huffmanEncoder { h := newHuffmanEncoder(maxNumLit) codes := h.codes var ch uint16 for ch = 0; ch < maxNumLit; ch++ { var bits uint16 var size uint16 switch { case ch < 144: // size 8, 000110000 .. 10111111 bits = ch + 48 size = 8 break case ch < 256: // size 9, 110010000 .. 111111111 bits = ch + 400 - 144 size = 9 break case ch < 280: // size 7, 0000000 .. 0010111 bits = ch - 256 size = 7 break default: // size 8, 11000000 .. 11000111 bits = ch + 192 - 280 size = 8 } codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} } return h }
go
{ "resource": "" }
q8359
assignEncodingAndSize
train
// assignEncodingAndSize assigns canonical Huffman codes to the literals.
// bitCount[n] is the number of literals encoded with n bits; codes are
// handed out shortest-first, in literal order within each length.
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]
		h.lns.sort(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
go
{ "resource": "" }
q8360
offsetCode
train
// offsetCode returns the DEFLATE offset code for off. Offsets beyond the
// direct lookup table are derived from the table at off>>7 (codes +14)
// or off>>14 (codes +28).
func offsetCode(off uint32) uint32 {
	n := uint32(len(offsetCodes))
	switch {
	case off < n:
		return offsetCodes[off]
	case off>>7 < n:
		return offsetCodes[off>>7] + 14
	default:
		return offsetCodes[off>>14] + 28
	}
}
go
{ "resource": "" }
q8361
init
train
// init initializes the sliding-window dictionary with the given window
// size, optionally pre-loading it with (the tail of) dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset all state but keep the allocated window for reuse.
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	// Keep only the most recent bytes of an oversized dictionary.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}
go
{ "resource": "" }
q8362
histSize
train
func (dd *dictDecoder) histSize() int { if dd.full { return len(dd.hist) } return dd.wrPos }
go
{ "resource": "" }
q8363
readFlush
train
// readFlush returns a slice of the bytes written since the last call,
// advancing the read pointer and wrapping the write pointer (marking the
// window full) when it reaches the end of the window.
func (dd *dictDecoder) readFlush() []byte {
	toRead := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		dd.wrPos, dd.rdPos = 0, 0
		dd.full = true
	}
	return toRead
}
go
{ "resource": "" }
q8364
Reset
train
// Reset clears the previous block and advances cur by the maximum match
// offset so stale table entries from earlier blocks can never produce a
// valid match.
func (e *snappyGen) Reset() {
	e.prev = e.prev[:0]
	e.cur += maxMatchOffset
}
go
{ "resource": "" }
q8365
WithDecoderLowmem
train
// WithDecoderLowmem makes the decoder trade execution speed for a lower
// memory footprint when b is true.
func WithDecoderLowmem(b bool) DOption {
	return func(o *decoderOptions) error {
		o.lowMem = b
		return nil
	}
}
go
{ "resource": "" }
q8366
WithDecoderConcurrency
train
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { if n <= 0 { return fmt.Errorf("Concurrency must be at least 1") } o.concurrent = n return nil } }
go
{ "resource": "" }
q8367
HistogramFinished
train
// HistogramFinished notifies the Scratch that an externally-filled
// histogram is ready: maxSymbol is the largest symbol present and
// maxCount the count of the most frequent symbol. A maxCount of 0 marks
// the histogram as needing a clear before reuse.
func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
	s.maxCount = maxCount
	s.symbolLen = uint16(maxSymbol) + 1
	s.clearCount = maxCount != 0
}
go
{ "resource": "" }
q8368
prepare
train
// prepare readies s for decompressing in (allocating a Scratch if s is
// nil): applies defaults, validates TableLog, clears the histogram when
// flagged, and points the byte reader at in.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if s == nil {
		s = &Scratch{}
	}
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = 255
	}
	if s.TableLog == 0 {
		s.TableLog = defaultTablelog
	}
	if s.TableLog > maxTableLog {
		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	s.br.init(in)
	if s.DecompressLimit == 0 {
		// Max size 2GB.
		s.DecompressLimit = 2 << 30
	}
	return s, nil
}
go
{ "resource": "" }
q8369
Estimate
train
// Estimate returns a heuristic compressibility score for b in the range
// [0, 1], where higher means more likely to compress well. Inputs shorter
// than 16 bytes always score 0.
//
// The score blends two signals: how often an order-1 predictor guesses the
// next byte, and how far the byte histogram deviates from uniform.
func Estimate(b []byte) float64 {
	if len(b) < 16 {
		return 0
	}

	// Order-1 prediction: a hit only counts after two correct predictions
	// in a row.
	var (
		hits      int
		prevMatch bool
		o1        [256]byte
		hist      [256]int
		ctx       byte
	)
	for _, c := range b {
		if c == o1[ctx] {
			if prevMatch {
				hits++
			}
			prevMatch = true
		} else {
			prevMatch = false
		}
		o1[ctx] = c
		ctx = c
		hist[c]++
	}

	// Use x^0.6 to give better spread.
	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)

	// Histogram distribution: variance around a uniform byte spread.
	variance := float64(0)
	avg := float64(len(b)) / 256
	for _, v := range hist {
		Δ := float64(v) - avg
		variance += Δ * Δ
	}
	stddev := math.Sqrt(float64(variance)) / float64(len(b))
	exp := math.Sqrt(1 / float64(len(b)))

	// Subtract the stddev expected from pure noise; clamp at zero.
	stddev -= exp
	if stddev < 0 {
		stddev = 0
	}
	stddev *= 1 + exp

	// Use x^0.4 to give better spread.
	entropy := math.Pow(stddev, 0.4)

	// 50/50 weight between prediction and histogram distribution.
	return math.Pow((prediction+entropy)/2, 0.9)
}
go
{ "resource": "" }
q8370
init
train
// init points the reader at in and rewinds the offset to the start.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}
go
{ "resource": "" }
q8371
Uint8
train
// Uint8 returns the byte at the current offset.
// Note that it does not advance the offset — presumably advancing is done
// by a separate method on byteReader (not visible here); confirm at callers.
func (b *byteReader) Uint8() uint8 {
	v := b.b[b.off]
	return v
}
go
{ "resource": "" }
q8372
dataBlock
train
// dataBlock handles a stored (uncompressed) block: it discards the partial
// bit buffer, validates the 4-byte LEN/NLEN header, and begins copying the
// payload into the window.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	f.nb = 0
	f.b = 0
	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[0:4])
	f.roffset += int64(nr)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	n := int(f.buf[0]) | int(f.buf[1])<<8
	nn := int(f.buf[2]) | int(f.buf[3])<<8
	// NLEN must be the ones-complement of LEN (RFC 1951 §3.2.4).
	if uint16(nn) != uint16(^n) {
		f.err = CorruptInputError(f.roffset)
		return
	}
	if n == 0 {
		// Zero-length stored block: flush pending output and finish.
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}
	f.copyLen = n
	f.copyData()
}
go
{ "resource": "" }
q8373
copyData
train
// copyData copies f.copyLen bytes of a stored block from the underlying
// reader into the history window. If the window fills or data remains, it
// flushes what is readable and re-schedules itself as the next step.
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}
	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	// Window full or copy incomplete: hand readable bytes to the caller
	// and resume here on the next step.
	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}
go
{ "resource": "" }
q8374
noEOF
train
func noEOF(e error) error { if e == io.EOF { return io.ErrUnexpectedEOF } return e }
go
{ "resource": "" }
q8375
huffSym
train
// huffSym decodes the next Huffman-coded symbol from the bit stream using
// decoder h, returning the symbol value or an error on truncated/corrupt
// input.
//
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	n := uint(h.min)
	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
	// but is smart enough to keep local variables in registers, so use nb and b,
	// inline call to moreBits and reassign b,nb back to f on return.
	nb, b := f.nb, f.b
	for {
		// Refill the bit buffer until at least n bits are available.
		for nb < n {
			c, err := f.r.ReadByte()
			if err != nil {
				f.b = b
				f.nb = nb
				return 0, noEOF(err)
			}
			f.roffset++
			b |= uint32(c) << (nb & 31)
			nb += 8
		}
		// Primary table lookup; long codes chase a second-level link table.
		chunk := h.chunks[b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= nb {
			if n == 0 {
				// Invalid code sequence (see doc comment above).
				f.b = b
				f.nb = nb
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			// Consume the n code bits and return the decoded value.
			f.b = b >> (n & 31)
			f.nb = nb - n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}
go
{ "resource": "" }
q8376
NewReader
train
func NewReader(r io.Reader) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor f.r = makeReader(r) f.bits = new([maxNumLit + maxNumDist]int) f.codebits = new([numCodes]int) f.step = (*decompressor).nextBlock f.dict.init(maxMatchOffset, nil) return &f }
go
{ "resource": "" }
q8377
fillBase
train
func fillBase(dst []baseOffset, base uint32, bits ...uint8) { if len(bits) != len(dst) { panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) } for i, bit := range bits { if base > math.MaxInt32 { panic(fmt.Sprintf("invalid decoding table, base overflows int32")) } dst[i] = baseOffset{ baseLine: base, addBits: bit, } base += 1 << bit } }
go
{ "resource": "" }
q8378
Compress
train
// Compress entropy-codes in, returning the compressed bytes (which alias
// s.Out). in must be at least 2 bytes and smaller than 2GB.
// It returns ErrUseRLE when the input is a single repeated symbol, and
// ErrIncompressible when the symbols are too evenly distributed or the
// encoded output would not be smaller than the input.
// Pass a reusable Scratch to avoid allocations; nil allocates a fresh one.
func Compress(in []byte, s *Scratch) ([]byte, error) {
	if len(in) <= 1 {
		return nil, ErrIncompressible
	}
	if len(in) >= 2<<30 {
		return nil, errors.New("input too big, must be < 2GB")
	}
	s, err := s.prepare(in)
	if err != nil {
		return nil, err
	}
	// Create histogram, if none was provided.
	maxCount := s.maxCount
	if maxCount == 0 {
		maxCount = s.countSimple(in)
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount == len(in) {
		// One symbol, use RLE
		return nil, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, ErrIncompressible
	}
	s.optimalTableLog()
	err = s.normalizeCount()
	if err != nil {
		return nil, err
	}
	err = s.writeCount()
	if err != nil {
		return nil, err
	}
	// Flip to true to validate/dump the normalized table while debugging.
	if false {
		err = s.validateNorm()
		if err != nil {
			return nil, err
		}
	}
	err = s.buildCTable()
	if err != nil {
		return nil, err
	}
	err = s.compress(in)
	if err != nil {
		return nil, err
	}
	s.Out = s.bw.out
	// Check if we compressed.
	if len(s.Out) >= len(in) {
		return nil, ErrIncompressible
	}
	return s.Out, nil
}
go
{ "resource": "" }
q8379
init
train
func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { c.bw = bw c.stateTable = ct.stateTable nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + first.deltaFindState c.state = c.stateTable[lu] return }
go
{ "resource": "" }
q8380
flush
train
// flush writes the final state to the bit stream using tableLog bits, then
// flushes the underlying bit writer.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
	c.bw.addBits16NC(c.state, tableLog)
	c.bw.flush()
}
go
{ "resource": "" }
q8381
String
train
// String implements fmt.Stringer, formatting the transform's fields for
// debugging output.
func (s symbolTransform) String() string {
	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
}
go
{ "resource": "" }
q8382
allocCtable
train
func (s *Scratch) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. if cap(s.ct.tableSymbol) < int(tableSize) { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] ctSize := tableSize if cap(s.ct.stateTable) < ctSize { s.ct.stateTable = make([]uint16, ctSize) } s.ct.stateTable = s.ct.stateTable[:ctSize] if cap(s.ct.symbolTT) < int(s.symbolLen) { s.ct.symbolTT = make([]symbolTransform, 256) } s.ct.symbolTT = s.ct.symbolTT[:256] }
go
{ "resource": "" }
q8383
normalizeCount
train
// normalizeCount scales the raw counts in s.count so they sum exactly to
// 1 << s.actualTableLog, writing the result into s.norm. Symbols at or below
// the low threshold get the sentinel -1 (minimum probability). If the
// correction the largest symbol would have to absorb is too big, the slower
// fallback normalizeCount2 is used instead.
func (s *Scratch) normalizeCount() error {
	var (
		tableLog          = s.actualTableLog
		scale             = 62 - uint64(tableLog)
		step              = (1 << 62) / uint64(s.br.remain())
		vStep             = uint64(1) << (scale - 20)
		stillToDistribute = int16(1 << tableLog)
		largest           int
		largestP          int16
		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
	)
	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0;   /* rle special case */
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Rare symbol: force minimum probability.
			s.norm[i] = -1
			stillToDistribute--
		} else {
			// Fixed-point scaling of the count into the target range.
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				// Round up if the remainder beats the per-probability
				// rounding threshold from rtbTable.
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}
	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		return s.normalizeCount2()
	}
	// Give any leftover probability mass to the most frequent symbol.
	s.norm[largest] += stillToDistribute
	return nil
}
go
{ "resource": "" }
q8384
validateNorm
train
// validateNorm is a debug check: it verifies that the absolute values of the
// normalized counts sum exactly to 1 << actualTableLog and that no symbol
// beyond symbolLen still carries a count. On failure it dumps the table to
// stdout (via the deferred printer) before returning the error.
func (s *Scratch) validateNorm() (err error) {
	var total int
	for _, v := range s.norm[:s.symbolLen] {
		if v >= 0 {
			total += int(v)
		} else {
			// -1 entries represent minimum probability and count as 1.
			total -= int(v)
		}
	}
	defer func() {
		if err == nil {
			return
		}
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
		for i, v := range s.norm[:s.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
		}
	}()
	if total != (1 << s.actualTableLog) {
		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
	}
	for i, v := range s.count[s.symbolLen:] {
		if v != 0 {
			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
		}
	}
	return nil
}
go
{ "resource": "" }
q8385
Read
train
// Read implements io.Reader, copying decompressed bytes into p.
// It blocks until p is filled, the stream ends, or an error occurs.
// Errors are only surfaced once all buffered output has been returned.
func (d *Decoder) Read(p []byte) (int, error) {
	if d.stream == nil {
		return 0, errors.New("no input has been initialized")
	}
	var n int
	for {
		// Drain the current block into p first.
		if len(d.current.b) > 0 {
			filled := copy(p, d.current.b)
			p = p[filled:]
			d.current.b = d.current.b[filled:]
			n += filled
		}
		if len(p) == 0 {
			break
		}
		if len(d.current.b) == 0 {
			// We have an error and no more data
			if d.current.err != nil {
				break
			}
			d.nextBlock()
		}
	}
	if len(d.current.b) > 0 {
		// Only return error at end of block
		return n, nil
	}
	if d.current.err != nil {
		d.drainOutput()
	}
	if debug {
		println("returning", n, d.current.err, len(d.decoders))
	}
	return n, d.current.err
}
go
{ "resource": "" }
q8386
Reset
train
// Reset switches the Decoder to read from r, starting the background stream
// decoder goroutine on first use and draining any output left over from a
// previous stream. It returns an error if the Decoder has been closed or if
// r is nil.
func (d *Decoder) Reset(r io.Reader) error {
	if d.current.err == ErrDecoderClosed {
		return d.current.err
	}
	if r == nil {
		return errors.New("nil Reader sent as input")
	}
	// TODO: If r is a *bytes.Buffer, we could automatically switch to sync operation.
	if d.stream == nil {
		d.stream = make(chan decodeStream, 1)
		go d.startStreamDecoder(d.stream)
	}
	d.drainOutput()
	// Remove current block.
	d.current.decodeOutput = decodeOutput{}
	d.current.err = nil
	d.current.cancel = make(chan struct{})
	d.current.flushed = false
	d.current.d = nil
	// Hand the new source to the stream decoder goroutine.
	d.stream <- decodeStream{
		r:      r,
		output: d.current.output,
		cancel: d.current.cancel,
	}
	return nil
}
go
{ "resource": "" }
q8387
drainOutput
train
// drainOutput cancels the current in-flight decode (if any), returns held
// block decoders to the pool, and consumes the output channel until the
// errEndOfStream marker, leaving the stream goroutine in a clean state.
func (d *Decoder) drainOutput() {
	if d.current.cancel != nil {
		println("cancelling current")
		close(d.current.cancel)
		d.current.cancel = nil
	}
	if d.current.d != nil {
		println("re-adding current decoder", d.current.d, len(d.decoders))
		d.decoders <- d.current.d
		d.current.d = nil
		d.current.b = nil
	}
	if d.current.output == nil || d.current.flushed {
		println("current already flushed")
		return
	}
	// Consume everything pending on the output channel, recycling decoders,
	// until the end-of-stream marker arrives.
	for {
		select {
		case v := <-d.current.output:
			if v.d != nil {
				println("got decoder", v.d)
				d.decoders <- v.d
			}
			if v.err == errEndOfStream {
				println("current flushed")
				d.current.flushed = true
				return
			}
		}
	}
}
go
{ "resource": "" }
q8388
WriteTo
train
// WriteTo implements io.WriterTo, writing all remaining decompressed data to
// w. It returns the number of bytes written and the first error encountered;
// io.EOF from the decode stream is translated to a nil error.
func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
	if d.stream == nil {
		return 0, errors.New("no input has been initialized")
	}
	var n int64
	for d.current.err == nil {
		if len(d.current.b) > 0 {
			n2, err2 := w.Write(d.current.b)
			n += int64(n2)
			// A write error stops the loop unless a decode error is
			// already recorded (the decode error takes precedence).
			if err2 != nil && d.current.err == nil {
				d.current.err = err2
				break
			}
		}
		d.nextBlock()
	}
	err := d.current.err
	if err != nil {
		d.drainOutput()
	}
	if err == io.EOF {
		err = nil
	}
	return n, err
}
go
{ "resource": "" }
q8389
DecodeAll
train
// DecodeAll decodes the concatenated frames in input and appends the result
// to dst, returning the extended slice. It borrows a block decoder and a
// frame decoder from the pools for the duration of the call and can be used
// concurrently with the streaming API.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
	if d.current.err == ErrDecoderClosed {
		return dst, ErrDecoderClosed
	}
	//println(len(d.frames), len(d.decoders), d.current)
	block, frame := <-d.decoders, <-d.frames
	defer func() {
		d.decoders <- block
		frame.rawInput = nil
		d.frames <- frame
	}()
	if cap(dst) == 0 {
		// Allocate 1MB by default.
		dst = make([]byte, 0, 1<<20)
	}
	br := byteBuf(input)
	for {
		err := frame.reset(&br)
		if err == io.EOF {
			// Clean end of input: all frames consumed.
			return dst, nil
		}
		if err != nil {
			return dst, err
		}
		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
			return dst, ErrDecoderSizeExceeded
		}
		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
			// Never preallocate more than 1 GB up front.
			if uint64(cap(dst)) < frame.FrameContentSize {
				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
				copy(dst2, dst)
				dst = dst2
			}
		}
		dst, err = frame.runDecoder(dst, block)
		if err != nil {
			return dst, err
		}
		if len(br) == 0 {
			break
		}
	}
	return dst, nil
}
go
{ "resource": "" }
q8390
nextBlock
train
// nextBlock returns the currently held block decoder to the pool and
// receives the next decoded block from the output channel. If an error is
// already recorded, it does nothing so the error state is preserved.
func (d *Decoder) nextBlock() {
	if d.current.d != nil {
		d.decoders <- d.current.d
		d.current.d = nil
	}
	if d.current.err != nil {
		// Keep error state.
		return
	}
	d.current.decodeOutput = <-d.current.output
	if debug {
		println("got", len(d.current.b), "bytes, error:", d.current.err)
	}
}
go
{ "resource": "" }
q8391
Close
train
// Close releases all resources held by the Decoder: it drains output, stops
// the stream goroutine, and closes all pooled decoders. The Decoder is
// unusable afterwards; subsequent calls are no-ops and other methods report
// ErrDecoderClosed.
func (d *Decoder) Close() {
	if d.current.err == ErrDecoderClosed {
		return
	}
	d.drainOutput()
	if d.stream != nil {
		close(d.stream)
		// Wait for the stream goroutine to exit before tearing down pools.
		d.streamWg.Wait()
		d.stream = nil
	}
	if d.decoders != nil {
		close(d.decoders)
		for dec := range d.decoders {
			dec.Close()
		}
		d.decoders = nil
	}
	if d.current.d != nil {
		d.current.d.Close()
		d.current.d = nil
	}
	d.current.err = ErrDecoderClosed
}
go
{ "resource": "" }
q8392
BeginConditionalRender
train
// BeginConditionalRender calls the dynamically loaded OpenGL entry point
// gpBeginConditionalRender through the cgo bridge glowBeginConditionalRender.
func BeginConditionalRender(id uint32, mode uint32) {
	C.glowBeginConditionalRender(gpBeginConditionalRender, (C.GLuint)(id), (C.GLenum)(mode))
}
go
{ "resource": "" }
q8393
BindFragDataLocation
train
// BindFragDataLocation calls the dynamically loaded OpenGL entry point
// gpBindFragDataLocation through the cgo bridge glowBindFragDataLocation.
// name must point to a NUL-terminated C string.
func BindFragDataLocation(program uint32, color uint32, name *uint8) {
	C.glowBindFragDataLocation(gpBindFragDataLocation, (C.GLuint)(program), (C.GLuint)(color), (*C.GLchar)(unsafe.Pointer(name)))
}
go
{ "resource": "" }
q8394
ClampColor
train
// ClampColor calls the dynamically loaded OpenGL entry point gpClampColor
// through the cgo bridge glowClampColor.
func ClampColor(target uint32, clamp uint32) {
	C.glowClampColor(gpClampColor, (C.GLenum)(target), (C.GLenum)(clamp))
}
go
{ "resource": "" }
q8395
ColorTable
train
// ColorTable calls the dynamically loaded OpenGL entry point gpColorTable
// through the cgo bridge glowColorTable. table is passed through unchanged
// as a raw pointer.
func ColorTable(target uint32, internalformat uint32, width int32, format uint32, xtype uint32, table unsafe.Pointer) {
	C.glowColorTable(gpColorTable, (C.GLenum)(target), (C.GLenum)(internalformat), (C.GLsizei)(width), (C.GLenum)(format), (C.GLenum)(xtype), table)
}
go
{ "resource": "" }
q8396
ConvolutionFilter1D
train
// ConvolutionFilter1D calls the dynamically loaded OpenGL entry point
// gpConvolutionFilter1D through the cgo bridge glowConvolutionFilter1D.
// image is passed through unchanged as a raw pointer.
func ConvolutionFilter1D(target uint32, internalformat uint32, width int32, format uint32, xtype uint32, image unsafe.Pointer) {
	C.glowConvolutionFilter1D(gpConvolutionFilter1D, (C.GLenum)(target), (C.GLenum)(internalformat), (C.GLsizei)(width), (C.GLenum)(format), (C.GLenum)(xtype), image)
}
go
{ "resource": "" }
q8397
ConvolutionFilter2D
train
// ConvolutionFilter2D calls the dynamically loaded OpenGL entry point
// gpConvolutionFilter2D through the cgo bridge glowConvolutionFilter2D.
// image is passed through unchanged as a raw pointer.
func ConvolutionFilter2D(target uint32, internalformat uint32, width int32, height int32, format uint32, xtype uint32, image unsafe.Pointer) {
	C.glowConvolutionFilter2D(gpConvolutionFilter2D, (C.GLenum)(target), (C.GLenum)(internalformat), (C.GLsizei)(width), (C.GLsizei)(height), (C.GLenum)(format), (C.GLenum)(xtype), image)
}
go
{ "resource": "" }
q8398
CopyColorTable
train
// CopyColorTable calls the dynamically loaded OpenGL entry point
// gpCopyColorTable through the cgo bridge glowCopyColorTable.
func CopyColorTable(target uint32, internalformat uint32, x int32, y int32, width int32) {
	C.glowCopyColorTable(gpCopyColorTable, (C.GLenum)(target), (C.GLenum)(internalformat), (C.GLint)(x), (C.GLint)(y), (C.GLsizei)(width))
}
go
{ "resource": "" }
q8399
CopyConvolutionFilter1D
train
// CopyConvolutionFilter1D calls the dynamically loaded OpenGL entry point
// gpCopyConvolutionFilter1D through the cgo bridge glowCopyConvolutionFilter1D.
func CopyConvolutionFilter1D(target uint32, internalformat uint32, x int32, y int32, width int32) {
	C.glowCopyConvolutionFilter1D(gpCopyConvolutionFilter1D, (C.GLenum)(target), (C.GLenum)(internalformat), (C.GLint)(x), (C.GLint)(y), (C.GLsizei)(width))
}
go
{ "resource": "" }