_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q9600
IsHTTPError
train
func IsHTTPError(err error) bool { cause := errors.Cause(err) if nErr, ok := cause.(HTTPError); ok { return nErr.HTTPError() } return false }
go
{ "resource": "" }
q9601
IsNetworkError
train
func IsNetworkError(err error) bool { cause := errors.Cause(err) if nErr, ok := cause.(NetworkError); ok { return nErr.NetworkError() } return false }
go
{ "resource": "" }
q9602
IsServiceNotFoundError
train
func IsServiceNotFoundError(err error) bool { cause := errors.Cause(err) if nErr, ok := cause.(ServiceNotFoundError); ok { return nErr.ServiceNotFoundError() } return false }
go
{ "resource": "" }
q9603
IsTimeoutError
train
func IsTimeoutError(err error) bool { switch errType := errors.Cause(err).(type) { case TimeoutError: return errType.Timeout() default: return false } }
go
{ "resource": "" }
q9604
IsAuthenticationError
train
func IsAuthenticationError(err error) bool { cause := errors.Cause(err) if kvErr, ok := cause.(KeyValueError); ok && kvErr.KVError() { return kvErr.StatusCode() == int(gocbcore.StatusAuthError) } return false }
go
{ "resource": "" }
q9605
IsBucketMissingError
train
func IsBucketMissingError(err error) bool { cause := errors.Cause(err) if kvErr, ok := cause.(KeyValueError); ok && kvErr.KVError() { return kvErr.StatusCode() == int(gocbcore.StatusNoBucket) } return false }
go
{ "resource": "" }
q9606
IsAccessError
train
func IsAccessError(err error) bool { cause := errors.Cause(err) if kvErr, ok := cause.(KeyValueError); ok && kvErr.KVError() { return kvErr.StatusCode() == int(gocbcore.StatusAccessError) } return false }
go
{ "resource": "" }
q9607
IsConfigurationError
train
func IsConfigurationError(err error) bool { switch errType := errors.Cause(err).(type) { case ConfigurationError: return errType.ConfigurationError() default: return false } }
go
{ "resource": "" }
q9608
IsSubdocPathNotFoundError
train
func IsSubdocPathNotFoundError(err error) bool { cause := errors.Cause(err) if kvErr, ok := cause.(KeyValueError); ok && kvErr.KVError() { return kvErr.StatusCode() == int(gocbcore.StatusSubDocPathNotFound) } return false }
go
{ "resource": "" }
q9609
IsDurabilityError
train
func IsDurabilityError(err error) bool { switch errType := errors.Cause(err).(type) { case DurabilityError: return errType.DurabilityError() default: return false } }
go
{ "resource": "" }
q9610
IsNoResultsError
train
func IsNoResultsError(err error) bool { switch errType := errors.Cause(err).(type) { case NoResultsError: return errType.NoResultsError() default: return false } }
go
{ "resource": "" }
q9611
NextBytes
train
// NextBytes returns the next result row as raw bytes. It returns nil
// once the underlying stream has been closed, or when fetching the
// next chunk fails; in the failure case the error is stashed on r.err
// for later retrieval.
func (r *AnalyticsResults) NextBytes() []byte {
	if r.streamResult.Closed() {
		return nil
	}
	raw, err := r.streamResult.NextBytes()
	if err != nil {
		r.err = err
		return nil
	}
	return raw
}
go
{ "resource": "" }
q9612
AnalyticsQuery
train
func (c *Cluster) AnalyticsQuery(statement string, opts *AnalyticsQueryOptions) (*AnalyticsResults, error) { if opts == nil { opts = &AnalyticsQueryOptions{} } ctx := opts.Context if ctx == nil { ctx = context.Background() } var span opentracing.Span if opts.ParentSpanContext == nil { span = opentracing.GlobalTracer().StartSpan("ExecuteAnalyticsQuery", opentracing.Tag{Key: "couchbase.service", Value: "cbas"}) } else { span = opentracing.GlobalTracer().StartSpan("ExecuteAnalyticsQuery", opentracing.Tag{Key: "couchbase.service", Value: "cbas"}, opentracing.ChildOf(opts.ParentSpanContext)) } defer span.Finish() provider, err := c.getHTTPProvider() if err != nil { return nil, err } return c.analyticsQuery(ctx, span.Context(), statement, opts, provider) }
go
{ "resource": "" }
q9613
ListenAndServe
train
// ListenAndServe registers h as the AWS Lambda handler for API Gateway
// proxy events. A nil handler falls back to http.DefaultServeMux. Each
// event is converted to an *http.Request, dispatched to h through an
// in-memory ResponseWriter, and the buffered result is returned as the
// proxy response. NOTE(review): addr is unused (kept for signature
// parity with net/http), and lambda.Start normally blocks, making the
// trailing return effectively unreachable — confirm against the
// aws-lambda-go docs.
func ListenAndServe(addr string, h http.Handler) error {
	if h == nil {
		h = http.DefaultServeMux
	}
	lambda.Start(func(ctx context.Context, e events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
		r, err := NewRequest(ctx, e)
		if err != nil {
			return events.APIGatewayProxyResponse{}, err
		}
		w := NewResponse()
		h.ServeHTTP(w, r)
		return w.End(), nil
	})
	return nil
}
go
{ "resource": "" }
q9614
newContext
train
// newContext returns a child of ctx carrying the event's
// APIGatewayProxyRequestContext under requestContextKey, for later
// retrieval via RequestContext.
func newContext(ctx context.Context, e events.APIGatewayProxyRequest) context.Context {
	return context.WithValue(ctx, requestContextKey, e.RequestContext)
}
go
{ "resource": "" }
q9615
RequestContext
train
// RequestContext extracts the APIGatewayProxyRequestContext stored in
// ctx by newContext. The boolean is false when no value is present (or
// it has a different type).
func RequestContext(ctx context.Context) (events.APIGatewayProxyRequestContext, bool) {
	c, ok := ctx.Value(requestContextKey).(events.APIGatewayProxyRequestContext)
	return c, ok
}
go
{ "resource": "" }
q9616
Header
train
func (w *ResponseWriter) Header() http.Header { if w.header == nil { w.header = make(http.Header) } return w.header }
go
{ "resource": "" }
q9617
Write
train
// Write buffers b as response body bytes, implicitly sending a 200 OK
// header on the first write if WriteHeader has not been called yet
// (mirroring net/http semantics).
func (w *ResponseWriter) Write(b []byte) (int, error) {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK)
	}
	return w.buf.Write(b)
}
go
{ "resource": "" }
q9618
WriteHeader
train
func (w *ResponseWriter) WriteHeader(status int) { if w.wroteHeader { return } if w.Header().Get("Content-Type") == "" { w.Header().Set("Content-Type", "text/plain; charset=utf8") } w.out.StatusCode = status h := make(map[string]string) for k, v := range w.Header() { if len(v) > 0 { h[k] = v[len(v)-1] } } w.out.Headers = h w.wroteHeader = true }
go
{ "resource": "" }
q9619
End
train
// End finalizes the buffered response and returns the API Gateway
// proxy response value. Payloads judged binary by isBinary (on the
// response headers) are base64-encoded and flagged as such.
// NOTE(review): the send on closeNotifyCh blocks unless the channel is
// buffered or a receiver is waiting — confirm against NewResponse.
func (w *ResponseWriter) End() events.APIGatewayProxyResponse {
	w.out.IsBase64Encoded = isBinary(w.header)
	if w.out.IsBase64Encoded {
		w.out.Body = base64.StdEncoding.EncodeToString(w.buf.Bytes())
	} else {
		w.out.Body = w.buf.String()
	}

	// notify end
	w.closeNotifyCh <- true

	return w.out
}
go
{ "resource": "" }
q9620
isBinary
train
func isBinary(h http.Header) bool { switch { case !isTextMime(h.Get("Content-Type")): return true case h.Get("Content-Encoding") == "gzip": return true default: return false } }
go
{ "resource": "" }
q9621
isTextMime
train
// isTextMime reports whether kind parses as a media type that is safe
// to transmit as plain text: any text/* type plus a small allow-list
// of structured text formats. Unparseable values are treated as
// binary.
func isTextMime(kind string) bool {
	mt, _, err := mime.ParseMediaType(kind)
	if err != nil {
		return false
	}
	if strings.HasPrefix(mt, "text/") {
		return true
	}
	switch mt {
	case "image/svg+xml", "application/json", "application/xml":
		return true
	}
	return false
}
go
{ "resource": "" }
q9622
NewRequest
train
// NewRequest converts an API Gateway proxy event into an *http.Request
// suitable for dispatch to a standard http.Handler: it rebuilds the
// URL and query string, decodes a base64 body when flagged, copies
// headers, fills in remote address, Content-Length, request-ID/stage
// headers, attaches the event to the context, propagates the X-Ray
// trace ID when present, and sets the Host.
func NewRequest(ctx context.Context, e events.APIGatewayProxyRequest) (*http.Request, error) {
	// path
	u, err := url.Parse(e.Path)
	if err != nil {
		return nil, errors.Wrap(err, "parsing path")
	}

	// querystring
	q := u.Query()
	for k, v := range e.QueryStringParameters {
		q.Set(k, v)
	}
	u.RawQuery = q.Encode()

	// base64 encoded body
	body := e.Body
	if e.IsBase64Encoded {
		b, err := base64.StdEncoding.DecodeString(body)
		if err != nil {
			return nil, errors.Wrap(err, "decoding base64 body")
		}
		body = string(b)
	}

	// new request
	req, err := http.NewRequest(e.HTTPMethod, u.String(), strings.NewReader(body))
	if err != nil {
		return nil, errors.Wrap(err, "creating request")
	}

	// remote addr
	req.RemoteAddr = e.RequestContext.Identity.SourceIP

	// header fields
	for k, v := range e.Headers {
		req.Header.Set(k, v)
	}

	// content-length (only when the event did not already carry one)
	if req.Header.Get("Content-Length") == "" && body != "" {
		req.Header.Set("Content-Length", strconv.Itoa(len(body)))
	}

	// custom fields
	req.Header.Set("X-Request-Id", e.RequestContext.RequestID)
	req.Header.Set("X-Stage", e.RequestContext.Stage)

	// custom context values
	req = req.WithContext(newContext(ctx, e))

	// xray support; NOTE(review): uses a bare string context key, which
	// go vet flags — relies on the runtime storing the trace ID under it
	if traceID := ctx.Value("x-amzn-trace-id"); traceID != nil {
		req.Header.Set("X-Amzn-Trace-Id", fmt.Sprintf("%v", traceID))
	}

	// host
	req.URL.Host = req.Header.Get("Host")
	req.Host = req.URL.Host

	return req, nil
}
go
{ "resource": "" }
q9623
BitsWritten
train
// BitsWritten reports the total number of bits issued to the writer:
// bytes already flushed to the underlying writer (Offset), bytes
// pending in buf (cntBuf), plus bits pending in the bit buffer.
func (pw *Writer) BitsWritten() int64 {
	return 8*pw.Offset + 8*int64(pw.cntBuf) + int64(pw.numBits)
}
go
{ "resource": "" }
q9624
WritePads
train
// WritePads writes pad bits (taken from the low bits of v) to bring
// the bit buffer up to the next byte boundary. -numBits & 7 yields the
// number of bits needed to reach a multiple of eight.
func (pw *Writer) WritePads(v uint) {
	nb := -pw.numBits & 7
	pw.bufBits |= uint64(v) << pw.numBits
	pw.numBits += nb
}
go
{ "resource": "" }
q9625
Write
train
// Write writes buf directly to the underlying writer. Any pending bits
// must first be byte-aligned (otherwise an Invalid error is returned)
// and flushed, so the raw bytes land at the correct offset.
func (pw *Writer) Write(buf []byte) (cnt int, err error) {
	if pw.numBits > 0 || pw.cntBuf > 0 {
		if pw.numBits%8 != 0 {
			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
		}
		if _, err := pw.Flush(); err != nil {
			return 0, err
		}
	}
	cnt, err = pw.wr.Write(buf)
	pw.Offset += int64(cnt)
	return cnt, err
}
go
{ "resource": "" }
q9626
TryWriteBits
train
func (pw *Writer) TryWriteBits(v, nb uint) bool { if 64-pw.numBits < nb { return false } pw.bufBits |= uint64(v) << pw.numBits pw.numBits += nb return true }
go
{ "resource": "" }
q9627
WriteBits
train
// WriteBits writes the nb low bits of v into the bit buffer, first
// pushing any completed bytes out via PushBits to make room. Errors
// from PushBits are raised as panics (errors.Panic), matching the
// package's internal error convention.
func (pw *Writer) WriteBits(v, nb uint) {
	if _, err := pw.PushBits(); err != nil {
		errors.Panic(err)
	}
	pw.bufBits |= uint64(v) << pw.numBits
	pw.numBits += nb
}
go
{ "resource": "" }
q9628
TryWriteSymbol
train
// TryWriteSymbol attempts to write the prefix code for sym (looked up
// in pe's chunk table) into the bit buffer, reporting false when the
// code's bit-length does not fit in the remaining buffer space.
func (pw *Writer) TryWriteSymbol(sym uint, pe *Encoder) bool {
	chunk := pe.chunks[uint32(sym)&pe.chunkMask]
	nb := uint(chunk & countMask)
	if 64-pw.numBits < nb {
		return false
	}
	pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
	pw.numBits += nb
	return true
}
go
{ "resource": "" }
q9629
WriteSymbol
train
// WriteSymbol writes the prefix code for sym (looked up in pe's chunk
// table) into the bit buffer, first pushing completed bytes out via
// PushBits. Errors from PushBits are raised as panics.
func (pw *Writer) WriteSymbol(sym uint, pe *Encoder) {
	if _, err := pw.PushBits(); err != nil {
		errors.Panic(err)
	}
	chunk := pe.chunks[uint32(sym)&pe.chunkMask]
	nb := uint(chunk & countMask)
	pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
	pw.numBits += nb
}
go
{ "resource": "" }
q9630
Flush
train
// Flush pushes any whole bytes from the bit buffer into buf and writes
// buf to the underlying writer, returning the updated byte Offset. It
// is a no-op when there is less than a byte of pending bits and no
// buffered bytes. Up to 7 trailing bits may remain in the bit buffer.
func (pw *Writer) Flush() (int64, error) {
	if pw.numBits < 8 && pw.cntBuf == 0 {
		return pw.Offset, nil
	}
	if _, err := pw.PushBits(); err != nil {
		return pw.Offset, err
	}
	cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
	pw.cntBuf -= cnt
	pw.Offset += int64(cnt)
	return pw.Offset, err
}
go
{ "resource": "" }
q9631
PushBits
train
// PushBits moves as many whole bytes as possible from the 64-bit bit
// buffer into buf, draining buf to the underlying writer first when it
// is nearly full. In bigEndian mode the bits within each byte are
// reversed before storing. It returns the number of bits transferred.
func (pw *Writer) PushBits() (uint, error) {
	if pw.cntBuf >= len(pw.buf)-8 {
		cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
		pw.cntBuf -= cnt
		pw.Offset += int64(cnt)
		if err != nil {
			return 0, err
		}
	}

	u := pw.bufBits
	if pw.bigEndian {
		// Swap all the bits within each byte.
		u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1
		u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2
		u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4
	}
	// Starting with Go 1.7, the compiler should use a wide integer
	// store here if the architecture supports it.
	binary.LittleEndian.PutUint64(pw.buf[pw.cntBuf:], u)

	nb := pw.numBits / 8 // Number of bytes to copy from bit buffer
	pw.cntBuf += int(nb)
	pw.bufBits >>= 8 * nb
	pw.numBits -= 8 * nb
	return 8 * nb, nil
}
go
{ "resource": "" }
q9632
getFiles
train
func getFiles(paths []string, globs []string) []file { var fs []file for _, p := range paths { for _, g := range globs { ms, _ := filepath.Glob(filepath.Join(p, g)) for _, m := range ms { r, err1 := filepath.Rel(p, m) fi, err2 := os.Stat(m) if err1 == nil && err2 == nil && !fi.IsDir() { fs = append(fs, file{Abs: m, Rel: r}) } } } } return fs }
go
{ "resource": "" }
q9633
errWrap
train
// errWrap rewrites errors of the package's errors.Error type: an
// Invalid code is replaced with replaceCode, and the error is
// re-created (via errorf) with the resulting code and the original
// message. Non-errors.Error values pass through unchanged.
func errWrap(err error, replaceCode int) error {
	if cerr, ok := err.(errors.Error); ok {
		if errors.IsInvalid(cerr) {
			cerr.Code = replaceCode
		}
		err = errorf(cerr.Code, "%s", cerr.Msg)
	}
	return err
}
go
{ "resource": "" }
q9634
reverseUint32
train
func reverseUint32(v uint32) (x uint32) { x |= uint32(reverseLUT[byte(v>>0)]) << 24 x |= uint32(reverseLUT[byte(v>>8)]) << 16 x |= uint32(reverseLUT[byte(v>>16)]) << 8 x |= uint32(reverseLUT[byte(v>>24)]) << 0 return x }
go
{ "resource": "" }
q9635
NewReader
train
func NewReader(rd io.Reader) *Reader { mr := new(Reader) mr.Reset(rd) return mr }
go
{ "resource": "" }
q9636
Reset
train
func (mr *Reader) Reset(rd io.Reader) { *mr = Reader{ br: mr.br, bw: mr.bw, bb: mr.bb, } if br, ok := rd.(*prefix.Reader); ok { // Use input Reader directly as a prefix.Reader. mr.rd = br } else { // Use pre-allocated prefix.Reader to wrap input Reader. mr.rd = &mr.br mr.rd.Init(rd, false) } return }
go
{ "resource": "" }
q9637
Read
train
// Read copies decompressed data into buf. Buffered output is drained
// first; when the buffer is empty and a final block was seen,
// FinalMode is latched and io.EOF becomes the sticky error; otherwise
// the next block is decoded and the loop continues. OutputOffset
// counts total bytes returned. A sticky mr.err short-circuits all
// subsequent calls.
func (mr *Reader) Read(buf []byte) (int, error) {
	if mr.err != nil {
		return 0, mr.err
	}
	var rdCnt int
	for len(buf) > 0 {
		if len(mr.buf) > 0 {
			cpCnt := copy(buf, mr.buf)
			mr.buf = mr.buf[cpCnt:]
			rdCnt += cpCnt
			break
		}
		if mr.final != FinalNil {
			mr.FinalMode = mr.final
			mr.err = io.EOF
			break
		}
		mr.err = mr.decodeBlock()
		if mr.err != nil {
			break
		}
	}
	mr.OutputOffset += int64(rdCnt)
	return rdCnt, mr.err
}
go
{ "resource": "" }
q9638
Close
train
func (mr *Reader) Close() error { if mr.err == errClosed { return nil } if mr.err != nil && mr.err != io.EOF { return mr.err } mr.FinalMode = mr.final mr.err = errClosed mr.rd = nil // Release reference to underlying Reader return nil }
go
{ "resource": "" }
q9639
initContextLUTs
train
// initContextLUTs populates contextP1LUT and contextP2LUT for every
// (byte, context mode) pair so literal-context IDs can be computed by
// table lookup at decode time.
func initContextLUTs() {
	for i := 0; i < 256; i++ {
		for m := 0; m < numContextModes; m++ {
			base := m << 8

			// Operations performed here are specified in RFC section 7.1.
			switch m {
			case contextLSB6:
				contextP1LUT[base+i] = byte(i) & 0x3f
				contextP2LUT[base+i] = 0
			case contextMSB6:
				contextP1LUT[base+i] = byte(i) >> 2
				contextP2LUT[base+i] = 0
			case contextUTF8:
				contextP1LUT[base+i] = contextLUT0[byte(i)]
				contextP2LUT[base+i] = contextLUT1[byte(i)]
			case contextSigned:
				contextP1LUT[base+i] = contextLUT2[byte(i)] << 3
				contextP2LUT[base+i] = contextLUT2[byte(i)]
			default:
				panic("unknown context mode")
			}
		}
	}
}
go
{ "resource": "" }
q9640
getLitContextID
train
// getLitContextID computes the literal context ID for the two previous
// bytes p1 and p2 under the given context mode, by ORing the two
// precomputed lookup-table halves (see initContextLUTs).
func getLitContextID(p1, p2 byte, mode uint8) uint8 {
	base := uint(mode) << 8
	return contextP1LUT[base+uint(p1)] | contextP2LUT[base+uint(p2)]
}
go
{ "resource": "" }
q9641
HistSize
train
func (dd *dictDecoder) HistSize() int { if dd.full { return dd.size } return dd.wrPos }
go
{ "resource": "" }
q9642
ReadFlush
train
// ReadFlush returns the bytes written since the previous flush and
// advances the read position. When the write position hits the end of
// the history buffer it either wraps (once the buffer has reached its
// full window size) or grows the buffer toward that size.
func (dd *dictDecoder) ReadFlush() []byte {
	toRead := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		if len(dd.hist) == dd.size {
			dd.wrPos, dd.rdPos = 0, 0
			dd.full = true
		} else {
			// Allocate a larger history buffer.
			size := cap(dd.hist) * growFactor
			if size > dd.size {
				size = dd.size
			}
			hist := make([]byte, size)
			copy(hist, dd.hist)
			dd.hist = hist
		}
	}
	return toRead
}
go
{ "resource": "" }
q9643
LastBytes
train
// LastBytes returns the last two bytes written to the history window,
// wrapping around to the end of the buffer when fewer than two bytes
// precede the current write position.
func (dd *dictDecoder) LastBytes() (p1, p2 byte) {
	switch {
	case dd.wrPos > 1:
		return dd.hist[dd.wrPos-1], dd.hist[dd.wrPos-2]
	case dd.wrPos > 0:
		return dd.hist[dd.wrPos-1], dd.hist[len(dd.hist)-1]
	default:
		return dd.hist[len(dd.hist)-1], dd.hist[len(dd.hist)-2]
	}
}
go
{ "resource": "" }
q9644
setDefaults
train
// setDefaults resets the package-level benchmark configuration
// (formats, tests, codecs, search paths, file globs, compression
// levels, and input sizes) to their default values.
func setDefaults() {
	formats = defaultFormats()
	tests = defaultTests()
	codecs = defaultCodecs()
	paths = defaultPaths()
	globs = []string{"*.txt", "*.bin"}
	levels = []int{1, 6, 9}
	sizes = []int{1e4, 1e5, 1e6}
}
go
{ "resource": "" }
q9645
intName
train
func intName(n int64) string { switch n { case 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12: s := fmt.Sprintf("%e", float64(n)) re := regexp.MustCompile("\\.0*e\\+0*") return re.ReplaceAllString(s, "e") default: s := unitconv.FormatPrefix(float64(n), unitconv.Base1024, 2) return strings.Replace(s, ".00", "", -1) } }
go
{ "resource": "" }
q9646
BitsRead
train
// BitsRead reports the total number of bits consumed from the
// underlying reader. In buffered mode (bufRd set) the count is based
// on the discard accounting plus bits fed but not yet consumed;
// otherwise it is bytes read minus bits still pending in the bit
// buffer.
func (pr *Reader) BitsRead() int64 {
	offset := 8*pr.Offset - int64(pr.numBits)
	if pr.bufRd != nil {
		discardBits := pr.discardBits + int(pr.fedBits-pr.numBits)
		offset = 8*pr.Offset + int64(discardBits)
	}
	return offset
}
go
{ "resource": "" }
q9647
Read
train
// Read reads raw bytes from the stream. Pending bits in the bit buffer
// are drained first and must be byte-aligned (otherwise an Invalid
// error is returned); in bigEndian mode each drained byte is
// bit-reversed. With no pending bits, the reader is flushed and the
// read is passed through to the underlying reader.
func (pr *Reader) Read(buf []byte) (cnt int, err error) {
	if pr.numBits > 0 {
		if pr.numBits%8 != 0 {
			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
		}
		for cnt = 0; len(buf) > cnt && pr.numBits > 0; cnt++ {
			if pr.bigEndian {
				buf[cnt] = internal.ReverseLUT[byte(pr.bufBits)]
			} else {
				buf[cnt] = byte(pr.bufBits)
			}
			pr.bufBits >>= 8
			pr.numBits -= 8
		}
		return cnt, nil
	}
	if _, err := pr.Flush(); err != nil {
		return 0, err
	}
	cnt, err = pr.rd.Read(buf)
	pr.Offset += int64(cnt)
	return cnt, err
}
go
{ "resource": "" }
q9648
ReadOffset
train
// ReadOffset decodes an offset value: a prefix symbol selects a range
// code from rcs, and the range's extra bits are read and added to its
// base.
func (pr *Reader) ReadOffset(pd *Decoder, rcs RangeCodes) uint {
	rc := rcs[pr.ReadSymbol(pd)]
	return uint(rc.Base) + pr.ReadBits(uint(rc.Len))
}
go
{ "resource": "" }
q9649
ReadBits
train
// ReadBits reads and returns the next nb bits, LSB first. Fill errors
// from PullBits are raised as panics (errors.Panic), matching the
// package's internal error convention.
func (pr *Reader) ReadBits(nb uint) uint {
	if err := pr.PullBits(nb); err != nil {
		errors.Panic(err)
	}
	val := uint(pr.bufBits & uint64(1<<nb-1))
	pr.bufBits >>= nb
	pr.numBits -= nb
	return val
}
go
{ "resource": "" }
q9650
ReadSymbol
train
// ReadSymbol decodes a single prefix-coded symbol using pd's two-level
// chunk/link tables: a first lookup on chunkBits bits may redirect to a
// secondary link table for longer codes. The loop pulls more input
// bits until the resolved code length is fully available, then
// consumes it. Decoding with an empty tree panics.
func (pr *Reader) ReadSymbol(pd *Decoder) uint {
	if len(pd.chunks) == 0 {
		panicf(errors.Invalid, "decode with empty prefix tree")
	}
	nb := uint(pd.MinBits)
	for {
		if err := pr.PullBits(nb); err != nil {
			errors.Panic(err)
		}
		chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask]
		nb = uint(chunk & countMask)
		if nb > uint(pd.chunkBits) {
			linkIdx := chunk >> countBits
			chunk = pd.links[linkIdx][uint32(pr.bufBits>>pd.chunkBits)&pd.linkMask]
			nb = uint(chunk & countMask)
		}
		if nb <= pr.numBits {
			pr.bufBits >>= nb
			pr.numBits -= nb
			return uint(chunk >> countBits)
		}
	}
}
go
{ "resource": "" }
q9651
Flush
train
// Flush synchronizes the buffered reader's position with the bits
// actually consumed, discarding fully-consumed bytes and updating
// Offset. It is a no-op when not in buffered mode.
func (pr *Reader) Flush() (int64, error) {
	if pr.bufRd == nil {
		return pr.Offset, nil
	}

	// Update the number of total bits to discard.
	pr.discardBits += int(pr.fedBits - pr.numBits)
	pr.fedBits = pr.numBits

	// Discard some bytes to update read offset.
	var err error
	nd := (pr.discardBits + 7) / 8 // Round up to nearest byte
	nd, err = pr.bufRd.Discard(nd)
	pr.discardBits -= nd * 8 // -7..0
	pr.Offset += int64(nd)

	// These are invalid after Discard.
	pr.bufPeek = nil
	return pr.Offset, err
}
go
{ "resource": "" }
q9652
transformUppercase
train
// transformUppercase applies the dictionary uppercase transform to
// word in place. ASCII lowercase letters are flipped to uppercase by
// XORing bit 5; for multi-byte UTF-8 sequences a fixed bit pattern is
// XORed into a trailing byte as the transform rules specify. When once
// is true, only the first character is transformed.
func transformUppercase(word []byte, once bool) {
	i := 0
	for i < len(word) {
		switch c := word[i]; {
		case c < 192: // single-byte sequence
			if c >= 'a' && c <= 'z' {
				word[i] ^= 32
			}
			i++
		case c < 224: // two-byte sequence
			if i+1 < len(word) {
				word[i+1] ^= 32
			}
			i += 2
		default: // three-byte sequence
			if i+2 < len(word) {
				word[i+2] ^= 5
			}
			i += 3
		}
		if once {
			return
		}
	}
}
go
{ "resource": "" }
q9653
BenchmarkEncoder
train
// BenchmarkEncoder benchmarks enc compressing input at level lvl,
// discarding the output. The timer is stopped during setup and a GC is
// forced so allocation noise from earlier runs does not pollute the
// measurement.
func BenchmarkEncoder(input []byte, enc Encoder, lvl int) testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.StopTimer()
		if enc == nil {
			b.Fatalf("unexpected error: nil Encoder")
		}
		runtime.GC()
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			wr := enc(ioutil.Discard, lvl)
			_, err := io.Copy(wr, bytes.NewBuffer(input))
			if err := wr.Close(); err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			if err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			b.SetBytes(int64(len(input)))
		}
	})
}
go
{ "resource": "" }
q9654
BenchmarkDecoder
train
// BenchmarkDecoder benchmarks dec decompressing the (already
// compressed) input, discarding the output. Throughput is reported in
// decompressed bytes (the count returned by io.Copy).
func BenchmarkDecoder(input []byte, dec Decoder) testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.StopTimer()
		if dec == nil {
			b.Fatalf("unexpected error: nil Decoder")
		}
		runtime.GC()
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			rd := dec(bufio.NewReader(bytes.NewBuffer(input)))
			cnt, err := io.Copy(ioutil.Discard, rd)
			if err := rd.Close(); err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			if err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			b.SetBytes(cnt)
		}
	})
}
go
{ "resource": "" }
q9655
Init
train
// Init builds the encoder's symbol->code lookup table from codes. The
// table is an open-addressed "hash" keyed by symbol masked to a
// power-of-two size; on any collision the table size is doubled and
// insertion restarts (the goto retry loop). Degenerate trees of zero
// or one code are handled specially up front. In debug builds the
// input is validated for ordering and prefix completeness.
func (pe *Encoder) Init(codes PrefixCodes) {
	// Handle special case trees.
	if len(codes) <= 1 {
		switch {
		case len(codes) == 0: // Empty tree (should error if used later)
			*pe = Encoder{chunks: pe.chunks[:0], NumSyms: 0}
		case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero)
			pe.chunks = append(pe.chunks[:0], codes[0].Val<<countBits|0)
			*pe = Encoder{chunks: pe.chunks[:1], NumSyms: 1}
		default:
			panic("invalid codes")
		}
		return
	}
	if internal.Debug && !sort.IsSorted(prefixCodesBySymbol(codes)) {
		panic("input codes is not sorted")
	}
	if internal.Debug && !(codes.checkLengths() && codes.checkPrefixes()) {
		panic("detected incomplete or overlapping codes")
	}

	// Enough chunks to contain all the symbols.
	numChunks := 1
	for n := len(codes) - 1; n > 0; n >>= 1 {
		numChunks <<= 1
	}
	pe.NumSyms = uint32(len(codes))

retry:
	// Allocate and reset chunks.
	pe.chunks = allocUint32s(pe.chunks, numChunks)
	pe.chunkMask = uint32(numChunks - 1)
	for i := range pe.chunks {
		pe.chunks[i] = 0 // Logic below relies on zero value as uninitialized
	}

	// Insert each symbol, checking that there are no conflicts.
	for _, c := range codes {
		if pe.chunks[c.Sym&pe.chunkMask] > 0 {
			// Collision found in our "hash" table, so grow and try again.
			numChunks <<= 1
			goto retry
		}
		pe.chunks[c.Sym&pe.chunkMask] = c.Val<<countBits | c.Len
	}
}
go
{ "resource": "" }
q9656
decodeMeta
train
// decodeMeta attempts to decode data as a meta-encoded payload,
// returning the decoded bytes and whether decoding succeeded. The
// consumed prefix of data is additionally run through decompressMeta,
// which panics if it is not valid zero-output DEFLATE (a sanity check
// on the meta encoding).
func decodeMeta(data []byte) ([]byte, bool) {
	r := bytes.NewReader(data)
	mr := xflate.NewMetaReader(r)
	b, err := ioutil.ReadAll(mr)
	if err != nil {
		return nil, false
	}
	pos := int(r.Size()) - r.Len()
	decompressMeta(data[:pos])
	return b, true
}
go
{ "resource": "" }
q9657
decompressMeta
train
// decompressMeta verifies that data is a sequence of DEFLATE blocks
// that each decompress to zero bytes (the defining property of
// meta-encoded blocks). A copy of data is terminated with a final
// empty stored block so flate can parse it; any decode error or
// non-empty output panics.
func decompressMeta(data []byte) {
	// Make a copy and append DEFLATE terminator block.
	data = append([]byte(nil), data...)
	data = append(data, []byte{0x01, 0x00, 0x00, 0xff, 0xff}...)

	r := bytes.NewReader(data)
	for r.Len() > 0 {
		zr := flate.NewReader(r)
		b, err := ioutil.ReadAll(zr)
		if err != nil {
			panic(err)
		}
		if len(b) > 0 {
			panic("non-zero meta-encoded block")
		}
		if err := zr.Close(); err != nil {
			panic(err)
		}
	}
}
go
{ "resource": "" }
q9658
initDictLUTs
train
// initDictLUTs fills dictSizes (number of dictionary words per length)
// and dictOffsets (cumulative byte offset of each length's section)
// from the static dictBitSizes table, for lengths minDictLen through
// maxDictLen inclusive.
func initDictLUTs() {
	// Sweep from minDictLen to maxDictLen, inclusive.
	for i := minDictLen; i <= maxDictLen; i++ {
		dictSizes[i] = 1 << uint(dictBitSizes[i])
		dictOffsets[i] = dictOffsets[i-1] + (i-1)*dictSizes[i-1]
	}
}
go
{ "resource": "" }
q9659
MakeRangeCodes
train
func MakeRangeCodes(minBase uint, bits []uint) (rc RangeCodes) { for _, nb := range bits { rc = append(rc, RangeCode{Base: uint32(minBase), Len: uint32(nb)}) minBase += 1 << nb } return rc }
go
{ "resource": "" }
q9660
handleDegenerateCodes
train
// handleDegenerateCodes pads a single-code prefix tree with a dummy
// one-bit code for an out-of-range symbol (maxSyms), so downstream
// table construction sees a complete tree. Trees with any other code
// count are returned unchanged.
func handleDegenerateCodes(codes prefix.PrefixCodes, maxSyms uint) prefix.PrefixCodes {
	if len(codes) != 1 {
		return codes
	}
	return append(codes, prefix.PrefixCode{Sym: uint32(maxSyms), Len: 1})
}
go
{ "resource": "" }
q9661
ReverseUint32
train
// ReverseUint32 reverses the bit order of v: each byte is bit-reversed
// via ReverseLUT and the four bytes are placed in the opposite order.
func ReverseUint32(v uint32) (x uint32) {
	x |= uint32(ReverseLUT[byte(v>>0)]) << 24
	x |= uint32(ReverseLUT[byte(v>>8)]) << 16
	x |= uint32(ReverseLUT[byte(v>>16)]) << 8
	x |= uint32(ReverseLUT[byte(v>>24)]) << 0
	return x
}
go
{ "resource": "" }
q9662
ReverseUint32N
train
// ReverseUint32N reverses the low n bits of v, returning them in the
// low n bits of the result (achieved by pre-shifting v to the top
// before the full 32-bit reversal).
func ReverseUint32N(v uint32, n uint) (x uint32) {
	return ReverseUint32(v << (32 - n))
}
go
{ "resource": "" }
q9663
ReverseUint64
train
// ReverseUint64 reverses the bit order of v: each byte is bit-reversed
// via ReverseLUT and the eight bytes are placed in the opposite order.
func ReverseUint64(v uint64) (x uint64) {
	x |= uint64(ReverseLUT[byte(v>>0)]) << 56
	x |= uint64(ReverseLUT[byte(v>>8)]) << 48
	x |= uint64(ReverseLUT[byte(v>>16)]) << 40
	x |= uint64(ReverseLUT[byte(v>>24)]) << 32
	x |= uint64(ReverseLUT[byte(v>>32)]) << 24
	x |= uint64(ReverseLUT[byte(v>>40)]) << 16
	x |= uint64(ReverseLUT[byte(v>>48)]) << 8
	x |= uint64(ReverseLUT[byte(v>>56)]) << 0
	return x
}
go
{ "resource": "" }
q9664
ReverseUint64N
train
// ReverseUint64N reverses the low n bits of v, returning them in the
// low n bits of the result (achieved by pre-shifting v to the top
// before the full 64-bit reversal).
func ReverseUint64N(v uint64, n uint) (x uint64) {
	return ReverseUint64(v << (64 - n))
}
go
{ "resource": "" }
q9665
ReverseSearch
train
func ReverseSearch(data []byte) int { var magic uint32 for i := len(data) - 1; i >= 0; i-- { magic = (magic << 8) | uint32(data[i]) if magic&magicMask == magicVals { return i } } return -1 }
go
{ "resource": "" }
q9666
numBits
train
func numBits(b byte) (zeros, ones int) { ones = int(oneBitsLUT[b]) zeros = 8 - ones return }
go
{ "resource": "" }
q9667
NewReader
train
// NewReader returns a Reader positioned at the start of the XFLATE
// stream in rs. NOTE(review): conf is currently unused — presumably
// reserved for future configuration; confirm against ReaderConfig.
func NewReader(rs io.ReadSeeker, conf *ReaderConfig) (*Reader, error) {
	xr := new(Reader)
	err := xr.Reset(rs)
	return xr, err
}
go
{ "resource": "" }
q9668
Reset
train
// Reset reinitializes the Reader on rs, retaining reusable internal
// state (decoders, buffers, index storage). It reads the stream footer
// and all indexes up front, records the footer as a terminal index
// record, and seeks the chunk reader to raw offset zero. Any failure
// is latched into xr.err and returned.
func (xr *Reader) Reset(rs io.ReadSeeker) error {
	*xr = Reader{
		rd: rs,

		mr:  xr.mr,
		zr:  xr.zr,
		idx: xr.idx,

		br:     xr.br,
		bw:     xr.bw,
		idxs:   xr.idxs,
		chunks: xr.chunks,
	}
	if xr.zr == nil {
		xr.zr, _ = newFlateReader(nil)
	}
	xr.idx.Reset()

	// Read entire index.
	var backSize, footSize int64
	if backSize, footSize, xr.err = xr.decodeFooter(); xr.err != nil {
		return xr.err
	}
	if xr.err = xr.decodeIndexes(backSize); xr.err != nil {
		return xr.err
	}
	if !xr.idx.AppendRecord(footSize, 0, footerType) {
		xr.err = errCorrupted
		return xr.err
	}

	// Setup initial chunk reader.
	_, xr.err = xr.Seek(0, io.SeekStart)
	return xr.err
}
go
{ "resource": "" }
q9669
Read
train
// Read reads decompressed data from the current chunk. It first drains
// any pending discard (from a Seek landing mid-chunk), then reads from
// the flate decoder; on chunk EOF it validates the chunk's sync
// marker and compressed/raw sizes against the index, seeks to the next
// chunk, and surfaces io.EOF only at the terminal (unknown-type)
// record. Errors are sticky on xr.err.
func (xr *Reader) Read(buf []byte) (int, error) {
	if xr.err != nil {
		return 0, xr.err
	}

	// Discard some data to reach the expected raw offset.
	if xr.discard > 0 {
		var n int64
		xr.lr = io.LimitedReader{R: xr.zr, N: xr.discard}
		n, xr.err = io.Copy(ioutil.Discard, &xr.lr)
		if xr.err != nil {
			return 0, xr.err
		}
		if n != xr.discard {
			xr.err = errCorrupted
			return 0, xr.err
		}
		xr.discard = 0
	}

	var cnt int
	for cnt == 0 && xr.err == nil {
		cnt, xr.err = xr.zr.Read(buf)
		xr.offset += int64(cnt)
		if xr.err == io.EOF {
			xr.err = nil // Clear io.EOF temporarily

			// Verify that the compressed section ends with an empty raw block.
			if xr.chk.typ == deflateType && xr.cr.sync != 0x0000ffff {
				xr.err = errCorrupted
				break
			}

			// Verify that the compressed and raw sizes match.
			if xr.chk.typ != footerType {
				xr.chk.csize += int64(len(endBlock)) // Side effect of using chunkReader
			}
			if xr.chk.csize != xr.zr.InputOffset || xr.chk.rsize != xr.zr.OutputOffset {
				xr.err = errCorrupted
				break
			}

			// Seek to next chunk.
			if _, xr.err = xr.Seek(xr.offset, io.SeekStart); xr.err != nil {
				break
			}
			if xr.chk.typ == unknownType {
				xr.err = io.EOF
			}
		}
	}
	return cnt, xr.err
}
go
{ "resource": "" }
q9670
Seek
train
// Seek repositions the reader to the given raw (decompressed) offset.
// Seeks within the current chunk only adjust the pending discard;
// otherwise the index is consulted (trying the adjacent record before
// a binary search) to locate the containing chunk, the underlying
// reader is positioned at its compressed start, and the flate decoder
// is reset over a size-limited chunk reader. Seeking past the end
// clamps the discard to the data that actually exists.
func (xr *Reader) Seek(offset int64, whence int) (int64, error) {
	if xr.err != nil && xr.err != io.EOF {
		return 0, xr.err
	}

	// Determine which position to seek to.
	var pos int64
	end := xr.idx.LastRecord().RawOffset
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = xr.offset + offset
	case io.SeekEnd:
		pos = end + offset
	default:
		return 0, errorf(errors.Invalid, "invalid whence: %d", whence)
	}
	if pos < 0 {
		return 0, errorf(errors.Invalid, "negative position: %d", pos)
	}

	// As an optimization if the new position is within the current chunk,
	// then just adjust the discard value.
	discard := pos - xr.offset
	remain := xr.chk.rsize - xr.zr.OutputOffset
	if discard > 0 && remain > 0 && discard < remain {
		xr.offset, xr.discard = pos, discard
		return pos, nil
	}

	// Query the index for the chunk to start decoding from.
	// Attempt to use the subsequent record before resorting to binary search.
	prev, curr := xr.idx.GetRecords(xr.ri)
	if !(prev.RawOffset <= pos && pos <= curr.RawOffset) {
		xr.ri = xr.idx.Search(pos)
		prev, curr = xr.idx.GetRecords(xr.ri)
	}
	xr.ri++
	if xr.ri > len(xr.idx.Records) {
		xr.ri = len(xr.idx.Records)
	}

	// Setup a chunk reader at the given position.
	xr.chk = chunk{
		csize: curr.CompOffset - prev.CompOffset,
		rsize: curr.RawOffset - prev.RawOffset,
		typ:   curr.Type,
	}
	xr.offset, xr.discard = pos, pos-prev.RawOffset
	if pos > end {
		// In case pos is really large, only discard data that actually exists.
		xr.discard = end - prev.RawOffset
	}
	_, xr.err = xr.rd.Seek(prev.CompOffset, io.SeekStart)
	xr.cr.Reset(xr.rd, xr.chk.csize)
	xr.zr.Reset(&xr.cr)
	return pos, xr.err
}
go
{ "resource": "" }
q9671
Close
train
func (xr *Reader) Close() error { if xr.err == errClosed { return nil } if xr.err != nil && xr.err != io.EOF { return xr.err } xr.err = errClosed return nil }
go
{ "resource": "" }
q9672
decodeIndexes
train
// decodeIndexes walks the chain of indexes backwards from the current
// position: each index records the size of the one before it
// (BackSize) plus the compressed data it covers, so seeking back by
// both reaches the previous index. The walk must land exactly at
// offset zero. The collected indexes are then compacted, in forward
// order, into xr.idx.
func (xr *Reader) decodeIndexes(backSize int64) error {
	pos, err := xr.rd.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}

	// Read all indexes.
	var compSize int64
	xr.idxs = xr.idxs[:0]
	for {
		// Seek backwards past index and compressed blocks.
		newPos := pos - (backSize + compSize)
		if newPos < 0 || newPos > pos {
			return errCorrupted // Integer overflow for new seek position
		}
		if pos, err = xr.rd.Seek(newPos, io.SeekStart); err != nil {
			return err
		}
		if backSize == 0 {
			break
		}

		// Read the index.
		if cap(xr.idxs) > len(xr.idxs) {
			xr.idxs = xr.idxs[:len(xr.idxs)+1]
		} else {
			xr.idxs = append(xr.idxs, index{})
		}
		idx := &xr.idxs[len(xr.idxs)-1]
		idx.Reset()
		idx.IndexSize = backSize
		if err = xr.decodeIndex(idx); err != nil {
			return err
		}
		backSize, compSize = idx.BackSize, idx.LastRecord().CompOffset
	}
	if pos != 0 {
		return errCorrupted
	}

	// Compact all indexes into one.
	for i := len(xr.idxs) - 1; i >= 0; i-- {
		idx := xr.idxs[i]
		if !xr.idx.AppendIndex(&idx) {
			return errCorrupted
		}
		if !xr.idx.AppendRecord(idx.IndexSize, 0, indexType) {
			return errCorrupted
		}
	}
	return nil
}
go
{ "resource": "" }
q9673
decodeIndex
train
// decodeIndex reads and parses a single index of idx.IndexSize bytes
// at the current position (restoring the reader offset afterwards).
// The index payload is meta-decoded, then parsed as varints: back
// size, record count, total sizes, and per-chunk size pairs, followed
// by a CRC-32 over everything before the trailing checksum. Records
// are validated (every chunk must at least hold a sync marker) and the
// accumulated totals must match the declared ones.
func (xr *Reader) decodeIndex(idx *index) error {
	// Helper function to read VLIs.
	var errVLI error
	readVLI := func() int64 {
		x, n := binary.Uvarint(xr.bw.Bytes())
		if n <= 0 || x > math.MaxInt64 {
			errVLI = errCorrupted
			return 0
		}
		xr.bw.Next(n)
		return int64(x)
	}

	// Read the index and restore the underlying reader offset.
	xr.br.Reset()
	xr.lr = io.LimitedReader{R: xr.rd, N: idx.IndexSize}
	n, err := io.Copy(&xr.br, &xr.lr)
	if err != nil {
		return err
	}
	if _, err := xr.rd.Seek(-n, io.SeekCurrent); err != nil {
		return err
	}

	// Parse the index.
	var crc uint32
	xr.chunks = xr.chunks[:0]
	xr.bw.Reset()
	xr.mr.Reset(&xr.br)
	if _, err := io.Copy(&xr.bw, &xr.mr); err != nil {
		return errWrap(err)
	}
	if xr.bw.Len() > 4 {
		crc = crc32.ChecksumIEEE(xr.bw.Bytes()[:xr.bw.Len()-4])
	}
	idx.BackSize = readVLI()
	numRecs := readVLI()
	totalCompSize := readVLI()
	totalRawSize := readVLI()
	if errVLI != nil {
		return errVLI
	}
	for i := int64(0); i < numRecs; i++ {
		xr.chunks = append(xr.chunks, chunk{readVLI(), readVLI(), 0})
	}
	if xr.bw.Len() != 4 || binary.LittleEndian.Uint32(xr.bw.Bytes()) != crc {
		return errCorrupted
	}
	if xr.mr.FinalMode != meta.FinalMeta {
		return errCorrupted
	}
	if xr.mr.InputOffset != idx.IndexSize {
		return errCorrupted
	}

	// Convert individual index sizes to be absolute offsets.
	for _, chk := range xr.chunks {
		if chk.csize <= 4 {
			return errCorrupted // Every chunk has a sync marker
		}
		if !idx.AppendRecord(chk.csize, chk.rsize, deflateType) {
			return errCorrupted
		}
	}
	lastRec := idx.LastRecord()
	if lastRec.CompOffset != totalCompSize || lastRec.RawOffset != totalRawSize {
		return errCorrupted
	}
	return nil
}
go
{ "resource": "" }
q9674
decodeFooter
train
// decodeFooter locates and parses the stream footer at the end of the
// file: it reads the tail of the stream, reverse-searches for the meta
// magic marker, meta-decodes exactly one final-stream block, checks
// the magic prefix, and extracts the back-size varint pointing at the
// last index. It returns that back size and the footer's encoded size,
// leaving the reader positioned at the start of the footer.
func (xr *Reader) decodeFooter() (backSize, footSize int64, err error) {
	// Read the last few bytes of the stream.
	end, err := xr.rd.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, 0, err
	}
	if end > meta.MaxEncBytes {
		end = meta.MaxEncBytes
	}
	if _, err := xr.rd.Seek(-end, io.SeekEnd); err != nil {
		return 0, 0, err
	}
	xr.br.Reset()
	if _, err := io.Copy(&xr.br, xr.rd); err != nil {
		return 0, 0, err
	}

	// Search for and read the meta block.
	idx := meta.ReverseSearch(xr.br.Bytes())
	if idx < 0 {
		return 0, 0, errCorrupted
	}
	xr.br.Next(idx) // Skip data until magic marker
	xr.bw.Reset()
	xr.mr.Reset(&xr.br)
	if _, err := io.Copy(&xr.bw, &xr.mr); err != nil {
		return 0, 0, errWrap(err)
	}
	if xr.br.Len() != 0 || xr.mr.NumBlocks != 1 {
		return 0, 0, errCorrupted
	}
	if xr.mr.FinalMode != meta.FinalStream {
		return 0, 0, errCorrupted
	}
	if _, err := xr.rd.Seek(-xr.mr.InputOffset, io.SeekCurrent); err != nil {
		return 0, 0, err
	}

	// Parse the footer.
	bufRaw := xr.bw.Bytes()
	if len(bufRaw) < 3 || !bytes.Equal(bufRaw[:3], magic[:]) {
		return 0, 0, errCorrupted // Magic value mismatch
	}
	backSizeU64, cnt := binary.Uvarint(bufRaw[3:])
	if cnt <= 0 {
		return 0, 0, errCorrupted // Integer overflow for VLI
	}
	if len(bufRaw[3+cnt:]) > 0 {
		return 0, 0, errCorrupted // Trailing unread bytes
	}
	return int64(backSizeU64), xr.mr.InputOffset, nil
}
go
{ "resource": "" }
q9675
AppendRecord
train
func (idx *index) AppendRecord(compSize, rawSize int64, typ int) bool { if rawSize < 0 || compSize < 0 { return false // Invalid size } lastRec := idx.LastRecord() rec := record{ CompOffset: lastRec.CompOffset + compSize, RawOffset: lastRec.RawOffset + rawSize, Type: typ, } if rec.CompOffset < lastRec.CompOffset || rec.RawOffset < lastRec.RawOffset { return false // Overflow detected } idx.Records = append(idx.Records, rec) return true }
go
{ "resource": "" }
q9676
AppendIndex
train
// AppendIndex appends every record of other to idx, converting other's
// absolute offsets back into per-record sizes so they re-accumulate on
// top of idx's current offsets. On any failed append the records added
// so far are rolled back, keeping the operation atomic.
func (idx *index) AppendIndex(other *index) bool {
	var preRec record
	for i, rec := range other.Records {
		csize, rsize := rec.CompOffset-preRec.CompOffset, rec.RawOffset-preRec.RawOffset
		if !idx.AppendRecord(csize, rsize, rec.Type) {
			idx.Records = idx.Records[:len(idx.Records)-i] // Ensure atomic append
			return false
		}
		preRec = rec
	}
	return true
}
go
{ "resource": "" }
q9677
GetRecords
train
func (idx *index) GetRecords(i int) (prev, curr record) { recs := idx.Records if i > len(recs) { i = len(recs) } if i-1 >= 0 && i-1 < len(recs) { prev = recs[i-1] } if i >= 0 && i < len(recs) { curr = recs[i] } else { curr = prev curr.Type = unknownType } return prev, curr }
go
{ "resource": "" }
q9678
LastRecord
train
func (idx *index) LastRecord() record { var rec record if len(idx.Records) > 0 { rec = idx.Records[len(idx.Records)-1] } return rec }
go
{ "resource": "" }
q9679
update
train
func (r *stringReader) update() { pos, _ := r.Seek(0, io.SeekCurrent) if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { r.buf, r.pos = r.buf[off:], pos } else { r.buf, r.pos = nil, pos } }
go
{ "resource": "" }
q9680
computeRecords
train
func computeRecords(r io.Reader, lvl, chnkSize int) (strmRec indexRecord, chnkRecs []indexRecord) { var cw1, cw2 countWriter zw1, _ := flate.NewWriter(&cw1, lvl) // Streamed compressor zw2, _ := flate.NewWriter(&cw2, lvl) // Chunked compressor buf := make([]byte, chnkSize) for { // Read a full chunks worth of data. cnt, err := io.ReadFull(r, buf) strmRec.rawSize += int64(cnt) if err == io.EOF { break } // Write chunk to both compressors. if _, err := zw1.Write(buf[:cnt]); err != nil { log.Fatal(err) } if _, err := zw2.Write(buf[:cnt]); err != nil { log.Fatal(err) } // Flush the chunked compressor, append the record, and reset. if err := zw2.Flush(); err != nil { log.Fatal(err) } chnkRecs = append(chnkRecs, indexRecord{rawSize: int64(cnt), compSize: int64(cw2)}) cw2 = 0 zw2.Reset(&cw2) if err == io.ErrUnexpectedEOF { break } if err != nil { log.Fatal(err) } } // Flush the streamed compressor and record the compressed size. if err := zw1.Flush(); err != nil { log.Fatal(err) } strmRec.compSize = int64(cw1) return strmRec, chnkRecs }
go
{ "resource": "" }
q9681
compress
train
func compress(b []byte, lvl int) []byte { var buf bytes.Buffer w, err := flate.NewWriter(&buf, lvl) if err != nil { log.Fatal(err) } if _, err := io.Copy(w, bytes.NewReader(b)); err != nil { log.Fatal(err) } if err := w.Close(); err != nil { log.Fatal(err) } return buf.Bytes() }
go
{ "resource": "" }
q9682
encode
train
func encode(b []byte) []byte { var buf bytes.Buffer mw := meta.NewWriter(&buf) mw.FinalMode = meta.FinalMeta if _, err := io.Copy(mw, bytes.NewReader(b)); err != nil { log.Fatal(err) } if err := mw.Close(); err != nil { log.Fatal(err) } return buf.Bytes() }
go
{ "resource": "" }
q9683
NewWriter
train
func NewWriter(wr io.Writer, conf *WriterConfig) (*Writer, error) { var lvl int var nchk, nidx int64 if conf != nil { lvl = conf.Level switch { case conf.ChunkSize < 0: return nil, errorf(errors.Invalid, "invalid chunk size: %d", conf.ChunkSize) case conf.ChunkSize > 0: nchk = conf.ChunkSize } switch { case conf.IndexSize < 0: nidx = -1 case conf.IndexSize > 0: nidx = conf.IndexSize } } zw, err := newFlateWriter(wr, lvl) if err != nil { return nil, err } xw := &Writer{wr: wr, zw: zw, nchk: nchk, nidx: nidx} xw.Reset(wr) return xw, nil }
go
{ "resource": "" }
q9684
Reset
train
func (xw *Writer) Reset(wr io.Writer) error { *xw = Writer{ wr: wr, mw: xw.mw, zw: xw.zw, nchk: xw.nchk, nidx: xw.nidx, idx: xw.idx, } if xw.zw == nil { xw.zw, _ = newFlateWriter(wr, DefaultCompression) } else { xw.zw.Reset(wr) } if xw.nchk == 0 { xw.nchk = DefaultChunkSize } if xw.nidx == 0 { xw.nidx = DefaultIndexSize } xw.idx.Reset() return nil }
go
{ "resource": "" }
q9685
Write
train
// Write compresses buf, transparently splitting the input into chunks
// of at most nchk raw bytes; a full chunk triggers a FlushFull before
// compression continues. It returns the number of bytes consumed and
// the first error encountered, which then becomes sticky on the Writer.
func (xw *Writer) Write(buf []byte) (int, error) {
	if xw.err != nil {
		return 0, xw.err
	}

	var n, cnt int
	for len(buf) > 0 && xw.err == nil {
		// Flush chunk if necessary.
		remain := xw.nchk - xw.zw.InputOffset
		if remain <= 0 {
			xw.err = xw.Flush(FlushFull)
			continue
		}
		// Cap the write at whichever is smaller: the chunk budget or
		// the remaining input.
		if remain > int64(len(buf)) {
			remain = int64(len(buf))
		}

		// Compress data for current chunk, tracking how many
		// compressed bytes this write produced.
		offset := xw.zw.OutputOffset
		n, xw.err = xw.zw.Write(buf[:remain])
		xw.OutputOffset += xw.zw.OutputOffset - offset
		buf = buf[n:]
		cnt += n
	}
	xw.InputOffset += int64(cnt)
	return cnt, xw.err
}
go
{ "resource": "" }
q9686
Flush
train
// Flush flushes the stream according to mode:
//
//	FlushSync:  flushes the compressor so all written data becomes
//	            readable, without ending the current chunk.
//	FlushFull:  performs a FlushSync, records the finished chunk in
//	            the index, and resets the compressor; once nidx chunks
//	            have accumulated, the index itself is flushed.
//	FlushIndex: completes any partial chunk, then writes out the index
//	            and starts a fresh one, remembering the encoded size
//	            as BackSize for the next index.
//
// Any error encountered becomes sticky on the Writer.
func (xw *Writer) Flush(mode FlushMode) error {
	if xw.err != nil {
		return xw.err
	}

	switch mode {
	case FlushSync:
		offset := xw.zw.OutputOffset
		xw.err = xw.zw.Flush()
		xw.OutputOffset += xw.zw.OutputOffset - offset
		return xw.err
	case FlushFull:
		if xw.err = xw.Flush(FlushSync); xw.err != nil {
			return xw.err
		}
		xw.idx.AppendRecord(xw.zw.OutputOffset, xw.zw.InputOffset, deflateType)
		xw.zw.Reset(xw.wr)
		if int64(len(xw.idx.Records)) == xw.nidx {
			xw.err = xw.Flush(FlushIndex)
		}
		return xw.err
	case FlushIndex:
		// Complete the in-progress chunk (if any) before encoding.
		if xw.zw.InputOffset+xw.zw.OutputOffset > 0 {
			if err := xw.Flush(FlushFull); err != nil {
				return err
			}
		}
		xw.err = xw.encodeIndex(&xw.idx)
		backSize := xw.idx.IndexSize
		xw.idx.Reset()
		xw.idx.BackSize = backSize
		return xw.err
	default:
		return errorf(errors.Invalid, "invalid flush mode: %d", mode)
	}
}
go
{ "resource": "" }
q9687
Close
train
func (xw *Writer) Close() error { if xw.err == errClosed { return nil } if xw.err != nil { return xw.err } // Flush final index. if xw.zw.OutputOffset+xw.zw.InputOffset > 0 || len(xw.idx.Records) > 0 { xw.err = xw.Flush(FlushIndex) if xw.err != nil { return xw.err } } // Encode the footer. err := xw.encodeFooter(xw.idx.BackSize) if err != nil { xw.err = err } else { xw.err = errClosed } return err }
go
{ "resource": "" }
q9688
encodeIndex
train
func (xw *Writer) encodeIndex(index *index) error { // Helper function to write VLIs. var crc uint32 var errVLI error writeVLI := func(x int64) { b := xw.scratch[:binary.PutUvarint(xw.scratch[:], uint64(x))] crc = crc32.Update(crc, crc32.MakeTable(crc32.IEEE), b) if _, err := xw.mw.Write(b); err != nil { errVLI = errWrap(err) } } // Write the index. xw.mw.Reset(xw.wr) defer func() { xw.OutputOffset += xw.mw.OutputOffset }() xw.mw.FinalMode = meta.FinalMeta writeVLI(index.BackSize) writeVLI(int64(len(index.Records))) writeVLI(index.LastRecord().CompOffset) writeVLI(index.LastRecord().RawOffset) var preRec record for _, rec := range index.Records { writeVLI(rec.CompOffset - preRec.CompOffset) writeVLI(rec.RawOffset - preRec.RawOffset) preRec = rec } if errVLI != nil { return errWrap(errVLI) } binary.LittleEndian.PutUint32(xw.scratch[:], crc) if _, err := xw.mw.Write(xw.scratch[:4]); err != nil { return errWrap(err) } if err := xw.mw.Close(); err != nil { return errWrap(err) } index.IndexSize = xw.mw.OutputOffset // Record the encoded size return nil }
go
{ "resource": "" }
q9689
encodeFooter
train
func (xw *Writer) encodeFooter(backSize int64) error { var n int n += copy(xw.scratch[n:], magic[:]) n += binary.PutUvarint(xw.scratch[n:], uint64(backSize)) xw.mw.Reset(xw.wr) defer func() { xw.OutputOffset += xw.mw.OutputOffset }() xw.mw.FinalMode = meta.FinalStream if _, err := xw.mw.Write(xw.scratch[:n]); err != nil { return errWrap(err) } if err := xw.mw.Close(); err != nil { return errWrap(err) } if xw.mw.NumBlocks != 1 { return errorf(errors.Internal, "footer was not a single block") } return nil }
go
{ "resource": "" }
q9690
NewWriter
train
func NewWriter(wr io.Writer) *Writer { mw := new(Writer) mw.Reset(wr) return mw }
go
{ "resource": "" }
q9691
Reset
train
func (mw *Writer) Reset(wr io.Writer) { *mw = Writer{ wr: wr, bw: mw.bw, bb: mw.bb, cnts: mw.cnts, } return }
go
{ "resource": "" }
q9692
Write
train
// Write buffers buf byte-by-byte, encoding the buffered bytes into a
// meta block whenever the next byte could no longer be guaranteed to
// fit in a single encodable block. It returns the number of bytes
// consumed and the first error encountered, which then becomes sticky
// on the Writer.
func (mw *Writer) Write(buf []byte) (int, error) {
	if mw.err != nil {
		return 0, mw.err
	}

	var wrCnt int
	for _, b := range buf {
		zeros, ones := numBits(b)

		// If possible, avoid flushing to maintain high efficiency.
		if ensured := mw.bufCnt < EnsureRawBytes; ensured {
			goto skipEncode
		}
		// Keep buffering while the block (including this byte's bit
		// counts) still has a valid Huffman encoding.
		if huffLen, _ := mw.computeHuffLen(mw.buf0s+zeros, mw.buf1s+ones); huffLen > 0 {
			goto skipEncode
		}
		mw.err = mw.encodeBlock(FinalNil)
		if mw.err != nil {
			break
		}

	skipEncode:
		// Account for the byte's bit population and append it.
		mw.buf0s += zeros
		mw.buf1s += ones
		mw.buf[mw.bufCnt] = b
		mw.bufCnt++
		wrCnt++
	}

	mw.InputOffset += int64(wrCnt)
	return wrCnt, mw.err
}
go
{ "resource": "" }
q9693
Close
train
func (mw *Writer) Close() error { if mw.err == errClosed { return nil } if mw.err != nil { return mw.err } err := mw.encodeBlock(mw.FinalMode) if err != nil { mw.err = err } else { mw.err = errClosed } mw.wr = nil // Release reference to underlying Writer return err }
go
{ "resource": "" }
q9694
update
train
func (c *crc) update(buf []byte) { cval := internal.ReverseUint32(c.val) for len(buf) > 0 { n := len(buf) if n > len(c.buf) { n = len(c.buf) } for i, b := range buf[:n] { c.buf[i] = internal.ReverseLUT[b] } cval = crc32.Update(cval, crc32.IEEETable, c.buf[:n]) buf = buf[n:] } c.val = internal.ReverseUint32(cval) }
go
{ "resource": "" }
q9695
readBlockHeader
train
// readBlockHeader reads the 3-bit DEFLATE block header (RFC 1951,
// section 3.2.3) and dispatches zr.step to the handler for the block
// type. Corrupted headers panic via panicf (recovered by the caller).
func (zr *Reader) readBlockHeader() {
	zr.last = zr.rd.ReadBits(1) == 1
	switch zr.rd.ReadBits(2) {
	case 0:
		// Raw block (RFC section 3.2.4).
		zr.rd.ReadPads()

		// LEN is followed by NLEN, its ones-complement.
		n := uint16(zr.rd.ReadBits(16))
		nn := uint16(zr.rd.ReadBits(16))
		if n^nn != 0xffff {
			panicf(errors.Corrupted, "raw block size mismatch")
		}
		zr.blkLen = int(n)

		// By convention, an empty block flushes the read buffer.
		if zr.blkLen == 0 {
			zr.toRead = zr.dict.ReadFlush()
			zr.finishBlock()
			return
		}
		zr.step = (*Reader).readRawData
	case 1:
		// Fixed prefix block (RFC section 3.2.6).
		zr.litTree, zr.distTree = &decLit, &decDist
		zr.step = (*Reader).readBlock
	case 2:
		// Dynamic prefix block (RFC section 3.2.7).
		zr.litTree, zr.distTree = &zr.pd1, &zr.pd2
		zr.rd.ReadPrefixCodes(zr.litTree, zr.distTree)
		zr.step = (*Reader).readBlock
	default:
		// Reserved block (RFC section 3.2.3).
		panicf(errors.Corrupted, "encountered reserved block")
	}
}
go
{ "resource": "" }
q9696
readRawData
train
func (zr *Reader) readRawData() { buf := zr.dict.WriteSlice() if len(buf) > zr.blkLen { buf = buf[:zr.blkLen] } cnt, err := zr.rd.Read(buf) zr.blkLen -= cnt zr.dict.WriteMark(cnt) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } errors.Panic(err) } if zr.blkLen > 0 { zr.toRead = zr.dict.ReadFlush() zr.step = (*Reader).readRawData // We need to continue this work return } zr.finishBlock() }
go
{ "resource": "" }
q9697
readBlock
train
// readBlock decompresses one prefix-coded block (fixed or dynamic
// trees), implemented as a resumable state machine: whenever the
// dictionary window fills, it flushes and arranges for the next call
// to resume exactly where it left off. zr.stepState records whether a
// backwards copy was in progress at the point of suspension.
func (zr *Reader) readBlock() {
	const (
		stateInit = iota // Zero value must be stateInit
		stateDict
	)

	switch zr.stepState {
	case stateInit:
		goto readLiteral
	case stateDict:
		goto copyDistance
	}

readLiteral:
	// Read literal and/or (length, distance) according to RFC section 3.2.3.
	{
		// Suspend if the window has no room left.
		if zr.dict.AvailSize() == 0 {
			zr.toRead = zr.dict.ReadFlush()
			zr.step = (*Reader).readBlock
			zr.stepState = stateInit // Need to continue work here
			return
		}

		// Read the literal symbol.
		litSym, ok := zr.rd.TryReadSymbol(zr.litTree)
		if !ok {
			litSym = zr.rd.ReadSymbol(zr.litTree)
		}
		switch {
		case litSym < endBlockSym:
			zr.dict.WriteByte(byte(litSym))
			goto readLiteral
		case litSym == endBlockSym:
			zr.finishBlock()
			zr.stepState = stateInit // Next call to readBlock must start here
			return
		case litSym < maxNumLitSyms:
			// Decode the copy length.
			rec := lenRanges[litSym-257]
			extra, ok := zr.rd.TryReadBits(uint(rec.Len))
			if !ok {
				extra = zr.rd.ReadBits(uint(rec.Len))
			}
			zr.cpyLen = int(rec.Base) + int(extra)

			// Read the distance symbol.
			distSym, ok := zr.rd.TryReadSymbol(zr.distTree)
			if !ok {
				distSym = zr.rd.ReadSymbol(zr.distTree)
			}
			if distSym >= maxNumDistSyms {
				panicf(errors.Corrupted, "invalid distance symbol: %d", distSym)
			}

			// Decode the copy distance.
			rec = distRanges[distSym]
			extra, ok = zr.rd.TryReadBits(uint(rec.Len))
			if !ok {
				extra = zr.rd.ReadBits(uint(rec.Len))
			}
			zr.dist = int(rec.Base) + int(extra)
			if zr.dist > zr.dict.HistSize() {
				panicf(errors.Corrupted, "copy distance exceeds window history")
			}

			goto copyDistance
		default:
			panicf(errors.Corrupted, "invalid literal symbol: %d", litSym)
		}
	}

copyDistance:
	// Perform a backwards copy according to RFC section 3.2.3.
	{
		cnt := zr.dict.TryWriteCopy(zr.dist, zr.cpyLen)
		if cnt == 0 {
			cnt = zr.dict.WriteCopy(zr.dist, zr.cpyLen)
		}
		zr.cpyLen -= cnt

		// Suspend mid-copy if the window filled before finishing.
		if zr.cpyLen > 0 {
			zr.toRead = zr.dict.ReadFlush()
			zr.step = (*Reader).readBlock
			zr.stepState = stateDict // Need to continue work here
			return
		}
		goto readLiteral
	}
}
go
{ "resource": "" }
q9698
finishBlock
train
func (zr *Reader) finishBlock() { if zr.last { zr.rd.ReadPads() zr.err = io.EOF } zr.step = (*Reader).readBlockHeader }
go
{ "resource": "" }
q9699
FlushOffset
train
// FlushOffset discards every fully-consumed byte from the buffered
// reader, advances br.offset by the number of bytes discarded, and
// returns the new offset. Bits of a partially-consumed byte stay
// pending in discardBits for a later flush. Without a buffered reader
// the offset is already exact.
func (br *bitReader) FlushOffset() int64 {
	if br.bufRd == nil {
		return br.offset
	}

	// Update the number of total bits to discard.
	// fedBits-numBits is how many of the previously-fed bits have been
	// consumed since the last flush.
	br.discardBits += int(br.fedBits - br.numBits)
	br.fedBits = br.numBits

	// Discard some bytes to update read offset.
	nd := (br.discardBits + 7) / 8 // Round up to nearest byte
	nd, _ = br.bufRd.Discard(nd)
	br.discardBits -= nd * 8 // -7..0

	br.offset += int64(nd)

	// These are invalid after Discard.
	br.bufPeek = nil
	return br.offset
}
go
{ "resource": "" }