_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q17400
FromContext
train
func FromContext(ctx context.Context) lager.Logger { l, ok := ctx.Value(contextKey{}).(lager.Logger) if !ok { return &discardLogger{} } return l }
go
{ "resource": "" }
q17401
WithSession
train
// WithSession returns the context's logger scoped to the given session
// task, optionally seeded with data.
func WithSession(ctx context.Context, task string, data ...lager.Data) lager.Logger {
	logger := FromContext(ctx)
	return logger.Session(task, data...)
}
go
{ "resource": "" }
q17402
WithData
train
// WithData returns the context's logger with the given data attached.
func WithData(ctx context.Context, data lager.Data) lager.Logger {
	logger := FromContext(ctx)
	return logger.WithData(data)
}
go
{ "resource": "" }
q17403
Adquire
train
// Adquire locks and returns the per-key mutex, lazily creating it under the
// bucket's global lock. ("Adquire" is a typo for "Acquire" but is the
// exported API name, so it is kept for compatibility.)
// NOTE(review): the deferred bucket unlock only fires after lock.Lock()
// returns, so a goroutine blocking on a busy key also blocks every other
// Adquire on the same bucket — presumably acceptable here; confirm.
func (allLocks *URLLock) Adquire(key string) *sync.Mutex { bucketIndex := allLocks.getBucketIndexForKey(key) allLocks.globalLocks[bucketIndex].Lock() defer allLocks.globalLocks[bucketIndex].Unlock() lock, exists := allLocks.keys[bucketIndex][key] if !exists { lock = new(sync.Mutex) allLocks.keys[bucketIndex][key] = lock } lock.Lock() return lock }
go
{ "resource": "" }
q17404
NewHandler
train
// NewHandler builds a caching middleware handler wired with a fresh HTTP
// cache (keyed by config.CacheKeyTemplate), fresh URL locks, and the next
// handler in the chain.
func NewHandler(Next httpserver.Handler, config *Config) *Handler {
	h := &Handler{
		Config:   config,
		Cache:    NewHTTPCache(config.CacheKeyTemplate),
		URLLocks: NewURLLock(),
		Next:     Next,
	}
	return h
}
go
{ "resource": "" }
q17405
Flush
train
func (b *NoStorage) Flush() error { if f, ok := b.w.(http.Flusher); ok { f.Flush() } return nil }
go
{ "resource": "" }
q17406
NewHTTPCacheEntry
train
func NewHTTPCacheEntry(key string, request *http.Request, response *Response, config *Config) *HTTPCacheEntry { isPublic, expiration := getCacheableStatus(request, response, config) return &HTTPCacheEntry{ key: key, isPublic: isPublic, expiration: expiration, Request: request, Response: response, } }
go
{ "resource": "" }
q17407
WriteBodyTo
train
func (e *HTTPCacheEntry) WriteBodyTo(w http.ResponseWriter) error { if !e.isPublic { return e.writePrivateResponse(w) } return e.writePublicResponse(w) }
go
{ "resource": "" }
q17408
NewFileStorage
train
func NewFileStorage(path string) (ResponseStorage, error) { file, err := ioutil.TempFile(path, "caddy-cache-") if err != nil { return nil, err } return &FileStorage{ file: file, subscription: NewSubscription(), }, nil }
go
{ "resource": "" }
q17409
Flush
train
func (f *FileStorage) Flush() error { defer f.subscription.NotifyAll(0) return f.file.Sync() }
go
{ "resource": "" }
q17410
Clean
train
func (f *FileStorage) Clean() error { f.subscription.WaitAll() // Wait until every subscriber ends waiting every result return os.Remove(f.file.Name()) }
go
{ "resource": "" }
q17411
Close
train
func (f *FileStorage) Close() error { f.subscription.Close() return f.file.Close() }
go
{ "resource": "" }
q17412
GetReader
train
func (f *FileStorage) GetReader() (io.ReadCloser, error) { newFile, err := os.Open(f.file.Name()) if err != nil { return nil, err } return &FileReader{ content: newFile, subscription: f.subscription.NewSubscriber(), unsubscribe: f.subscription.RemoveSubscriber, }, nil }
go
{ "resource": "" }
q17413
Close
train
func (r *FileReader) Close() error { err := r.content.Close() r.unsubscribe(r.subscription) return err }
go
{ "resource": "" }
q17414
Setup
train
func Setup(c *caddy.Controller) error { config, err := cacheParse(c) if err != nil { return err } httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { return NewHandler(next, config) }) c.OnStartup(func() error { if config.Path == "" { return nil } return os.MkdirAll(config.Path, 0600) }) return nil }
go
{ "resource": "" }
q17415
NewResponse
train
func NewResponse() *Response { r := &Response{ Code: 200, HeaderMap: http.Header{}, body: nil, closeNotify: make(chan bool, 1), bodyLock: new(sync.RWMutex), closedLock: new(sync.RWMutex), headersLock: new(sync.RWMutex), } r.bodyLock.Lock() r.closedLock.Lock() r.headersLock.Lock() return r }
go
{ "resource": "" }
q17416
writeHeader
train
// writeHeader lazily emits the header section exactly once. When neither
// Content-Type nor Transfer-Encoding is set, it sniffs a Content-Type from
// b (or from str when b is nil), capping str at 512 bytes — the amount
// http.DetectContentType examines. The status written here is always 200;
// other codes must go through WriteHeader directly.
func (rw *Response) writeHeader(b []byte, str string) { if rw.wroteHeader { return } if len(str) > 512 { str = str[:512] } m := rw.Header() _, hasType := m["Content-Type"] hasTE := m.Get("Transfer-Encoding") != "" if !hasType && !hasTE { if b == nil { b = []byte(str) } m.Set("Content-Type", http.DetectContentType(b)) } rw.WriteHeader(200) }
go
{ "resource": "" }
q17417
Write
train
func (rw *Response) Write(buf []byte) (int, error) { if !rw.wroteHeader { rw.writeHeader(buf, "") } if !rw.firstByteSent { rw.firstByteSent = true rw.WaitBody() } if rw.body != nil { return rw.body.Write(buf) } return 0, errors.New("No storage") }
go
{ "resource": "" }
q17418
Clean
train
func (rw *Response) Clean() error { if rw.body == nil { return nil } return rw.body.Clean() }
go
{ "resource": "" }
q17419
Get
train
func Get(name string, c echo.Context) (*sessions.Session, error) { store := c.Get(key).(sessions.Store) return store.Get(c.Request(), name) }
go
{ "resource": "" }
q17420
Middleware
train
func Middleware(store sessions.Store) echo.MiddlewareFunc { c := DefaultConfig c.Store = store return MiddlewareWithConfig(c) }
go
{ "resource": "" }
q17421
Middleware
train
func Middleware(ce *casbin.Enforcer) echo.MiddlewareFunc { c := DefaultConfig c.Enforcer = ce return MiddlewareWithConfig(c) }
go
{ "resource": "" }
q17422
update
train
func (s *stats) update(channel string, eventID int64) { s.Lock() if _, ok := s.ChannelStats[channel]; !ok { s.ChannelStats[channel] = make(eventIDStat) } s.ChannelStats[channel][eventID]++ s.EventCount++ s.Unlock() }
go
{ "resource": "" }
q17423
print
train
func (s *stats) print() { fmt.Printf("Channel,EventID,Count\n") for c := range s.ChannelStats { for eid, cnt := range s.ChannelStats[c] { fmt.Printf("%s,%d,%d\n", c, eid, cnt) } } //fmt.Printf("Total Events: %d\n", s.EventCount) }
go
{ "resource": "" }
q17424
fetchChunkFromReader
train
func fetchChunkFromReader(r io.ReadSeeker, offset int64) (evtx.Chunk, error) { var err error c := evtx.NewChunk() evtx.GoToSeeker(r, offset) c.Offset = offset c.Data = make([]byte, evtx.ChunkSize) if _, err = r.Read(c.Data); err != nil { return c, err } reader := bytes.NewReader(c.Data) c.ParseChunkHeader(reader) if err = c.Header.Validate(); err != nil { return c, err } // Go to after Header evtx.GoToSeeker(reader, int64(c.Header.SizeHeader)) c.ParseStringTable(reader) err = c.ParseTemplateTable(reader) if err != nil { return c, err } err = c.ParseEventOffsets(reader) if err != nil { return c, err } return c, nil }
go
{ "resource": "" }
q17425
carveFile
train
// carveFile scans datafile for chunk signatures starting at offset and
// prints the events of every chunk found, stopping after limit chunks
// (limit <= 0 means unlimited). Two handles on the same file are used:
// f feeds the signature scanner while dup re-reads each chunk's data.
// NOTE(review): parse errors are only logged and the (possibly empty)
// chunk's events are still iterated; os.SEEK_SET is deprecated in favor
// of io.SeekStart — confirm before modernizing.
func carveFile(datafile string, offset int64, limit int) { chunkCnt := 0 f, err := os.Open(datafile) if err != nil { log.LogErrorAndExit(err) } defer f.Close() f.Seek(offset, os.SEEK_SET) dup, err := os.Open(datafile) if err != nil { log.LogErrorAndExit(err) } defer dup.Close() dup.Seek(offset, os.SEEK_SET) for offset := range findChunksOffsets(f) { log.Infof("Parsing Chunk @ Offset: %d (0x%08[1]x)", offset) chunk, err := fetchChunkFromReader(dup, offset) if err != nil { log.LogError(err) } for e := range chunk.Events() { printEvent(e) } chunkCnt++ if limit > 0 && chunkCnt >= limit { break } log.Debug("End of the loop") } }
go
{ "resource": "" }
q17426
printEvent
train
// printEvent prints the event as JSON (optionally prefixed with its Unix
// timestamp) unless it falls outside the configured start/stop window.
// The first check (both bounds set) is subsumed by the two single-bound
// checks that follow — redundant but harmless.
// NOTE(review): t is used in the window comparisons before err from
// GetTime is examined; when the time is missing, t is the zero time and
// the window filters may wrongly drop the event — confirm intended.
func printEvent(e *evtx.GoEvtxMap) { if e != nil { t, err := e.GetTime(&evtx.SystemTimePath) // If not between start and stop we do not print if time.Time(start) != defaultTime && time.Time(stop) != defaultTime { if t.Before(time.Time(start)) || t.After(time.Time(stop)) { return } } // If before start we do not print if time.Time(start) != defaultTime { if t.Before(time.Time(start)) { return } } // If after stop we do not print if time.Time(stop) != defaultTime { if t.After(time.Time(stop)) { return } } if timestamp { if err == nil { fmt.Printf("%d: %s\n", t.Unix(), string(evtx.ToJSON(e))) } else { log.Errorf("Event time not found: %s", string(evtx.ToJSON(e))) } } else { fmt.Printf("%s\n", string(evtx.ToJSON(e))) } } }
go
{ "resource": "" }
q17427
New
train
func New(filepath string) (ef File, err error) { file, err := os.Open(filepath) if err != nil { return } ef.file = file ef.ParseFileHeader() return }
go
{ "resource": "" }
q17428
ParseFileHeader
train
func (ef *File) ParseFileHeader() { ef.Lock() defer ef.Unlock() GoToSeeker(ef.file, 0) err := encoding.Unmarshal(ef.file, &ef.Header, Endianness) if err != nil { panic(err) } }
go
{ "resource": "" }
q17429
FetchChunk
train
func (ef *File) FetchChunk(offset int64) (Chunk, error) { ef.Lock() defer ef.Unlock() c := NewChunk() GoToSeeker(ef.file, offset) c.Offset = offset c.Data = make([]byte, ChunkSize) if _, err := ef.file.Read(c.Data); err != nil { return c, err } reader := bytes.NewReader(c.Data) c.ParseChunkHeader(reader) // Go to after Header GoToSeeker(reader, int64(c.Header.SizeHeader)) c.ParseStringTable(reader) if err := c.ParseTemplateTable(reader); err != nil { return c, err } if err := c.ParseEventOffsets(reader); err != nil { return c, err } return c, nil }
go
{ "resource": "" }
q17430
monitorChunks
train
// monitorChunks polls the file for new chunks and streams them, sorted, on
// the returned channel. Each loop iteration re-parses the file header,
// fingerprints every chunk by its (FirstEventRecID, LastEventRecID) pair,
// skips chunks already seen (markedChunks), sorts the new ones, re-fetches
// each fully parsed, and sends it. The marker cache is pruned to the
// chunks currently present. The first pass seeds the markers without
// emitting when monitorExisting is false. Monitoring stops on a signal
// via stop, or permanently once ChunkCount reaches MaxUint16.
// NOTE(review): FetchRawChunk errors other than io.EOF panic — confirm
// that is the intended failure mode for a long-running monitor.
func (ef *File) monitorChunks(stop chan bool, sleep time.Duration) (cc chan Chunk) { cc = make(chan Chunk, 4) sleepTime := sleep markedChunks := datastructs.NewSyncedSet() // Main routine to feed the Chunk Channel go func() { defer close(cc) firstLoopFlag := !ef.monitorExisting for { // Parse the file header again to get the updates in the file ef.ParseFileHeader() // check if we should stop or not select { case <-stop: return default: // go through } curChunks := datastructs.NewSyncedSet() //cs := make(ChunkSorter, 0, ef.Header.ChunkCount) ss := datastructs.NewSortedSlice(0, int(ef.Header.ChunkCount)) for i := uint16(0); i < ef.Header.ChunkCount; i++ { offsetChunk := int64(ef.Header.ChunkDataOffset) + int64(ChunkSize)*int64(i) chunk, err := ef.FetchRawChunk(offsetChunk) curChunks.Add(chunk.Header.FirstEventRecID, chunk.Header.LastEventRecID) // We append only the Chunks whose EventRecordIds have not been treated yet if markedChunks.Contains(chunk.Header.FirstEventRecID) && markedChunks.Contains(chunk.Header.LastEventRecID) { continue } switch { case err != nil && err != io.EOF: panic(err) case err == nil: markedChunks.Add(chunk.Header.FirstEventRecID) markedChunks.Add(chunk.Header.LastEventRecID) if !firstLoopFlag { //cs = append(cs, chunk) ss.Insert(chunk) } } } // Cleanup the useless cache entries (consider putting in go routine if worth) markedChunks = datastructs.NewSyncedSet(markedChunks.Intersect(&curChunks)) // We flag out of first loop firstLoopFlag = false // We sort out the chunks //sort.Stable(cs) //for _, rc := range cs { for rc := range ss.ReversedIter() { chunk, err := ef.FetchChunk(rc.(Chunk).Offset) switch { case err != nil && err != io.EOF: panic(err) case err == nil: cc <- chunk } } // Check if we should quit if ef.Header.ChunkCount >= math.MaxUint16 { log.Info("Monitoring stopped: maximum chunk number reached") break } // Sleep between loops time.Sleep(sleepTime) } }() return }
go
{ "resource": "" }
q17431
MonitorEvents
train
// MonitorEvents streams events from newly appearing chunks on the returned
// channel. It pipes monitorChunks through a bounded queue of per-chunk
// event channels (capacity MaxJobs) and drains them in order, preserving
// chunk ordering while letting the next chunk's events be produced ahead.
// An optional sleep overrides DefaultMonitorSleep as the poll interval.
// Monitoring ends when a value is sent on stop.
func (ef *File) MonitorEvents(stop chan bool, sleep ...time.Duration) (cgem chan *GoEvtxMap) { // Normally, it should not be needed to add a second check here on the // EventRecordID since the record ids in the chunks are not supposed to overlap // TODO: Add a EventRecordID marker if needed sleepTime := DefaultMonitorSleep if len(sleep) > 0 { sleepTime = sleep[0] } jobs := MaxJobs cgem = make(chan *GoEvtxMap, 42) go func() { defer close(cgem) chanQueue := make(chan (chan *GoEvtxMap), jobs) go func() { defer close(chanQueue) // this chan ends only when value is put into stop for pc := range ef.monitorChunks(stop, sleepTime) { // We have to create a copy here because otherwise cpc.EventsChan() fails // I guess that because EventsChan takes a pointer to an object // and thus the chan is taken on the pointer and since the object pointed // changes -> kaboom cpc := pc ev := cpc.Events() chanQueue <- ev } }() for ec := range chanQueue { for event := range ec { cgem <- event } } }() return }
go
{ "resource": "" }
q17432
MarshalJSON
train
// MarshalJSON encodes the time as a quoted RFC3339Nano string in UTC.
func (u UTCTime) MarshalJSON() ([]byte, error) {
	formatted := time.Time(u).UTC().Format(time.RFC3339Nano)
	return []byte(fmt.Sprintf("\"%s\"", formatted)), nil
}
go
{ "resource": "" }
q17433
Validate
train
func (ch *ChunkHeader) Validate() error { if string(ch.Magic[:]) != ChunkMagic { return fmt.Errorf("Invalid chunk magic: %q", ch.Magic) } if ch.SizeHeader != 128 { return fmt.Errorf("Invalid chunk header size: %d instead of 128", ch.SizeHeader) } if ch.OffsetLastRec >= ChunkSize { return fmt.Errorf("Last event offset exceed size of chunk") } return nil }
go
{ "resource": "" }
q17434
ParseChunkHeader
train
func (c *Chunk) ParseChunkHeader(reader io.ReadSeeker) { err := encoding.Unmarshal(reader, &c.Header, Endianness) if err != nil { panic(err) } }
go
{ "resource": "" }
q17435
Less
train
func (c Chunk) Less(s *datastructs.Sortable) bool { other := (*s).(Chunk) return c.Header.NumFirstRecLog < other.Header.NumFirstRecLog }
go
{ "resource": "" }
q17436
ParseStringTable
train
func (c *Chunk) ParseStringTable(reader io.ReadSeeker) { strOffset := int32(0) for i := int64(0); i < sizeStringBucket*4; i += 4 { encoding.Unmarshal(reader, &strOffset, Endianness) if strOffset > 0 { cs, err := StringAt(reader, int64(strOffset)) if err != nil { if !ModeCarving { panic(err) } } c.StringTable[strOffset] = cs } } return }
go
{ "resource": "" }
q17437
ParseTemplateTable
train
// ParseTemplateTable walks the chunk's template-offset buckets, and for
// each non-zero offset seeks into the template data, parses a template
// definition, caches it by offset, and restores the reader position. Any
// decode/parse error is logged (DebugDontPanic) and returned.
func (c *Chunk) ParseTemplateTable(reader io.ReadSeeker) error { templateDataOffset := int32(0) for i := int32(0); i < sizeTemplateBucket*4; i = i + 4 { //parse(buf, i, &tempOffsetTable[j]) err := encoding.Unmarshal(reader, &templateDataOffset, Endianness) if err != nil { // panic(err) log.DebugDontPanic(err) return err } if templateDataOffset > 0 { backup := BackupSeeker(reader) // We arrive in template data, we have to do some offset patching in order to get // back to TemplateInstance token and make it easily parsable by binxml.Parse GoToSeeker(reader, int64(templateDataOffset)) tdd := TemplateDefinitionData{} err := tdd.Parse(reader) if err != nil { //panic(err) log.DebugDontPanic(err) return err } c.TemplateTable[templateDataOffset] = tdd GoToSeeker(reader, backup) } } return nil }
go
{ "resource": "" }
q17438
ParseEventOffsets
train
// ParseEventOffsets builds the list of event offsets by following event
// headers from the reader's current position up to the header's
// last-record offset. An invalid event header aborts with an error —
// except in carving mode, where the scan advances one byte at a time
// (bruteforce) until a valid header is found again.
func (c *Chunk) ParseEventOffsets(reader io.ReadSeeker) (err error) { c.EventOffsets = make([]int32, 0) offsetEvent := int32(BackupSeeker(reader)) c.EventOffsets = append(c.EventOffsets, offsetEvent) for offsetEvent <= c.Header.OffsetLastRec { eh := EventHeader{} GoToSeeker(reader, int64(offsetEvent)) if err = encoding.Unmarshal(reader, &eh, Endianness); err != nil { log.DebugDontPanic(err) return err } // Event Header is not valid if err = eh.Validate(); err != nil { // we bruteforce in carving mode if ModeCarving { offsetEvent++ continue } return err } offsetEvent += eh.Size c.EventOffsets = append(c.EventOffsets, offsetEvent) } return nil }
go
{ "resource": "" }
q17439
ParseEvent
train
// ParseEvent decodes the event header located at offset within the chunk's
// raw data. Offsets past the chunk's last record return a zero Event; a
// decoding failure panics. (The commented-out code is the former
// field-by-field decoding, now replaced by a single Unmarshal.)
func (c *Chunk) ParseEvent(offset int64) (e Event) { if int64(c.Header.OffsetLastRec) < offset { return } reader := bytes.NewReader(c.Data) GoToSeeker(reader, offset) e.Offset = offset err := encoding.Unmarshal(reader, &e.Header, Endianness) if err != nil { panic(err) } /*err := encoding.Unmarshal(reader, &e.Magic, Endianness) if err != nil { panic(err) } err = encoding.Unmarshal(reader, &e.Size, Endianness) if err != nil { panic(err) } err = encoding.Unmarshal(reader, &e.ID, Endianness) if err != nil { panic(err) } err = encoding.Unmarshal(reader, &e.Timestamp, Endianness) if err != nil { panic(err) }*/ return e }
go
{ "resource": "" }
q17440
Parse
train
// Parse decodes an entity reference: its token, the name offset, and then
// the name itself. When the name immediately follows the current position
// it is parsed in place (the reader advances past it); otherwise the
// reader jumps to NameOffset, parses the name, and restores its previous
// position before returning.
func (e *BinXMLEntityReference) Parse(reader io.ReadSeeker) error { err := encoding.Unmarshal(reader, &e.Token, Endianness) if err != nil { return err } err = encoding.Unmarshal(reader, &e.NameOffset, Endianness) if err != nil { return err } o := BackupSeeker(reader) // if the Entity Name is just after if int64(e.NameOffset) == o { return e.Name.Parse(reader) } // We jump to the right offset GoToSeeker(reader, int64(e.NameOffset)) err = e.Name.Parse(reader) // We restore our position GoToSeeker(reader, o) return err }
go
{ "resource": "" }
q17441
Get
train
func (pg *GoEvtxMap) Get(path *GoEvtxPath) (*GoEvtxElement, error) { if len(*path) > 0 { if i, ok := (*pg)[(*path)[0]]; ok { if len(*path) == 1 { cge := GoEvtxElement(i) return &cge, nil } switch i.(type) { case GoEvtxMap: p := i.(GoEvtxMap) np := (*path)[1:] return p.Get(&np) case map[string]interface{}: p := GoEvtxMap(i.(map[string]interface{})) np := (*path)[1:] return p.Get(&np) } } } return nil, &ErrEvtxEltNotFound{*path} }
go
{ "resource": "" }
q17442
GetString
train
func (pg *GoEvtxMap) GetString(path *GoEvtxPath) (string, error) { pE, err := pg.Get(path) if err != nil { return "", err } if s, ok := (*pE).(string); ok { return s, nil } return "", fmt.Errorf("Bad type expect string got %T", (*pE)) }
go
{ "resource": "" }
q17443
GetBool
train
func (pg *GoEvtxMap) GetBool(path *GoEvtxPath) (bool, error) { s, err := pg.GetString(path) if err != nil { return false, &ErrEvtxEltNotFound{*path} } b, err := strconv.ParseBool(s) if err != nil { return false, err } return b, err }
go
{ "resource": "" }
q17444
GetInt
train
func (pg *GoEvtxMap) GetInt(path *GoEvtxPath) (int64, error) { s, err := pg.GetString(path) if err != nil { return 0, &ErrEvtxEltNotFound{*path} } i, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0, err } return i, nil }
go
{ "resource": "" }
q17445
GetUint
train
func (pg *GoEvtxMap) GetUint(path *GoEvtxPath) (uint64, error) { s, err := pg.GetString(path) if err != nil { return 0, &ErrEvtxEltNotFound{*path} } u, err := strconv.ParseUint(s, 10, 64) if err != nil { return 0, err } return u, nil }
go
{ "resource": "" }
q17446
GetTime
train
func (pg *GoEvtxMap) GetTime(path *GoEvtxPath) (time.Time, error) { t, err := pg.Get(path) if err != nil { return time.Time{}, &ErrEvtxEltNotFound{*path} } // If the value was extracted from raw BinXML (not a template) it may happen // that the value stored at path is a string since in raw BinXML everything // seems to be ValueText switch (*t).(type) { case time.Time: return (*t).(time.Time), nil case UTCTime: return time.Time((*t).(UTCTime)), nil case string: return time.Parse(time.RFC3339Nano, (*t).(string)) default: return time.Time{}, fmt.Errorf("Cannot convert %T to time.Time", *t) } }
go
{ "resource": "" }
q17447
Equal
train
func (pg *GoEvtxMap) Equal(path *GoEvtxPath, i interface{}) bool { t, err := pg.Get(path) if err != nil { return false } return reflect.DeepEqual(*t, i) }
go
{ "resource": "" }
q17448
AnyEqual
train
func (pg *GoEvtxMap) AnyEqual(path *GoEvtxPath, is []interface{}) bool { t, err := pg.Get(path) if err != nil { return false } for _, i := range is { if reflect.DeepEqual(i, *t) { return true } } return false }
go
{ "resource": "" }
q17449
RegexMatch
train
func (pg *GoEvtxMap) RegexMatch(path *GoEvtxPath, pattern *regexp.Regexp) bool { s, err := pg.GetString(path) if err != nil { return false } return pattern.MatchString(s) }
go
{ "resource": "" }
q17450
Set
train
func (pg *GoEvtxMap) Set(path *GoEvtxPath, new GoEvtxElement) error { if len(*path) > 0 { i := (*pg)[(*path)[0]] if len(*path) == 1 { (*pg)[(*path)[0]] = new return nil } switch i.(type) { case GoEvtxMap: p := i.(GoEvtxMap) np := (*path)[1:] return p.Set(&np, new) case map[string]interface{}: p := GoEvtxMap(i.(map[string]interface{})) np := (*path)[1:] return p.Set(&np, new) } } return &ErrEvtxEltNotFound{*path} }
go
{ "resource": "" }
q17451
Del
train
func (pg *GoEvtxMap) Del(path ...string) { if len(path) > 0 { if ge, ok := (*pg)[path[0]]; ok { if len(path) == 1 { delete((*pg), path[0]) } switch ge.(type) { case GoEvtxMap: p := ge.(GoEvtxMap) p.Del(path[1:]...) } } } }
go
{ "resource": "" }
q17452
Validate
train
func (h *EventHeader) Validate() error { // Validate the event magic if string(h.Magic[:]) != EventMagic { return fmt.Errorf("Bad event magic %q", h.Magic) } // An event cannot be bigger than a Chunk since an event is embedded into a // chunk if h.Size >= ChunkSize { return fmt.Errorf("Too big event") } // An event cannot be smaller than its header since the event size include the // size of the header if h.Size < EventHeaderSize { return fmt.Errorf("Too small event") } return nil }
go
{ "resource": "" }
q17453
GoEvtxMap
train
// GoEvtxMap parses the event's BinXML body (located just after its header
// inside the chunk data) and converts it to a GoEvtxMap. Invalid events
// return ErrInvalidEvent. A non-Fragment parse result returns nil in
// carving mode; otherwise the deliberate failed type assertion raises a
// panic (an event body can contain only BinXML fragments).
func (e Event) GoEvtxMap(c *Chunk) (pge *GoEvtxMap, err error) { // An Event can contain only BinXMLFragments if !e.IsValid() { err = ErrInvalidEvent return } reader := bytes.NewReader(c.Data) GoToSeeker(reader, e.Offset+EventHeaderSize) // Bug here if we put c element, err := Parse(reader, c, false) if err != nil && err != io.EOF { //panic(err) log.Error(err) } // If not a BinXMLFragment a panic will be raised fragment, ok := element.(*Fragment) switch { case !ok && ModeCarving: return case !ok: // Way to raise panic _ = element.(*Fragment) } return fragment.GoEvtxMap(), err }
go
{ "resource": "" }
q17454
AddColumn
train
// AddColumn appends the given column names to the index.
func (index *Index) AddColumn(cols ...string) {
	// A single variadic append replaces the element-by-element loop.
	index.Cols = append(index.Cols, cols...)
}
go
{ "resource": "" }
q17455
NewIndex
train
// NewIndex creates a regular index with the given name and type and no
// columns yet.
func NewIndex(name string, indexType int) *Index {
	cols := make([]string, 0)
	return &Index{true, name, indexType, cols}
}
go
{ "resource": "" }
q17456
Type2SQLType
train
// Type2SQLType maps a Go reflect.Type to its default SQL column type:
// small ints -> Int, 64-bit ints -> BigInt, floats -> Float/Double,
// complex -> Varchar(64), byte slices -> Blob, other slices/arrays/maps ->
// Text, bool -> Bool, string -> Varchar(255), time.Time-convertible
// structs -> DateTime (other structs fall back to Text), pointers recurse
// on the element type, and anything else becomes Text.
func Type2SQLType(t reflect.Type) (st SQLType) { switch k := t.Kind(); k { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: st = SQLType{Int, 0, 0} case reflect.Int64, reflect.Uint64: st = SQLType{BigInt, 0, 0} case reflect.Float32: st = SQLType{Float, 0, 0} case reflect.Float64: st = SQLType{Double, 0, 0} case reflect.Complex64, reflect.Complex128: st = SQLType{Varchar, 64, 0} case reflect.Array, reflect.Slice, reflect.Map: if t.Elem() == reflect.TypeOf(c_BYTE_DEFAULT) { st = SQLType{Blob, 0, 0} } else { st = SQLType{Text, 0, 0} } case reflect.Bool: st = SQLType{Bool, 0, 0} case reflect.String: st = SQLType{Varchar, 255, 0} case reflect.Struct: if t.ConvertibleTo(TimeType) { st = SQLType{DateTime, 0, 0} } else { // TODO need to handle association struct st = SQLType{Text, 0, 0} } case reflect.Ptr: st = Type2SQLType(t.Elem()) default: st = SQLType{Text, 0, 0} } return }
go
{ "resource": "" }
q17457
SQLType2Type
train
// SQLType2Type is the inverse mapping: from an SQL column type (matched by
// upper-cased name) to the Go type used to scan it — int/int64 for integer
// families, float32/float64 for floating point, string for textual and
// decimal/money types (to avoid precision loss), []byte for binary types,
// bool, and time.Time for date/time types. Unknown names default to
// string.
func SQLType2Type(st SQLType) reflect.Type { name := strings.ToUpper(st.Name) switch name { case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial: return reflect.TypeOf(1) case BigInt, BigSerial: return reflect.TypeOf(int64(1)) case Float, Real: return reflect.TypeOf(float32(1)) case Double: return reflect.TypeOf(float64(1)) case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName: return reflect.TypeOf("") case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier: return reflect.TypeOf([]byte{}) case Bool: return reflect.TypeOf(true) case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime: return reflect.TypeOf(c_TIME_DEFAULT) case Decimal, Numeric, Money, SmallMoney: return reflect.TypeOf("") default: return reflect.TypeOf("") } }
go
{ "resource": "" }
q17458
ValueOf
train
func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) { dataStruct := reflect.Indirect(reflect.ValueOf(bean)) return col.ValueOfV(&dataStruct) }
go
{ "resource": "" }
q17459
ValueOfV
train
// ValueOfV resolves the column's (possibly dotted) FieldName inside the
// given struct value. Maps are indexed by the last path segment;
// interfaces are unwrapped first. While descending, nil pointer fields are
// allocated on the fly so the leaf field is always addressable. An invalid
// intermediate or leaf field yields an error.
func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) { var fieldValue reflect.Value fieldPath := strings.Split(col.FieldName, ".") if dataStruct.Type().Kind() == reflect.Map { keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1]) fieldValue = dataStruct.MapIndex(keyValue) return &fieldValue, nil } else if dataStruct.Type().Kind() == reflect.Interface { structValue := reflect.ValueOf(dataStruct.Interface()) dataStruct = &structValue } level := len(fieldPath) fieldValue = dataStruct.FieldByName(fieldPath[0]) for i := 0; i < level-1; i++ { if !fieldValue.IsValid() { break } if fieldValue.Kind() == reflect.Struct { fieldValue = fieldValue.FieldByName(fieldPath[i+1]) } else if fieldValue.Kind() == reflect.Ptr { if fieldValue.IsNil() { fieldValue.Set(reflect.New(fieldValue.Type().Elem())) } fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1]) } else { return nil, fmt.Errorf("field %v is not valid", col.FieldName) } } if !fieldValue.IsValid() { return nil, fmt.Errorf("field %v is not valid", col.FieldName) } return &fieldValue, nil }
go
{ "resource": "" }
q17460
PKColumns
train
func (table *Table) PKColumns() []*Column { columns := make([]*Column, len(table.PrimaryKeys)) for i, name := range table.PrimaryKeys { columns[i] = table.GetColumn(name) } return columns }
go
{ "resource": "" }
q17461
AddColumn
train
// AddColumn registers col on the table: it appends to the ordered name
// list and column slice, indexes the column under its lower-cased name
// (several columns may share one name), and records the special roles the
// column carries — primary key, auto-increment, created/updated/deleted
// timestamps, and optimistic-lock version.
func (table *Table) AddColumn(col *Column) { table.columnsSeq = append(table.columnsSeq, col.Name) table.columns = append(table.columns, col) colName := strings.ToLower(col.Name) if c, ok := table.columnsMap[colName]; ok { table.columnsMap[colName] = append(c, col) } else { table.columnsMap[colName] = []*Column{col} } if col.IsPrimaryKey { table.PrimaryKeys = append(table.PrimaryKeys, col.Name) } if col.IsAutoIncrement { table.AutoIncrement = col.Name } if col.IsCreated { table.Created[col.Name] = true } if col.IsUpdated { table.Updated = col.Name } if col.IsDeleted { table.Deleted = col.Name } if col.IsVersion { table.Version = col.Name } }
go
{ "resource": "" }
q17462
AddIndex
train
// AddIndex registers (or replaces) the index under its name.
func (table *Table) AddIndex(index *Index) {
	table.Indexes[index.Name] = index
}
go
{ "resource": "" }
q17463
Open
train
func Open(driverName, dataSourceName string) (*DB, error) { db, err := sql.Open(driverName, dataSourceName) if err != nil { return nil, err } return &DB{ DB: db, Mapper: NewCacheMapper(&SnakeMapper{}), reflectCache: make(map[reflect.Type]*cacheStruct), }, nil }
go
{ "resource": "" }
q17464
FromDB
train
// FromDB wraps an existing *sql.DB with the default cached snake-case
// mapper and a fresh reflection cache.
func FromDB(db *sql.DB) *DB {
	wrapped := &DB{
		DB:           db,
		Mapper:       NewCacheMapper(&SnakeMapper{}),
		reflectCache: make(map[reflect.Type]*cacheStruct),
	}
	return wrapped
}
go
{ "resource": "" }
q17465
QueryContext
train
func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { rows, err := db.DB.QueryContext(ctx, query, args...) if err != nil { if rows != nil { rows.Close() } return nil, err } return &Rows{rows, db}, nil }
go
{ "resource": "" }
q17466
Query
train
// Query is QueryContext with a background context.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	return db.QueryContext(context.Background(), query, args...)
}
go
{ "resource": "" }
q17467
ScanStructByIndex
train
// ScanStructByIndex scans the current row into the fields of one or more
// structs by position: the result columns are mapped, in order, onto the
// concatenated exported-and-unexported fields of the destination structs.
// Each dest must be a pointer to a struct.
// NOTE(review): there is no check that the combined field count equals the
// column count — more fields than columns panics on the newDest index;
// confirm callers guarantee alignment.
func (rs *Rows) ScanStructByIndex(dest ...interface{}) error { if len(dest) == 0 { return errors.New("at least one struct") } vvvs := make([]reflect.Value, len(dest)) for i, s := range dest { vv := reflect.ValueOf(s) if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { return errors.New("dest should be a struct's pointer") } vvvs[i] = vv.Elem() } cols, err := rs.Columns() if err != nil { return err } newDest := make([]interface{}, len(cols)) var i = 0 for _, vvv := range vvvs { for j := 0; j < vvv.NumField(); j++ { newDest[i] = vvv.Field(j).Addr().Interface() i = i + 1 } } return rs.Rows.Scan(newDest...) }
go
{ "resource": "" }
q17468
ScanSlice
train
func (rs *Rows) ScanSlice(dest interface{}) error { vv := reflect.ValueOf(dest) if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Slice { return errors.New("dest should be a slice's pointer") } vvv := vv.Elem() cols, err := rs.Columns() if err != nil { return err } newDest := make([]interface{}, len(cols)) for j := 0; j < len(cols); j++ { if j >= vvv.Len() { newDest[j] = reflect.New(vvv.Type().Elem()).Interface() } else { newDest[j] = vvv.Index(j).Addr().Interface() } } err = rs.Rows.Scan(newDest...) if err != nil { return err } srcLen := vvv.Len() for i := srcLen; i < len(cols); i++ { vvv = reflect.Append(vvv, reflect.ValueOf(newDest[i]).Elem()) } return nil }
go
{ "resource": "" }
q17469
RegisterDialect
train
func RegisterDialect(dbName DbType, dialectFunc func() Dialect) { if dialectFunc == nil { panic("core: Register dialect is nil") } dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect }
go
{ "resource": "" }
q17470
QueryDialect
train
func QueryDialect(dbName DbType) Dialect { if d, ok := dialects[strings.ToLower(string(dbName))]; ok { return d() } return nil }
go
{ "resource": "" }
q17471
Walk
train
// Walk traverses the AST rooted at n, invoking v on each node.
func Walk(v Visitor, n Node) {
	w := visitor{Visitor: v}
	w.visit(nil, n)
}
go
{ "resource": "" }
q17472
ValuesAreEqual
train
// ValuesAreEqual reports deep equality of two Thrift wire values: the
// types must match, then the comparison dispatches per type — primitives
// by ==, binary by bytes.Equal, and structs/maps/sets/lists through their
// dedicated comparison helpers. Unknown types compare unequal.
func ValuesAreEqual(left, right Value) bool { if left.typ != right.typ { return false } switch left.typ { case TBool: return left.GetBool() == right.GetBool() case TI8: return left.GetI8() == right.GetI8() case TDouble: return left.GetDouble() == right.GetDouble() case TI16: return left.GetI16() == right.GetI16() case TI32: return left.GetI32() == right.GetI32() case TI64: return left.GetI64() == right.GetI64() case TBinary: return bytes.Equal(left.tbinary, right.tbinary) case TStruct: return StructsAreEqual(left.tstruct, right.tstruct) case TMap: return MapsAreEqual(left.tcoll.(MapItemList), right.tcoll.(MapItemList)) case TSet: return SetsAreEqual(left.tcoll.(ValueList), right.tcoll.(ValueList)) case TList: return ListsAreEqual(left.tcoll.(ValueList), right.tcoll.(ValueList)) default: return false } }
go
{ "resource": "" }
q17473
StructsAreEqual
train
func StructsAreEqual(left, right Struct) bool { if len(left.Fields) != len(right.Fields) { return false } // Fields are unordered so we need to build a map to actually compare // them. leftFields := left.fieldMap() rightFields := right.fieldMap() for i, lvalue := range leftFields { if rvalue, ok := rightFields[i]; !ok { return false } else if !ValuesAreEqual(lvalue, rvalue) { return false } } return true }
go
{ "resource": "" }
q17474
SetsAreEqual
train
func SetsAreEqual(left, right ValueList) bool { if left.ValueType() != right.ValueType() { return false } if left.Size() != right.Size() { return false } if isHashable(left.ValueType()) { return setsArEqualHashable(left.Size(), left, right) } return setsAreEqualUnhashable(left.Size(), left, right) }
go
{ "resource": "" }
q17475
MapsAreEqual
train
func MapsAreEqual(left, right MapItemList) bool { if left.KeyType() != right.KeyType() { return false } if left.ValueType() != right.ValueType() { return false } if left.Size() != right.Size() { return false } if isHashable(left.KeyType()) { return mapsAreEqualHashable(left.Size(), left, right) } return mapsAreEqualUnhashable(left.Size(), left, right) }
go
{ "resource": "" }
q17476
ListsAreEqual
train
func ListsAreEqual(left, right ValueList) bool { if left.ValueType() != right.ValueType() { return false } if left.Size() != right.Size() { return false } leftItems := ValueListToSlice(left) rightItems := ValueListToSlice(right) for i, lv := range leftItems { rv := rightItems[i] if !ValuesAreEqual(lv, rv) { return false } } return true }
go
{ "resource": "" }
q17477
compileTypedef
train
func compileTypedef(file string, src *ast.Typedef) (*TypedefSpec, error) { typ, err := compileTypeReference(src.Type) if err != nil { return nil, err } annotations, err := compileAnnotations(src.Annotations) if err != nil { return nil, compileError{ Target: src.Name, Line: src.Line, Reason: err, } } return &TypedefSpec{ Name: src.Name, File: file, Target: typ, Annotations: annotations, Doc: src.Doc, }, nil }
go
{ "resource": "" }
q17478
Link
train
func (t *TypedefSpec) Link(scope Scope) (TypeSpec, error) { if t.linked() { return t, nil } var err error t.Target, err = t.Target.Link(scope) if err == nil { t.root = RootTypeSpec(t.Target) } return t, err }
go
{ "resource": "" }
q17479
ForEachTypeReference
train
// ForEachTypeReference applies f to the typedef's single referenced type
// (its target).
func (t *TypedefSpec) ForEachTypeReference(f func(TypeSpec) error) error {
	return f(t.Target)
}
go
{ "resource": "" }
q17480
Parse
train
func Parse(s []byte) (*ast.Program, error) { lex := newLexer(s) e := yyParse(lex) if e == 0 && !lex.parseFailed { return lex.program, nil } return nil, lex.err }
go
{ "resource": "" }
q17481
WriteEnveloped
train
func (bw *Writer) WriteEnveloped(e wire.Envelope) error { version := uint32(version1) | uint32(e.Type) if err := bw.writeInt32(int32(version)); err != nil { return err } if err := bw.writeString(e.Name); err != nil { return err } if err := bw.writeInt32(e.SeqID); err != nil { return err } return bw.WriteValue(e.Value) }
go
{ "resource": "" }
q17482
TypeDefinition
train
func TypeDefinition(g Generator, spec compile.TypeSpec) error { switch s := spec.(type) { case *compile.EnumSpec: return enum(g, s) case *compile.StructSpec: return structure(g, s) case *compile.TypedefSpec: return typedef(g, s) default: panic(fmt.Sprintf("%q is not a defined type", spec.ThriftName())) } }
go
{ "resource": "" }
q17483
isReferenceType
train
func isReferenceType(spec compile.TypeSpec) bool { spec = compile.RootTypeSpec(spec) if _, ok := spec.(*compile.BinarySpec); ok { return true } switch spec.(type) { case *compile.MapSpec, *compile.ListSpec, *compile.SetSpec: return true default: return false } }
go
{ "resource": "" }
q17484
typeReference
train
// typeReference returns the Go type string used to reference a value of
// the given Thrift type: the bare type name, with a "*" prefix added for
// struct types so that structs are always referred to via pointer.
func typeReference(g Generator, spec compile.TypeSpec) (string, error) {
	name, err := typeName(g, spec)
	if err != nil {
		return "", err
	}
	if isStructType(spec) {
		// Structs are always referenced via pointer.
		name = "*" + name
	}
	return name, nil
}
go
{ "resource": "" }
q17485
typeReferencePtr
train
// typeReferencePtr returns a Go type string that can represent an optional
// value of the given type. Reference types (binary, map, list, set) are
// already nilable and are returned as-is; everything else gets a "*"
// prefix.
func typeReferencePtr(g Generator, spec compile.TypeSpec) (string, error) {
	ref, err := typeName(g, spec)
	if err != nil {
		return "", err
	}
	if !isReferenceType(spec) {
		// Non-reference types need a pointer so that they can be nil.
		return "*" + ref, nil
	}
	return ref, nil
}
go
{ "resource": "" }
q17486
typeName
train
// typeName returns the Go type name (without any pointer prefix) for the
// given Thrift type.
//
// Primitives map to Go builtins, binary maps to []byte, and containers map
// to Go maps and slices. Maps with unhashable keys become a slice of
// key/value structs, and sets become either map[T]struct{} or []T depending
// on setUsesMap. Named types (enum/struct/typedef) are resolved through the
// generator's lookup table. Panics on an unknown TypeSpec.
func typeName(g Generator, spec compile.TypeSpec) (string, error) {
	switch s := spec.(type) {
	case *compile.BoolSpec:
		return "bool", nil
	case *compile.I8Spec:
		return "int8", nil
	case *compile.I16Spec:
		return "int16", nil
	case *compile.I32Spec:
		return "int32", nil
	case *compile.I64Spec:
		return "int64", nil
	case *compile.DoubleSpec:
		return "float64", nil
	case *compile.StringSpec:
		return "string", nil
	case *compile.BinarySpec:
		return "[]byte", nil
	case *compile.MapSpec:
		k, err := typeReference(g, s.KeySpec)
		if err != nil {
			return "", err
		}
		v, err := typeReference(g, s.ValueSpec)
		if err != nil {
			return "", err
		}
		if !isHashable(s.KeySpec) {
			// Unhashable key type: a Go map is impossible, so fall back
			// to a slice of key/value pairs.
			return fmt.Sprintf("[]struct{Key %s; Value %s}", k, v), nil
		}
		return fmt.Sprintf("map[%s]%s", k, v), nil
	case *compile.ListSpec:
		v, err := typeReference(g, s.ValueSpec)
		if err != nil {
			return "", err
		}
		return "[]" + v, nil
	case *compile.SetSpec:
		v, err := typeReference(g, s.ValueSpec)
		if err != nil {
			return "", err
		}
		// Set not annotated to be a slice and the value type is hashable:
		// use a map with empty struct values.
		if setUsesMap(s) {
			return fmt.Sprintf("map[%s]struct{}", v), nil
		}
		return fmt.Sprintf("[]%s", v), nil
	case *compile.EnumSpec, *compile.StructSpec, *compile.TypedefSpec:
		return g.LookupTypeName(spec)
	default:
		panic(fmt.Sprintf("Unknown type (%T) %v", spec, spec))
	}
}
go
{ "resource": "" }
q17487
resolveService
train
// resolveService resolves a service reference to its linked ServiceSpec.
// The name may be unqualified ("Service") or qualified with an included
// module ("module.Service"); qualified names recurse into the included
// module's scope. Lookup failures are wrapped in a referenceError carrying
// the reference's name, line, and the scope it was looked up in.
func resolveService(src ast.ServiceReference, scope Scope) (*ServiceSpec, error) {
	// Fast path: the name resolves directly in the current scope.
	s, err := scope.LookupService(src.Name)
	if err == nil {
		err = s.Link(scope)
		return s, err
	}

	// Otherwise the name may be qualified with an included module name.
	mname, iname := splitInclude(src.Name)
	if len(mname) == 0 {
		// Not qualified either: report the original lookup failure.
		return nil, referenceError{
			Target:    src.Name,
			Line:      src.Line,
			ScopeName: scope.GetName(),
			Reason:    err,
		}
	}

	includedScope, err := getIncludedScope(scope, mname)
	if err != nil {
		return nil, referenceError{
			Target:    src.Name,
			Line:      src.Line,
			ScopeName: scope.GetName(),
			Reason:    err,
		}
	}

	// Recurse with the unqualified name in the included module's scope.
	return resolveService(ast.ServiceReference{Name: iname}, includedScope)
}
go
{ "resource": "" }
q17488
Link
train
// Link links the service within the given scope: it resolves and links the
// parent service (if an "extends" clause was recorded in parentSrc) and
// then links every function. Errors are wrapped with the service (and,
// for functions, the function) name. Linking twice is a no-op.
func (s *ServiceSpec) Link(scope Scope) error {
	if s.linked() {
		return nil
	}

	if s.parentSrc != nil {
		parent, err := resolveService(*s.parentSrc, scope)
		if err != nil {
			return compileError{
				Target: s.Name,
				Reason: referenceError{
					Target:    s.parentSrc.Name,
					Line:      s.parentSrc.Line,
					ScopeName: scope.GetName(),
					Reason:    err,
				},
			}
		}
		if err := parent.Link(scope); err != nil {
			return compileError{Target: s.Name, Reason: err}
		}
		s.Parent = parent
		// Clear the unresolved reference now that Parent is set.
		s.parentSrc = nil
	}

	for _, function := range s.Functions {
		if err := function.Link(scope); err != nil {
			return compileError{
				Target: s.Name + "." + function.Name,
				Reason: err,
			}
		}
	}

	return nil
}
go
{ "resource": "" }
q17489
Link
train
func (f *FunctionSpec) Link(scope Scope) error { if f.linked() { return nil } if err := f.ArgsSpec.Link(scope); err != nil { return compileError{Target: f.Name, Reason: err} } if f.ResultSpec != nil { if err := f.ResultSpec.Link(scope); err != nil { return compileError{Target: f.Name, Reason: err} } } return nil }
go
{ "resource": "" }
q17490
CallType
train
func (f *FunctionSpec) CallType() wire.EnvelopeType { if f.OneWay { return wire.OneWay } return wire.Call }
go
{ "resource": "" }
q17491
Link
train
// Link links the argument fields within the given scope by delegating to
// the underlying FieldGroup.
func (as ArgsSpec) Link(scope Scope) error {
	fields := FieldGroup(as)
	return fields.Link(scope)
}
go
{ "resource": "" }
q17492
Link
train
func (rs *ResultSpec) Link(scope Scope) (err error) { if rs.ReturnType != nil { rs.ReturnType, err = rs.ReturnType.Link(scope) if err != nil { return err } } if err := rs.Exceptions.Link(scope); err != nil { return err } // verify that everything listed under throws is an exception. for _, exception := range rs.Exceptions { spec, ok := exception.Type.(*StructSpec) if !ok || spec.Type != ast.ExceptionType { return notAnExceptionError{ FieldName: exception.ThriftName(), TypeName: spec.ThriftName(), } } } return nil }
go
{ "resource": "" }
q17493
Contains
train
// Contains reports whether other lies within the half-open interval
// [Begin, End): at or above Begin and strictly below End.
func (r *Range) Contains(other Version) bool {
	if other.Compare(&r.Begin) < 0 {
		return false
	}
	return other.Compare(&r.End) < 0
}
go
{ "resource": "" }
q17494
zapMarshalerPtr
train
func (z *zapGenerator) zapMarshalerPtr(g Generator, spec compile.TypeSpec, fieldValue string) (string, error) { if isPrimitiveType(spec) { fieldValue = "*" + fieldValue } return z.zapMarshaler(g, spec, fieldValue) }
go
{ "resource": "" }
q17495
Compile
train
func Compile(path string, opts ...Option) (*Module, error) { c := newCompiler() for _, opt := range opts { opt(&c) } m, err := c.load(path) if err != nil { return nil, err } err = m.Walk(func(m *Module) error { if err := c.link(m); err != nil { return compileError{ Target: m.ThriftPath, Reason: err, } } return nil }) return m, err }
go
{ "resource": "" }
q17496
load
train
// load reads, parses, and gathers the Thrift file at path p into a Module,
// caching results by absolute path so each file is processed once. The
// freshly created module is registered in c.Modules before its includes
// are gathered, which is what makes cyclic includes terminate.
func (c compiler) load(p string) (*Module, error) {
	p, err := c.fs.Abs(p)
	if err != nil {
		return nil, err
	}

	if m, ok := c.Modules[p]; ok {
		// Already loaded.
		return m, nil
	}

	s, err := c.fs.Read(p)
	if err != nil {
		return nil, fileReadError{Path: p, Reason: err}
	}

	prog, err := idl.Parse(s)
	if err != nil {
		return nil, parseError{Path: p, Reason: err}
	}

	m := &Module{
		Name:       fileBaseName(p),
		ThriftPath: p,
		Includes:   make(map[string]*IncludedModule),
		Constants:  make(map[string]*Constant),
		Types:      make(map[string]TypeSpec),
		Services:   make(map[string]*ServiceSpec),
	}
	m.Raw = s
	c.Modules[p] = m
	// the module is added to the map before processing includes to break
	// cyclic includes.
	if err := c.gather(m, prog); err != nil {
		return nil, fileCompileError{Path: p, Reason: err}
	}
	return m, nil
}
go
{ "resource": "" }
q17497
include
train
func (c compiler) include(m *Module, include *ast.Include) (*IncludedModule, error) { if len(include.Name) > 0 { // TODO(abg): Add support for include-as flag somewhere. return nil, includeError{ Include: include, Reason: includeAsDisabledError{}, } } ipath := filepath.Join(filepath.Dir(m.ThriftPath), include.Path) incM, err := c.load(ipath) if err != nil { return nil, includeError{Include: include, Reason: err} } return &IncludedModule{Name: fileBaseName(include.Path), Module: incM}, nil }
go
{ "resource": "" }
q17498
zapMarshaler
train
// zapMarshaler declares (once per list type) a named slice type with a
// MarshalLogArray method so that values of this list type can be logged
// efficiently via Zap, and returns an expression that converts fieldValue
// to that named type.
func (l *listGenerator) zapMarshaler(
	g Generator,
	spec *compile.ListSpec,
	fieldValue string,
) (string, error) {
	name := zapperName(g, spec)
	// EnsureDeclared emits the zapper type and its MarshalLogArray
	// implementation only the first time; later calls reuse it.
	if err := g.EnsureDeclared(
		` <$zapcore := import "go.uber.org/zap/zapcore"> type <.Name> <typeReference .Type> <$l := newVar "l"> <$v := newVar "v"> <$enc := newVar "enc"> // MarshalLogArray implements zapcore.ArrayMarshaler, enabling // fast logging of <.Name>. func (<$l> <.Name>) MarshalLogArray(<$enc> <$zapcore>.ArrayEncoder) (err error) { for _, <$v> := range <$l> { <zapEncodeBegin .Type.ValueSpec -> <$enc>.Append<zapEncoder .Type.ValueSpec>(<zapMarshaler .Type.ValueSpec $v>) <- zapEncodeEnd .Type.ValueSpec> } return err } `,
		struct {
			Name string
			Type *compile.ListSpec
		}{
			Name: name,
			Type: spec,
		},
	); err != nil {
		return "", err
	}
	return fmt.Sprintf("(%v)(%v)", name, fieldValue), nil
}
go
{ "resource": "" }
q17499
Handle
train
func (h ServiceGeneratorHandler) Handle(name string, reqValue wire.Value) (wire.Value, error) { switch name { case "generate": var args ServiceGenerator_Generate_Args if err := args.FromWire(reqValue); err != nil { return wire.Value{}, err } result, err := ServiceGenerator_Generate_Helper.WrapResponse( h.impl.Generate(args.Request), ) if err != nil { return wire.Value{}, err } return result.ToWire() default: return wire.Value{}, envelope.ErrUnknownMethod(name) } }
go
{ "resource": "" }