_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q10100
CompareAndSet
train
func (ai *AtomicInt) CompareAndSet(expect int, update int) bool { res := atomic.CompareAndSwapInt64(&ai.val, int64(expect), int64(update)) return res }
go
{ "resource": "" }
q10101
DecrementAndGet
train
func (ai *AtomicInt) DecrementAndGet() int { res := int(atomic.AddInt64(&ai.val, -1)) return res }
go
{ "resource": "" }
q10102
Get
train
func (ai *AtomicInt) Get() int { res := int(atomic.LoadInt64(&ai.val)) return res }
go
{ "resource": "" }
q10103
GetAndAdd
train
func (ai *AtomicInt) GetAndAdd(delta int) int { newVal := atomic.AddInt64(&ai.val, int64(delta)) res := int(newVal - int64(delta)) return res }
go
{ "resource": "" }
q10104
GetAndDecrement
train
func (ai *AtomicInt) GetAndDecrement() int { newVal := atomic.AddInt64(&ai.val, -1) res := int(newVal + 1) return res }
go
{ "resource": "" }
q10105
GetAndSet
train
func (ai *AtomicInt) GetAndSet(newValue int) int { res := int(atomic.SwapInt64(&ai.val, int64(newValue))) return res }
go
{ "resource": "" }
q10106
IncrementAndGet
train
func (ai *AtomicInt) IncrementAndGet() int { res := int(atomic.AddInt64(&ai.val, 1)) return res }
go
{ "resource": "" }
q10107
Set
train
// Set atomically stores newValue, discarding the previous value.
func (ai *AtomicInt) Set(newValue int) {
	atomic.StoreInt64(&ai.val, int64(newValue))
}
go
{ "resource": "" }
q10108
NewEqualFilter
train
// NewEqualFilter creates a secondary-index equality filter on the given
// bin. The same value is passed as both range bounds, which is how
// newFilter expresses equality.
func NewEqualFilter(binName string, value interface{}) *Filter {
	val := NewValue(value)
	return newFilter(binName, ICT_DEFAULT, val.GetType(), val, val)
}
go
{ "resource": "" }
q10109
NewRangeFilter
train
// NewRangeFilter creates a secondary-index filter matching integer bin
// values in the inclusive range [begin, end].
func NewRangeFilter(binName string, begin int64, end int64) *Filter {
	vBegin, vEnd := NewValue(begin), NewValue(end)
	return newFilter(binName, ICT_DEFAULT, vBegin.GetType(), vBegin, vEnd)
}
go
{ "resource": "" }
q10110
NewContainsFilter
train
// NewContainsFilter creates a filter matching collection elements
// (list items, map keys or map values, as selected by
// indexCollectionType) equal to value.
func NewContainsFilter(binName string, indexCollectionType IndexCollectionType, value interface{}) *Filter {
	v := NewValue(value)
	return newFilter(binName, indexCollectionType, v.GetType(), v, v)
}
go
{ "resource": "" }
q10111
NewContainsRangeFilter
train
// NewContainsRangeFilter creates a filter matching collection elements
// falling within the inclusive integer range [begin, end].
func NewContainsRangeFilter(binName string, indexCollectionType IndexCollectionType, begin, end int64) *Filter {
	vBegin, vEnd := NewValue(begin), NewValue(end)
	return newFilter(binName, indexCollectionType, vBegin.GetType(), vBegin, vEnd)
}
go
{ "resource": "" }
q10112
NewGeoWithinRegionFilter
train
// NewGeoWithinRegionFilter creates a geospatial filter matching points
// that lie within the given GeoJSON region. The region string is used
// as both bounds with the GEOJSON particle type.
func NewGeoWithinRegionFilter(binName, region string) *Filter {
	v := NewStringValue(region)
	return newFilter(binName, ICT_DEFAULT, ParticleType.GEOJSON, v, v)
}
go
{ "resource": "" }
q10113
NewGeoWithinRegionForCollectionFilter
train
// NewGeoWithinRegionForCollectionFilter is the collection variant of
// NewGeoWithinRegionFilter: it matches collection elements (per
// collectionType) that lie within the given GeoJSON region.
func NewGeoWithinRegionForCollectionFilter(binName string, collectionType IndexCollectionType, region string) *Filter {
	v := NewStringValue(region)
	return newFilter(binName, collectionType, ParticleType.GEOJSON, v, v)
}
go
{ "resource": "" }
q10114
NewGeoRegionsContainingPointFilter
train
// NewGeoRegionsContainingPointFilter creates a geospatial filter
// matching stored regions that contain the given GeoJSON point.
func NewGeoRegionsContainingPointFilter(binName, point string) *Filter {
	v := NewStringValue(point)
	return newFilter(binName, ICT_DEFAULT, ParticleType.GEOJSON, v, v)
}
go
{ "resource": "" }
q10115
NewGeoRegionsContainingPointForCollectionFilter
train
// NewGeoRegionsContainingPointForCollectionFilter is the collection
// variant of NewGeoRegionsContainingPointFilter: it matches collection
// elements (per collectionType) whose regions contain the given point.
func NewGeoRegionsContainingPointForCollectionFilter(binName string, collectionType IndexCollectionType, point string) *Filter {
	v := NewStringValue(point)
	return newFilter(binName, collectionType, ParticleType.GEOJSON, v, v)
}
go
{ "resource": "" }
q10116
newFilter
train
// newFilter assembles the Filter value shared by every public filter
// constructor. begin and end bound the matched range; they are equal
// for point/equality queries.
func newFilter(name string, indexCollectionType IndexCollectionType, valueParticleType int, begin Value, end Value) *Filter {
	return &Filter{
		name:              name,
		idxType:           indexCollectionType,
		valueParticleType: valueParticleType,
		begin:             begin,
		end:               end,
	}
}
go
{ "resource": "" }
q10117
parseKey
train
// parseKey reads fieldCount key fields of one batch record from the
// wire buffer, populating cmd.key's digest, namespace, set name and,
// when present, the original user key.
func (cmd *batchCommandGet) parseKey(fieldCount int) error {
	var err error
	for i := 0; i < fieldCount; i++ {
		// Each field is preceded by a 4-byte big-endian length.
		if err = cmd.readBytes(4); err != nil {
			return err
		}
		fieldlen := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
		if err = cmd.readBytes(fieldlen); err != nil {
			return err
		}
		// The first byte of the field is its type; the rest is payload.
		fieldtype := FieldType(cmd.dataBuffer[0])
		size := fieldlen - 1
		switch fieldtype {
		case DIGEST_RIPE:
			copy(cmd.key.digest[:], cmd.dataBuffer[1:size+1])
		case NAMESPACE:
			cmd.key.namespace = string(cmd.dataBuffer[1 : size+1])
		case TABLE:
			cmd.key.setName = string(cmd.dataBuffer[1 : size+1])
		case KEY:
			// Payload starts with the particle type byte, then the value bytes.
			if cmd.key.userKey, err = bytesToKeyValue(int(cmd.dataBuffer[1]), cmd.dataBuffer, 2, size-1); err != nil {
				return err
			}
		}
		// Unknown field types are skipped silently.
	}
	return nil
}
go
{ "resource": "" }
q10118
parseRecord
train
// parseRecord decodes opCount bin operations from the wire buffer into
// a BinMap and wraps them, with the given metadata, in a new Record.
func (cmd *batchCommandGet) parseRecord(key *Key, opCount int, generation, expiration uint32) (*Record, error) {
	bins := make(BinMap, opCount)
	for i := 0; i < opCount; i++ {
		// 8-byte operation header: total size, particle type (byte 5)
		// and bin-name length (byte 7).
		if err := cmd.readBytes(8); err != nil {
			return nil, err
		}
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
		particleType := int(cmd.dataBuffer[5])
		nameSize := int(cmd.dataBuffer[7])
		if err := cmd.readBytes(nameSize); err != nil {
			return nil, err
		}
		name := string(cmd.dataBuffer[:nameSize])
		// Remaining payload after the 4 trailing header bytes and the name.
		particleBytesSize := opSize - (4 + nameSize)
		if err := cmd.readBytes(particleBytesSize); err != nil {
			return nil, err
		}
		value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
		if err != nil {
			return nil, err
		}
		bins[name] = value
	}
	return newRecord(cmd.node, key, bins, generation, expiration), nil
}
go
{ "resource": "" }
q10119
expireExample
train
// expireExample demonstrates record TTL: it writes a record with a
// 2-second expiration, reads it back before expiry to show it exists,
// then sleeps past the TTL and verifies the record is gone.
func expireExample(client *as.Client) {
	key, _ := as.NewKey(*shared.Namespace, *shared.Set, "expirekey ")
	bin := as.NewBin("expirebin", "expirevalue")
	log.Printf("Put: namespace=%s set=%s key=%s bin=%s value=%s expiration=2",
		key.Namespace(), key.SetName(), key.Value(), bin.Name, bin.Value)
	// Specify that record expires 2 seconds after it's written.
	writePolicy := as.NewWritePolicy(0, 2)
	// NOTE(review): the PutBins error is discarded — tolerable in a demo,
	// but real code should check it.
	client.PutBins(writePolicy, key, bin)
	// Read the record before it expires, showing it is there.
	log.Printf("Get: namespace=%s set=%s key=%s", key.Namespace(), key.SetName(), key.Value())
	record, err := client.Get(shared.Policy, key, bin.Name)
	shared.PanicOnError(err)
	if record == nil {
		log.Fatalf(
			"Failed to get record: namespace=%s set=%s key=%s", key.Namespace(), key.SetName(), key.Value())
	}
	received := record.Bins[bin.Name]
	expected := bin.Value.String()
	// received is an interface{}; comparison succeeds because the bin
	// holds a string value.
	if received == expected {
		log.Printf("Get record successful: namespace=%s set=%s key=%s bin=%s value=%s",
			key.Namespace(), key.SetName(), key.Value(), bin.Name, received)
	} else {
		log.Fatalf("Expire record mismatch: Expected %s. Received %s.", expected, received)
	}
	// Read the record after it expires, showing it's gone.
	log.Printf("Sleeping for 3 seconds ...")
	time.Sleep(3 * time.Second)
	record, err = client.Get(shared.Policy, key, bin.Name)
	shared.PanicOnError(err)
	if record == nil {
		log.Printf("Expiry of record successful. Record not found.")
	} else {
		log.Fatalf("Found record when it should have expired.")
	}
}
go
{ "resource": "" }
q10120
onComplete
train
func (btsk *baseTask) onComplete(ifc Task) chan error { ch := make(chan error, 1) // goroutine will loop every <interval> until IsDone() returns true or error go func() { // always close the channel on return defer close(ch) var interval = 100 * time.Millisecond for { select { case <-time.After(interval): done, err := ifc.IsDone() // Every 5 failed retries increase the interval if btsk.retries.IncrementAndGet()%5 == 0 { interval *= 2 println(interval) if interval > 5*time.Second { interval = 5 * time.Second } } if err != nil { ae, ok := err.(AerospikeError) if ok && ae.ResultCode() == TIMEOUT { ae.MarkInDoubt() } ch <- ae return } else if done { ch <- nil return } } // select } // for }() return ch }
go
{ "resource": "" }
q10121
NewTransaction
train
// NewTransaction starts an explicit badger transaction, read-write
// unless readOnly is set. It fails with ErrClosed once the datastore
// has been closed.
func (d *Datastore) NewTransaction(readOnly bool) (ds.Txn, error) {
	d.closeLk.RLock()
	defer d.closeLk.RUnlock()
	if d.closed {
		return nil, ErrClosed
	}
	// badger's NewTransaction takes an "update" flag, hence the negation.
	// The trailing false marks this as an explicit (non-implicit) txn.
	return &txn{d, d.DB.NewTransaction(!readOnly), false}, nil
}
go
{ "resource": "" }
q10122
newImplicitTransaction
train
// newImplicitTransaction creates a transaction used internally for a
// single operation; the trailing true flag marks it implicit.
// NOTE(review): unlike NewTransaction this performs no closed-datastore
// check — presumably callers already hold closeLk; verify at call sites.
func (d *Datastore) newImplicitTransaction(readOnly bool) *txn {
	return &txn{d, d.DB.NewTransaction(!readOnly), true}
}
go
{ "resource": "" }
q10123
DiskUsage
train
// DiskUsage reports the total on-disk size in bytes of the underlying
// badger store (LSM tree plus value log). It fails with ErrClosed once
// the datastore has been closed.
func (d *Datastore) DiskUsage() (uint64, error) {
	d.closeLk.RLock()
	defer d.closeLk.RUnlock()
	if d.closed {
		return 0, ErrClosed
	}
	lsm, vlog := d.DB.Size()
	return uint64(lsm + vlog), nil
}
go
{ "resource": "" }
q10124
Close
train
// Close discards the transaction, failing with ErrClosed when the
// owning datastore has already been closed.
func (t *txn) Close() error {
	t.ds.closeLk.RLock()
	defer t.ds.closeLk.RUnlock()
	if t.ds.closed {
		return ErrClosed
	}
	return t.close()
}
go
{ "resource": "" }
q10125
updatePriority
train
// updatePriority changes item's priority in place and restores the
// heap invariant around it.
func (pq *priorityQueue) updatePriority(item *expiringBuffer, priority int64) {
	item.priority = priority
	// NOTE: fix is a slightly more efficient version of calling Remove() and
	// then Push()
	heap.Fix(pq, item.index)
}
go
{ "resource": "" }
q10126
peakTopPriority
train
// peakTopPriority returns the priority of the queue's head element
// without removing it, or -1 and an error when the queue is empty.
//
// NOTE(review): the name is likely a typo for "peek", but renaming
// would break callers, so it is kept.
func (pq *priorityQueue) peakTopPriority() (int64, error) {
	// Early return on the empty case removes the else-after-return; the
	// error string is lowercased and unpunctuated per Go convention.
	if len(*pq) == 0 {
		return -1, fmt.Errorf("priority queue is empty: no top priority")
	}
	return (*pq)[0].priority, nil
}
go
{ "resource": "" }
q10127
getEventSubscriptionHandler
train
func getEventSubscriptionHandler(manager *golongpoll.LongpollManager) func(w http.ResponseWriter, r *http.Request) { // Creates closure that captures the LongpollManager // Wraps the manager.SubscriptionHandler with a layer of dummy access control validation return func(w http.ResponseWriter, r *http.Request) { category := r.URL.Query().Get("category") user := r.URL.Query().Get("user") // NOTE: real user authentication should be used in the real world! // Dummy user access control in the event the client is requesting // a user's private activity stream: if category == "larry_actions" && user != "larry" { w.WriteHeader(http.StatusForbidden) w.Write([]byte("You're not Larry.")) return } if category == "moe_actions" && user != "moe" { w.WriteHeader(http.StatusForbidden) w.Write([]byte("You're not Moe.")) return } if category == "curly_actions" && user != "curly" { w.WriteHeader(http.StatusForbidden) w.Write([]byte("You're not Curly.")) return } // Only allow supported subscription categories: if category != "public_actions" && category != "larry_actions" && category != "moe_actions" && category != "curly_actions" { w.WriteHeader(http.StatusBadRequest) w.Write([]byte("Subscription channel does not exist.")) return } // Client is either requesting the public stream, or a private // stream that they're allowed to see. // Go ahead and let the subscription happen: manager.SubscriptionHandler(w, r) } }
go
{ "resource": "" }
q10128
QueueEvent
train
// QueueEvent adds event to the front of the buffer, evicting the
// oldest event when the buffer is at capacity, and refreshes the
// cached timestamp of the oldest buffered event.
func (eb *eventBuffer) QueueEvent(event *lpEvent) error {
	if event == nil {
		return errors.New("event was nil")
	}
	// Cull our buffer if we're at max capacity
	if eb.List.Len() >= eb.MaxBufferSize {
		oldestEvent := eb.List.Back()
		if oldestEvent != nil {
			eb.List.Remove(oldestEvent)
		}
	}
	// Add event to front of our list
	eb.List.PushFront(event)
	// Update oldestEventTime with the time of our least recent event (at back)
	// keeping track of this allows for more efficient event TTL expiration purges
	if lastElement := eb.List.Back(); lastElement != nil {
		lastEvent, ok := lastElement.Value.(*lpEvent)
		if !ok {
			return fmt.Errorf("Found non-event type in event buffer.")
		}
		eb.oldestEventTime = lastEvent.Timestamp
	}
	return nil
}
go
{ "resource": "" }
q10129
getLongPollSubscriptionHandler
train
func getLongPollSubscriptionHandler(maxTimeoutSeconds int, subscriptionRequests chan clientSubscription, clientTimeouts chan<- clientCategoryPair, loggingEnabled bool) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { timeout, err := strconv.Atoi(r.URL.Query().Get("timeout")) if loggingEnabled { log.Println("Handling HTTP request at ", r.URL) } // We are going to return json no matter what: w.Header().Set("Content-Type", "application/json") // Don't cache response: w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. w.Header().Set("Pragma", "no-cache") // HTTP 1.0. w.Header().Set("Expires", "0") // Proxies. if err != nil || timeout > maxTimeoutSeconds || timeout < 1 { if loggingEnabled { log.Printf("Error: Invalid timeout param. Must be 1-%d. Got: %q.\n", maxTimeoutSeconds, r.URL.Query().Get("timeout")) } io.WriteString(w, fmt.Sprintf("{\"error\": \"Invalid timeout arg. Must be 1-%d.\"}", maxTimeoutSeconds)) return } category := r.URL.Query().Get("category") if len(category) == 0 || len(category) > 1024 { if loggingEnabled { log.Printf("Error: Invalid subscription category, must be 1-1024 characters long.\n") } io.WriteString(w, "{\"error\": \"Invalid subscription category, must be 1-1024 characters long.\"}") return } // Default to only looking for current events lastEventTime := time.Now() // since_time is string of milliseconds since epoch lastEventTimeParam := r.URL.Query().Get("since_time") if len(lastEventTimeParam) > 0 { // Client is requesting any event from given timestamp // parse time var parseError error lastEventTime, parseError = millisecondStringToTime(lastEventTimeParam) if parseError != nil { if loggingEnabled { log.Printf("Error parsing last_event_time arg. 
Parm Value: %s, Error: %s.\n", lastEventTimeParam, err) } io.WriteString(w, "{\"error\": \"Invalid last_event_time arg.\"}") return } } subscription, err := newclientSubscription(category, lastEventTime) if err != nil { if loggingEnabled { log.Printf("Error creating new Subscription: %s.\n", err) } io.WriteString(w, "{\"error\": \"Error creating new Subscription.\"}") return } subscriptionRequests <- *subscription // Listens for connection close and un-register subscription in the // event that a client crashes or the connection goes down. We don't // need to wait around to fulfill a subscription if no one is going to // receive it disconnectNotify := w.(http.CloseNotifier).CloseNotify() select { case <-time.After(time.Duration(timeout) * time.Second): // Lets the subscription manager know it can discard this request's // channel. clientTimeouts <- subscription.clientCategoryPair timeout_resp := makeTimeoutResponse(time.Now()) if jsonData, err := json.Marshal(timeout_resp); err == nil { io.WriteString(w, string(jsonData)) } else { io.WriteString(w, "{\"error\": \"json marshaller failed\"}") } case events := <-subscription.Events: // Consume event. Subscription manager will automatically discard // this client's channel upon sending event // NOTE: event is actually []Event if jsonData, err := json.Marshal(eventResponse{&events}); err == nil { io.WriteString(w, string(jsonData)) } else { io.WriteString(w, "{\"error\": \"json marshaller failed\"}") } case <-disconnectNotify: // Client connection closed before any events occurred and before // the timeout was exceeded. Tell manager to forget about this // client. clientTimeouts <- subscription.clientCategoryPair } } }
go
{ "resource": "" }
q10130
run
train
// run is the subscription manager's main loop. It serializes all state
// access onto a single goroutine by handling new subscriptions, client
// timeouts and incoming events one at a time, and piggybacks a
// stale-category purge check on every wakeup. It returns nil when a
// quit signal is received.
func (sm *subscriptionManager) run() error {
	if sm.LoggingEnabled {
		log.Println("SubscriptionManager: Starting run.")
	}
	for {
		// NOTE: we check to see if its time to purge old buffers whenever
		// something happens or a period of inactivity has occurred.
		// An alternative would be to have another goroutine with a
		// select case time.After() but then you'd have concurrency issues
		// with access to the sm.SubEventBuffer and sm.bufferPriorityQueue objs
		// So instead of introducing mutexes we have this uglier manual time check calls
		select {
		case newClient := <-sm.clientSubscriptions:
			sm.handleNewClient(&newClient)
			sm.seeIfTimeToPurgeStaleCategories()
		case disconnected := <-sm.ClientTimeouts:
			sm.handleClientDisconnect(&disconnected)
			sm.seeIfTimeToPurgeStaleCategories()
		case event := <-sm.Events:
			sm.handleNewEvent(&event)
			sm.seeIfTimeToPurgeStaleCategories()
		case <-time.After(time.Duration(5) * time.Second):
			// Inactivity fallback so purging still happens with no traffic.
			sm.seeIfTimeToPurgeStaleCategories()
		case _ = <-sm.Quit:
			if sm.LoggingEnabled {
				log.Println("SubscriptionManager: received quit signal, stopping.")
			}
			// break out of our infinite loop/select
			return nil
		}
	}
}
go
{ "resource": "" }
q10131
Start
train
// Start launches a background goroutine that long-polls the client's
// URL for events and forwards them on c.EventsChan. Incrementing
// c.runID invalidates any previously started goroutine, so calling
// Start again supersedes the old poll loop.
func (c *Client) Start() {
	u := c.url
	if c.LoggingEnabled {
		log.Println("Now observing changes on", u.String())
	}
	atomic.AddUint64(&(c.runID), 1)
	currentRunID := atomic.LoadUint64(&(c.runID))
	go func(runID uint64, u *url.URL) {
		// Start from "now" expressed in milliseconds since epoch.
		since := time.Now().Unix() * 1000
		for {
			pr, err := c.fetchEvents(since)
			if err != nil {
				if c.LoggingEnabled {
					log.Println(err)
					log.Printf("Reattempting to connect to %s in %d seconds", u.String(), c.Reattempt)
				}
				time.Sleep(c.Reattempt)
				continue
			}
			// We check that its still the same runID as when this goroutine was started
			clientRunID := atomic.LoadUint64(&(c.runID))
			if clientRunID != runID {
				if c.LoggingEnabled {
					log.Printf("Client on URL %s has been stopped, not sending events", u.String())
				}
				return
			}
			if len(pr.Events) > 0 {
				if c.LoggingEnabled {
					log.Println("Got", len(pr.Events), "event(s) from URL", u.String())
				}
				for _, event := range pr.Events {
					since = event.Timestamp
					c.EventsChan <- event
				}
			} else {
				// Only push timestamp forward if its greater than the last we checked
				if pr.Timestamp > since {
					since = pr.Timestamp
				}
			}
		}
	}(currentRunID, u)
}
go
{ "resource": "" }
q10132
fetchEvents
train
func (c Client) fetchEvents(since int64) (PollResponse, error) { u := c.url if c.LoggingEnabled { log.Println("Checking for changes events since", since, "on URL", u.String()) } query := u.Query() query.Set("category", c.category) query.Set("since_time", fmt.Sprintf("%d", since)) query.Set("timeout", fmt.Sprintf("%d", c.Timeout)) u.RawQuery = query.Encode() req, _ := http.NewRequest("GET", u.String(), nil) if c.BasicAuthUsername != "" && c.BasicAuthPassword != "" { req.SetBasicAuth(c.BasicAuthUsername, c.BasicAuthPassword) } resp, err := c.HttpClient.Do(req) if err != nil { msg := fmt.Sprintf("Error while connecting to %s to observe changes. Error was: %s", u, err) return PollResponse{}, errors.New(msg) } if resp.StatusCode != http.StatusOK { msg := fmt.Sprintf("Wrong status code received from longpoll server: %d", resp.StatusCode) return PollResponse{}, errors.New(msg) } decoder := json.NewDecoder(resp.Body) defer resp.Body.Close() var pr PollResponse err = decoder.Decode(&pr) if err != nil { if c.LoggingEnabled { log.Println("Error while decoding poll response: %s", err) } return PollResponse{}, err } return pr, nil }
go
{ "resource": "" }
q10133
NewGenerator
train
func NewGenerator(i *GeneratorInput) (*Generator, error) { if i == nil { i = new(GeneratorInput) } g := &Generator{ lowerLetters: i.LowerLetters, upperLetters: i.UpperLetters, digits: i.Digits, symbols: i.Symbols, } if g.lowerLetters == "" { g.lowerLetters = LowerLetters } if g.upperLetters == "" { g.upperLetters = UpperLetters } if g.digits == "" { g.digits = Digits } if g.symbols == "" { g.symbols = Symbols } return g, nil }
go
{ "resource": "" }
q10134
Generate
train
// Generate produces a random password of the requested length
// containing numDigits digits and numSymbols symbols, with the
// remainder drawn from letters (upper case included unless noUpper).
// When allowRepeat is false, every character must be unique, so each
// class count must not exceed its alphabet size. Characters are
// inserted at random positions so classes are interleaved.
func (g *Generator) Generate(length, numDigits, numSymbols int, noUpper, allowRepeat bool) (string, error) {
	letters := g.lowerLetters
	if !noUpper {
		letters += g.upperLetters
	}
	// Number of plain letters once digits and symbols are accounted for.
	chars := length - numDigits - numSymbols
	if chars < 0 {
		return "", ErrExceedsTotalLength
	}
	if !allowRepeat && chars > len(letters) {
		return "", ErrLettersExceedsAvailable
	}
	if !allowRepeat && numDigits > len(g.digits) {
		return "", ErrDigitsExceedsAvailable
	}
	if !allowRepeat && numSymbols > len(g.symbols) {
		return "", ErrSymbolsExceedsAvailable
	}
	var result string
	// Characters: on a duplicate (when repeats are forbidden) retry the
	// same slot by decrementing the loop counter.
	for i := 0; i < chars; i++ {
		ch, err := randomElement(letters)
		if err != nil {
			return "", err
		}
		if !allowRepeat && strings.Contains(result, ch) {
			i--
			continue
		}
		result, err = randomInsert(result, ch)
		if err != nil {
			return "", err
		}
	}
	// Digits
	for i := 0; i < numDigits; i++ {
		d, err := randomElement(g.digits)
		if err != nil {
			return "", err
		}
		if !allowRepeat && strings.Contains(result, d) {
			i--
			continue
		}
		result, err = randomInsert(result, d)
		if err != nil {
			return "", err
		}
	}
	// Symbols
	for i := 0; i < numSymbols; i++ {
		sym, err := randomElement(g.symbols)
		if err != nil {
			return "", err
		}
		if !allowRepeat && strings.Contains(result, sym) {
			i--
			continue
		}
		result, err = randomInsert(result, sym)
		if err != nil {
			return "", err
		}
	}
	return result, nil
}
go
{ "resource": "" }
q10135
MustGenerate
train
func (g *Generator) MustGenerate(length, numDigits, numSymbols int, noUpper, allowRepeat bool) string { res, err := g.Generate(length, numDigits, numSymbols, noUpper, allowRepeat) if err != nil { panic(err) } return res }
go
{ "resource": "" }
q10136
Generate
train
func Generate(length, numDigits, numSymbols int, noUpper, allowRepeat bool) (string, error) { gen, err := NewGenerator(nil) if err != nil { return "", err } return gen.Generate(length, numDigits, numSymbols, noUpper, allowRepeat) }
go
{ "resource": "" }
q10137
randomInsert
train
// randomInsert inserts val at a cryptographically random position in s
// (including before the first and after the last character) and
// returns the resulting string.
func randomInsert(s, val string) (string, error) {
	if s == "" {
		return val, nil
	}
	// n is uniform in [0, len(s)] so all len(s)+1 gaps are possible.
	n, err := rand.Int(rand.Reader, big.NewInt(int64(len(s)+1)))
	if err != nil {
		return "", err
	}
	i := n.Int64()
	// s[i:] replaces the redundant s[i:len(s)] of the original.
	return s[0:i] + val + s[i:], nil
}
go
{ "resource": "" }
q10138
randomElement
train
// randomElement returns one byte of s, chosen uniformly with
// crypto/rand, as a one-character string. An empty s yields an error
// (previously this panicked inside rand.Int with a non-positive max).
//
// NOTE(review): indexing is by byte, so multi-byte UTF-8 alphabets
// would be split; all call sites here pass ASCII alphabets.
func randomElement(s string) (string, error) {
	if s == "" {
		return "", fmt.Errorf("cannot pick a random element of an empty string")
	}
	n, err := rand.Int(rand.Reader, big.NewInt(int64(len(s))))
	if err != nil {
		return "", err
	}
	return string(s[n.Int64()]), nil
}
go
{ "resource": "" }
q10139
init
train
// init exposes expvar metrics over HTTP on localhost:8080.
func init() {
	sock, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		// BUG FIX: without a listener there is nothing to serve; bail out
		// instead of passing a nil listener to http.Serve, which panics.
		log.Printf("net listen error: %v", err)
		return
	}
	go func() {
		fmt.Println("Metrics available at http://localhost:8080/debug/vars")
		http.Serve(sock, nil)
	}()
}
go
{ "resource": "" }
q10140
ShouldRetry
train
func (r *MyRetryer) ShouldRetry(err error) bool { if awsErr, ok := err.(awserr.Error); ok { switch awsErr.Code() { case dynamodb.ErrCodeProvisionedThroughputExceededException, dynamodb.ErrCodeLimitExceededException: return true default: return false } } return false }
go
{ "resource": "" }
q10141
start
train
// start runs the broker's shard-discovery loop until ctx is cancelled,
// scanning immediately and then re-scanning every 30 seconds.
func (b *broker) start(ctx context.Context) {
	b.findNewShards()
	ticker := time.NewTicker(30 * time.Second)
	// Note: while ticker is a rather naive approach to this problem,
	// it actually simplies a few things. i.e. If we miss a new shard while
	// AWS is resharding we'll pick it up max 30 seconds later.
	// It might be worth refactoring this flow to allow the consumer to
	// to notify the broker when a shard is closed. However, shards don't
	// necessarily close at the same time, so we could potentially get a
	// thundering heard of notifications from the consumer.
	for {
		select {
		case <-ctx.Done():
			ticker.Stop()
			return
		case <-ticker.C:
			b.findNewShards()
		}
	}
}
go
{ "resource": "" }
q10142
findNewShards
train
// findNewShards lists the stream's shards and publishes each not-yet-
// seen shard on the shard channel, remembering it so every shard is
// emitted exactly once. Listing errors are logged and the scan skipped.
func (b *broker) findNewShards() {
	b.shardMu.Lock()
	defer b.shardMu.Unlock()
	b.logger.Log("[BROKER]", "fetching shards")
	shards, err := b.listShards()
	if err != nil {
		b.logger.Log("[BROKER]", err)
		return
	}
	for _, shard := range shards {
		// Skip shards already handed to a consumer.
		if _, ok := b.shards[*shard.ShardId]; ok {
			continue
		}
		b.shards[*shard.ShardId] = shard
		b.shardc <- shard
	}
}
go
{ "resource": "" }
q10143
listShards
train
// listShards pages through the Kinesis ListShards API and returns all
// shards of the broker's stream.
func (b *broker) listShards() ([]*kinesis.Shard, error) {
	var ss []*kinesis.Shard
	var listShardsInput = &kinesis.ListShardsInput{
		StreamName: aws.String(b.streamName),
	}
	for {
		resp, err := b.client.ListShards(listShardsInput)
		if err != nil {
			return nil, fmt.Errorf("ListShards error: %v", err)
		}
		ss = append(ss, resp.Shards...)
		// A nil NextToken marks the final page.
		if resp.NextToken == nil {
			return ss, nil
		}
		listShardsInput = &kinesis.ListShardsInput{
			NextToken:  resp.NextToken,
			StreamName: aws.String(b.streamName),
		}
	}
}
go
{ "resource": "" }
q10144
WithMaxInterval
train
// WithMaxInterval returns an Option overriding the checkpoint's flush
// interval.
func WithMaxInterval(maxInterval time.Duration) Option {
	return func(ck *Checkpoint) {
		ck.maxInterval = maxInterval
	}
}
go
{ "resource": "" }
q10145
WithDynamoClient
train
// WithDynamoClient returns an Option that substitutes a custom DynamoDB
// client (useful for testing or custom configuration).
func WithDynamoClient(svc dynamodbiface.DynamoDBAPI) Option {
	return func(ck *Checkpoint) {
		ck.client = svc
	}
}
go
{ "resource": "" }
q10146
New
train
func New(appName, tableName string, opts ...Option) (*Checkpoint, error) { client := dynamodb.New(session.New(aws.NewConfig())) ck := &Checkpoint{ tableName: tableName, appName: appName, client: client, maxInterval: time.Duration(1 * time.Minute), done: make(chan struct{}), mu: &sync.Mutex{}, checkpoints: map[key]string{}, retryer: &DefaultRetryer{}, } for _, opt := range opts { opt(ck) } go ck.loop() return ck, nil }
go
{ "resource": "" }
q10147
WithClient
train
// WithClient returns an Option that substitutes a custom Kinesis client.
func WithClient(client kinesisiface.KinesisAPI) Option {
	return func(consumer *Consumer) {
		consumer.client = client
	}
}
go
{ "resource": "" }
q10148
New
train
// New creates a Consumer for the given Kinesis stream with no-op
// checkpoint, counter and logger defaults, applies any options, and
// falls back to a default Kinesis client when none was supplied.
// An empty stream name is rejected.
func New(streamName string, opts ...Option) (*Consumer, error) {
	if streamName == "" {
		return nil, fmt.Errorf("must provide stream name")
	}
	// new consumer with no-op checkpoint, counter, and logger
	c := &Consumer{
		streamName:               streamName,
		initialShardIteratorType: kinesis.ShardIteratorTypeLatest,
		checkpoint:               &noopCheckpoint{},
		counter:                  &noopCounter{},
		logger: &noopLogger{
			logger: log.New(ioutil.Discard, "", log.LstdFlags),
		},
	}
	// override defaults
	for _, opt := range opts {
		opt(c)
	}
	// default client if none provided
	if c.client == nil {
		newSession, err := session.NewSession(aws.NewConfig())
		if err != nil {
			return nil, err
		}
		c.client = kinesis.New(newSession)
	}
	return c, nil
}
go
{ "resource": "" }
q10149
Scan
train
// Scan launches the shard-discovery broker and then a goroutine per
// discovered shard, each running ScanShard with fn. It blocks until
// ctx is cancelled or a shard scan fails; the first shard error cancels
// the rest and is returned (nil on clean shutdown).
func (c *Consumer) Scan(ctx context.Context, fn ScanFunc) error {
	var (
		errc   = make(chan error, 1)
		shardc = make(chan *kinesis.Shard, 1)
		broker = newBroker(c.client, c.streamName, shardc, c.logger)
	)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go broker.start(ctx)
	// Close the shard channel on cancellation so the range below ends.
	go func() {
		<-ctx.Done()
		close(shardc)
	}()
	// process each of the shards
	for shard := range shardc {
		go func(shardID string) {
			if err := c.ScanShard(ctx, shardID, fn); err != nil {
				select {
				case errc <- fmt.Errorf("shard %s error: %v", shardID, err):
					// first error to occur
					cancel()
				default:
					// error has already occured
				}
			}
		}(aws.StringValue(shard.ShardId))
	}
	// errc is buffered, so a stored error (or none) is delivered below.
	close(errc)
	return <-errc
}
go
{ "resource": "" }
q10150
ScanShard
train
// ScanShard reads records from a single shard, starting after the last
// checkpointed sequence number, and invokes fn for each record.
// Checkpointing after each record is skipped when fn returns
// SkipCheckpoint; any other non-nil error from fn aborts the scan.
// It returns nil on context cancellation or when the shard closes.
func (c *Consumer) ScanShard(ctx context.Context, shardID string, fn ScanFunc) error {
	// get last seq number from checkpoint
	lastSeqNum, err := c.checkpoint.Get(c.streamName, shardID)
	if err != nil {
		return fmt.Errorf("get checkpoint error: %v", err)
	}
	// get shard iterator
	shardIterator, err := c.getShardIterator(c.streamName, shardID, lastSeqNum)
	if err != nil {
		return fmt.Errorf("get shard iterator error: %v", err)
	}
	c.logger.Log("[START]\t", shardID, lastSeqNum)
	defer func() {
		c.logger.Log("[STOP]\t", shardID)
	}()
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			resp, err := c.client.GetRecords(&kinesis.GetRecordsInput{
				ShardIterator: shardIterator,
			})
			// attempt to recover from GetRecords error by getting new shard iterator
			if err != nil {
				shardIterator, err = c.getShardIterator(c.streamName, shardID, lastSeqNum)
				if err != nil {
					return fmt.Errorf("get shard iterator error: %v", err)
				}
				continue
			}
			// loop over records, call callback func
			for _, r := range resp.Records {
				select {
				case <-ctx.Done():
					return nil
				default:
					err := fn(r)
					if err != nil && err != SkipCheckpoint {
						return err
					}
					if err != SkipCheckpoint {
						if err := c.checkpoint.Set(c.streamName, shardID, *r.SequenceNumber); err != nil {
							return err
						}
					}
					c.counter.Add("records", 1)
					lastSeqNum = *r.SequenceNumber
				}
			}
			// A nil/unchanged next iterator means the shard has been closed.
			if isShardClosed(resp.NextShardIterator, shardIterator) {
				c.logger.Log("[CLOSED]\t", shardID)
				return nil
			}
			shardIterator = resp.NextShardIterator
		}
	}
}
go
{ "resource": "" }
q10151
New
train
// New creates a Redis-backed Checkpoint for the given application,
// connecting to REDIS_URL when set (falling back to localhost) and
// verifying connectivity with a ping before returning.
func New(appName string) (*Checkpoint, error) {
	addr := os.Getenv("REDIS_URL")
	if addr == "" {
		addr = localhost
	}
	client := redis.NewClient(&redis.Options{Addr: addr})
	// verify we can ping server
	_, err := client.Ping().Result()
	if err != nil {
		return nil, err
	}
	return &Checkpoint{
		appName: appName,
		client:  client,
	}, nil
}
go
{ "resource": "" }
q10152
Get
train
// Get returns the stored sequence number for the given stream/shard
// pair, or the empty string when none is set.
// NOTE(review): the redis error is deliberately discarded — a missing
// key surfaces as an error (redis.Nil) and must read as ""; verify that
// swallowing other failure modes here is also intended.
func (c *Checkpoint) Get(streamName, shardID string) (string, error) {
	val, _ := c.client.Get(c.key(streamName, shardID)).Result()
	return val, nil
}
go
{ "resource": "" }
q10153
key
train
// key builds the namespaced Redis key under which the checkpoint for a
// stream/shard pair is stored: "<app>:checkpoint:<stream>:<shard>".
func (c *Checkpoint) key(streamName, shardID string) string {
	return c.appName + ":checkpoint:" + streamName + ":" + shardID
}
go
{ "resource": "" }
q10154
New
train
// New creates a Postgres-backed Checkpoint for the given application
// and table, applies any options, and starts the background flush
// loop. Application and table names are required; connectionStr is
// passed verbatim to the postgres driver.
func New(appName, tableName, connectionStr string, opts ...Option) (*Checkpoint, error) {
	if appName == "" {
		return nil, errors.New("application name not defined")
	}
	if tableName == "" {
		return nil, errors.New("table name not defined")
	}
	// sql.Open only validates arguments; the first real connection is
	// established lazily on first use.
	conn, err := sql.Open("postgres", connectionStr)
	if err != nil {
		return nil, err
	}
	ck := &Checkpoint{
		conn:        conn,
		appName:     appName,
		tableName:   tableName,
		done:        make(chan struct{}),
		maxInterval: 1 * time.Minute,
		mu:          new(sync.Mutex),
		checkpoints: map[key]string{},
	}
	for _, opt := range opts {
		opt(ck)
	}
	go ck.loop()
	return ck, nil
}
go
{ "resource": "" }
q10155
Shutdown
train
// Shutdown signals the background flush loop to stop, performs a final
// save, and closes the database connection (via defer, after save).
// NOTE(review): the Close error is discarded by the defer; only the
// save error is reported.
func (c *Checkpoint) Shutdown() error {
	defer c.conn.Close()
	c.done <- struct{}{}
	return c.save()
}
go
{ "resource": "" }
q10156
newNonTS
train
// newNonTS constructs an empty non-thread-safe set.
func newNonTS() *SetNonTS {
	s := &SetNonTS{}
	s.m = make(map[interface{}]struct{})
	// Ensure interface compliance (compile-time check only; the
	// assignment has no runtime effect)
	var _ Interface = s
	return s
}
go
{ "resource": "" }
q10157
IsEqual
train
// IsEqual reports whether s and t contain exactly the same items.
func (s *set) IsEqual(t Interface) bool {
	// Force locking only if given set is threadsafe.
	if conv, ok := t.(*Set); ok {
		conv.l.RLock()
		defer conv.l.RUnlock()
	}
	// return false if they are not the same size
	if sameSize := len(s.m) == t.Size(); !sameSize {
		return false
	}
	// Same size, so membership of every item of t in s implies equality.
	equal := true
	t.Each(func(item interface{}) bool {
		_, equal = s.m[item]
		return equal // if false, Each() will end
	})
	return equal
}
go
{ "resource": "" }
q10158
String
train
func (s *set) String() string { t := make([]string, 0, len(s.List())) for _, item := range s.List() { t = append(t, fmt.Sprintf("%v", item)) } return fmt.Sprintf("[%s]", strings.Join(t, ", ")) }
go
{ "resource": "" }
q10159
Intersection
train
func Intersection(set1, set2 Interface, sets ...Interface) Interface { all := Union(set1, set2, sets...) result := Union(set1, set2, sets...) all.Each(func(item interface{}) bool { if !set1.Has(item) || !set2.Has(item) { result.Remove(item) } for _, set := range sets { if !set.Has(item) { result.Remove(item) } } return true }) return result }
go
{ "resource": "" }
q10160
SymmetricDifference
train
func SymmetricDifference(s Interface, t Interface) Interface { u := Difference(s, t) v := Difference(t, s) return Union(u, v) }
go
{ "resource": "" }
q10161
StringSlice
train
func StringSlice(s Interface) []string { slice := make([]string, 0) for _, item := range s.List() { v, ok := item.(string) if !ok { continue } slice = append(slice, v) } return slice }
go
{ "resource": "" }
q10162
IntSlice
train
func IntSlice(s Interface) []int { slice := make([]int, 0) for _, item := range s.List() { v, ok := item.(int) if !ok { continue } slice = append(slice, v) } return slice }
go
{ "resource": "" }
q10163
RequestTeamInfo
train
func RequestTeamInfo(tc *ginoauth2.TokenContainer, uri string) ([]byte, error) { var uv = make(url.Values) uv.Set("member", tc.Scopes["uid"].(string)) info_url := uri + "?" + uv.Encode() client := &http.Client{Transport: &ginoauth2.Transport} req, err := http.NewRequest("GET", info_url, nil) if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tc.Token.AccessToken)) resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() return ioutil.ReadAll(resp.Body) }
go
{ "resource": "" }
q10164
GroupCheck
train
func GroupCheck(at []AccessTuple) func(tc *ginoauth2.TokenContainer, ctx *gin.Context) bool { ats := at return func(tc *ginoauth2.TokenContainer, ctx *gin.Context) bool { blob, err := RequestTeamInfo(tc, TeamAPI) if err != nil { glog.Errorf("[Gin-OAuth] failed to get team info, caused by: %s", err) return false } var data []TeamInfo err = json.Unmarshal(blob, &data) if err != nil { glog.Errorf("[Gin-OAuth] JSON.Unmarshal failed, caused by: %s", err) return false } granted := false for _, teamInfo := range data { for idx := range ats { at := ats[idx] if teamInfo.Id == at.Uid { granted = true glog.Infof("[Gin-OAuth] Grant access to %s as team member of \"%s\"\n", tc.Scopes["uid"].(string), teamInfo.Id) } if teamInfo.Type == "official" { ctx.Set("uid", tc.Scopes["uid"].(string)) ctx.Set("team", teamInfo.Id) } } } return granted } }
go
{ "resource": "" }
q10165
ScopeCheck
train
func ScopeCheck(name string, scopes ...string) func(tc *ginoauth2.TokenContainer, ctx *gin.Context) bool { glog.Infof("ScopeCheck %s configured to grant access for scopes: %v", name, scopes) configuredScopes := scopes return func(tc *ginoauth2.TokenContainer, ctx *gin.Context) bool { scopesFromToken := make([]string, 0) for _, s := range configuredScopes { if cur, ok := tc.Scopes[s]; ok { glog.V(2).Infof("Found configured scope %s", s) scopesFromToken = append(scopesFromToken, s) ctx.Set(s, cur) // set value from token of configured scope to the context, which you can use in your application. } } //Getting the uid for identification of the service calling if cur, ok := tc.Scopes["uid"]; ok { ctx.Set("uid", cur) } return len(scopesFromToken) > 0 } }
go
{ "resource": "" }
q10166
Valid
train
func (t *TokenContainer) Valid() bool { if t.Token == nil { return false } return t.Token.Valid() }
go
{ "resource": "" }
q10167
Setup
train
func Setup(redirectURL, credFile string, scopes []string, secret []byte) { store = sessions.NewCookieStore(secret) var c Credentials file, err := ioutil.ReadFile(credFile) if err != nil { glog.Fatalf("[Gin-OAuth] File error: %v\n", err) } json.Unmarshal(file, &c) conf = &oauth2.Config{ ClientID: c.ClientID, ClientSecret: c.ClientSecret, RedirectURL: redirectURL, Scopes: scopes, Endpoint: google.Endpoint, } }
go
{ "resource": "" }
q10168
Create
train
// Create wraps root so that every path is resolved under prefix.
func Create(root vfs.Filesystem, prefix string) *FS {
	return &FS{Filesystem: root, Prefix: prefix}
}
go
{ "resource": "" }
q10169
PrefixPath
train
// PrefixPath returns path with the filesystem's prefix prepended.
func (fs *FS) PrefixPath(path string) string {
	sep := string(fs.PathSeparator())
	return fs.Prefix + sep + path
}
go
{ "resource": "" }
q10170
OpenFile
train
// OpenFile delegates to the wrapped filesystem using the prefixed name.
func (fs *FS) OpenFile(name string, flag int, perm os.FileMode) (vfs.File, error) {
	prefixed := fs.PrefixPath(name)
	return fs.Filesystem.OpenFile(prefixed, flag, perm)
}
go
{ "resource": "" }
q10171
Remove
train
// Remove delegates to the wrapped filesystem using the prefixed name.
func (fs *FS) Remove(name string) error {
	prefixed := fs.PrefixPath(name)
	return fs.Filesystem.Remove(prefixed)
}
go
{ "resource": "" }
q10172
Rename
train
// Rename delegates to the wrapped filesystem with both paths prefixed.
func (fs *FS) Rename(oldpath, newpath string) error {
	src := fs.PrefixPath(oldpath)
	dst := fs.PrefixPath(newpath)
	return fs.Filesystem.Rename(src, dst)
}
go
{ "resource": "" }
q10173
Mkdir
train
// Mkdir delegates to the wrapped filesystem using the prefixed name.
func (fs *FS) Mkdir(name string, perm os.FileMode) error {
	prefixed := fs.PrefixPath(name)
	return fs.Filesystem.Mkdir(prefixed, perm)
}
go
{ "resource": "" }
q10174
Stat
train
// Stat delegates to the wrapped filesystem using the prefixed name.
func (fs *FS) Stat(name string) (os.FileInfo, error) {
	prefixed := fs.PrefixPath(name)
	return fs.Filesystem.Stat(prefixed)
}
go
{ "resource": "" }
q10175
ReadDir
train
// ReadDir delegates to the wrapped filesystem using the prefixed path.
func (fs *FS) ReadDir(path string) ([]os.FileInfo, error) {
	prefixed := fs.PrefixPath(path)
	return fs.Filesystem.ReadDir(prefixed)
}
go
{ "resource": "" }
q10176
NewMemFile
train
// NewMemFile creates an in-memory file named name, backed by buf and
// guarded by rwMutex.
func NewMemFile(name string, rwMutex *sync.RWMutex, buf *[]byte) *MemFile {
	f := &MemFile{
		Buffer: NewBuffer(buf),
		mutex:  rwMutex,
		name:   name,
	}
	return f
}
go
{ "resource": "" }
q10177
Truncate
train
func (b MemFile) Truncate(size int64) (err error) { b.mutex.Lock() err = b.Buffer.Truncate(size) b.mutex.Unlock() return }
go
{ "resource": "" }
q10178
ReadFile
train
func ReadFile(fs Filesystem, filename string) ([]byte, error) { f, err := fs.OpenFile(filename, os.O_RDONLY, 0) if err != nil { return nil, err } defer f.Close() // It's a good but not certain bet that FileInfo will tell us exactly how // much to read, so let's try it but be prepared for the answer to be wrong. var n int64 if fi, err := fs.Stat(filename); err == nil { if size := fi.Size(); size < 1e9 { n = size } } // As initial capacity for readAll, use n + a little extra in case Size is // zero, and to avoid another allocation after Read has filled the buffer. // The readAll call will read into its allocated internal buffer cheaply. If // the size was wrong, we'll either waste some space off the end or // reallocate as needed, but in the overwhelmingly common case we'll get it // just right. return readAll(f, n+bytes.MinRead) }
go
{ "resource": "" }
q10179
Mkdir
train
// Mkdir always fails with ErrReadOnly: the filesystem is read-only.
func (fs RoFS) Mkdir(name string, perm os.FileMode) error {
	return ErrReadOnly
}
go
{ "resource": "" }
q10180
Create
train
func Create() *MemFS { root := &fileInfo{ name: "/", dir: true, } return &MemFS{ root: root, wd: root, lock: &sync.RWMutex{}, } }
go
{ "resource": "" }
q10181
Mkdir
train
func (fs *MemFS) Mkdir(name string, perm os.FileMode) error { fs.lock.Lock() defer fs.lock.Unlock() name = filepath.Clean(name) base := filepath.Base(name) parent, fi, err := fs.fileInfo(name) if err != nil { return &os.PathError{"mkdir", name, err} } if fi != nil { return &os.PathError{"mkdir", name, fmt.Errorf("Directory %q already exists", name)} } fi = &fileInfo{ name: base, dir: true, mode: perm, parent: parent, modTime: time.Now(), fs: fs, } parent.childs[base] = fi return nil }
go
{ "resource": "" }
q10182
Write
train
// Write always fails with ErrReadOnly: the file is opened read-only.
func (f *roFile) Write(p []byte) (n int, err error) {
	return 0, ErrReadOnly
}
go
{ "resource": "" }
q10183
Read
train
// Read always fails with ErrWriteOnly: the file is opened write-only.
func (f *woFile) Read(p []byte) (n int, err error) {
	return 0, ErrWriteOnly
}
go
{ "resource": "" }
q10184
makeSlice
train
func makeSlice(n int) (b []byte, err error) { // If the make fails, give a known error. defer func() { if recover() != nil { b = nil err = ErrTooLarge return } }() b = make([]byte, n) return }
go
{ "resource": "" }
q10185
OpenFile
train
// OpenFile always fails with the filesystem's configured error.
func (fs DummyFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	return nil, fs.err
}
go
{ "resource": "" }
q10186
Mkdir
train
// Mkdir always fails with the filesystem's configured error.
func (fs DummyFS) Mkdir(name string, perm os.FileMode) error {
	return fs.err
}
go
{ "resource": "" }
q10187
Stat
train
// Stat always fails with the filesystem's configured error.
func (fs DummyFS) Stat(name string) (os.FileInfo, error) {
	return nil, fs.err
}
go
{ "resource": "" }
q10188
ReadDir
train
// ReadDir always fails with the filesystem's configured error.
func (fs DummyFS) ReadDir(path string) ([]os.FileInfo, error) {
	return nil, fs.err
}
go
{ "resource": "" }
q10189
Write
train
// Write always fails with the file's configured error.
func (f DumFile) Write(p []byte) (n int, err error) {
	return 0, f.err
}
go
{ "resource": "" }
q10190
ReadAt
train
// ReadAt always fails with the file's configured error.
func (f DumFile) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, f.err
}
go
{ "resource": "" }
q10191
Seek
train
// Seek always fails with the file's configured error.
func (f DumFile) Seek(offset int64, whence int) (int64, error) {
	return 0, f.err
}
go
{ "resource": "" }
q10192
OpenFile
train
// OpenFile forwards directly to the operating system.
func (fs OsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	return os.OpenFile(name, flag, perm)
}
go
{ "resource": "" }
q10193
Mkdir
train
// Mkdir forwards directly to the operating system.
func (fs OsFS) Mkdir(name string, perm os.FileMode) error {
	return os.Mkdir(name, perm)
}
go
{ "resource": "" }
q10194
Rename
train
// Rename forwards directly to the operating system.
func (fs OsFS) Rename(oldpath, newpath string) error {
	return os.Rename(oldpath, newpath)
}
go
{ "resource": "" }
q10195
ReadDir
train
// ReadDir forwards directly to ioutil.ReadDir.
func (fs OsFS) ReadDir(path string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(path)
}
go
{ "resource": "" }
q10196
Create
train
// Create returns a MountFS with rootFS as fallback and an initially
// empty mount table.
func Create(rootFS vfs.Filesystem) *MountFS {
	fs := &MountFS{
		rootFS:  rootFS,
		mounts:  make(map[string]vfs.Filesystem),
		parents: make(map[string][]string),
	}
	return fs
}
go
{ "resource": "" }
q10197
findMount
train
func findMount(path string, mounts map[string]vfs.Filesystem, fallback vfs.Filesystem, pathSeparator string) (vfs.Filesystem, string) { path = filepath.Clean(path) segs := vfs.SplitPath(path, pathSeparator) l := len(segs) for i := l; i > 0; i-- { mountPath := strings.Join(segs[0:i], pathSeparator) if fs, ok := mounts[mountPath]; ok { return fs, "/" + strings.Join(segs[i:l], pathSeparator) } } return fallback, path }
go
{ "resource": "" }
q10198
Remove
train
func (fs MountFS) Remove(name string) error { mount, innerPath := findMount(name, fs.mounts, fs.rootFS, string(fs.PathSeparator())) return mount.Remove(innerPath) }
go
{ "resource": "" }
q10199
Rename
train
func (fs MountFS) Rename(oldpath, newpath string) error { oldMount, oldInnerPath := findMount(oldpath, fs.mounts, fs.rootFS, string(fs.PathSeparator())) newMount, newInnerPath := findMount(newpath, fs.mounts, fs.rootFS, string(fs.PathSeparator())) if oldMount != newMount { return ErrBoundary } return oldMount.Rename(oldInnerPath, newInnerPath) }
go
{ "resource": "" }