_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
// authMiddleware applies CORS headers for the request's Origin and, when
// basic-auth is enabled on the server, rejects requests whose basic-auth
// password does not match the configured one.
func (s *Server) authMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	reqOrigin := r.Header.Get("Origin")
	s.setAllowedOrigins(rw, reqOrigin)
	defer r.Body.Close()
	if s.auth {
		_, password, ok := r.BasicAuth()
		// Only the password half of the credentials is checked; the
		// username is ignored.
		// NOTE(review): the original comment claimed tribe/agreements
		// requests bypass auth (to populate the snaptel help page in
		// tribe mode), but no such endpoint check exists here — confirm
		// whether that exemption is implemented elsewhere.
		if ok && password == s.authpwd {
			next(rw, r)
		} else {
			v2.Write(401, v2.UnauthError{Code: 401, Message: "Not authorized. Please specify the same password that used to start snapteld. E.g: [snaptel -p plugin list] or [curl http://localhost:8181/v2/plugins -u snap]"}, rw)
		}
	} else {
		next(rw, r)
	}
}
"resource": ""
} |
// setAllowedOrigins writes CORS response headers when the request origin
// ro is in the server's allowed-origin set. A loopback origin is answered
// with the wildcard "*" because some browsers do not honor CORS for
// localhost origins. Nothing is written when no origins are configured or
// ro is not in the set.
func (s *Server) setAllowedOrigins(rw http.ResponseWriter, ro string) {
	if len(s.allowedOrigins) > 0 {
		if _, ok := s.allowedOrigins[ro]; ok {
			// localhost CORS is not supported by all browsers. It has to use "*".
			if strings.Contains(ro, "127.0.0.1") || strings.Contains(ro, "localhost") {
				ro = "*"
			}
			rw.Header().Set("Access-Control-Allow-Origin", ro)
			rw.Header().Set("Access-Control-Allow-Methods", allowedMethods)
			rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders)
			rw.Header().Set("Access-Control-Max-Age", strconv.Itoa(maxAge))
		}
	}
}
"resource": ""
} |
q15402 | NewCronSchedule | train | func NewCronSchedule(entry string) *CronSchedule {
schedule := cron.New()
return &CronSchedule{
entry: entry,
schedule: schedule,
enabled: false,
}
} | go | {
"resource": ""
} |
q15403 | Validate | train | func (c *CronSchedule) Validate() error {
if c.entry == "" {
return ErrMissingCronEntry
}
_, err := cron.Parse(c.entry)
if err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
// Wait blocks until the next cron firing after now and returns a response
// reporting how many scheduled firings fell between last and now. On the
// first call (zero-valued last) the window starts at now, so no misses
// are counted. If registering the cron entry fails, state is set to Error
// and the error is carried in the response without sleeping.
func (c *CronSchedule) Wait(last time.Time) Response {
	var err error
	now := time.Now()
	// first run
	if (last == time.Time{}) {
		last = now
	}
	// schedule not enabled, either due to first run or invalid cron entry
	if !c.enabled {
		// Register a no-op job so the entry's Schedule becomes
		// available via c.schedule.Entries() below.
		err = c.schedule.AddFunc(c.entry, func() {})
		if err != nil {
			c.state = Error
		} else {
			c.enabled = true
		}
	}
	var misses uint
	if c.enabled {
		s := c.schedule.Entries()[0].Schedule
		// calculate misses: count scheduled firings in (last, now]
		for next := last; next.Before(now); {
			next = s.Next(next)
			if next.After(now) {
				break
			}
			misses++
		}
		// wait until the next scheduled firing after now
		waitTime := s.Next(now)
		time.Sleep(waitTime.Sub(now))
	}
	return &CronScheduleResponse{
		state:    c.GetState(),
		err:      err,
		missed:   misses,
		lastTime: time.Now(),
	}
}
"resource": ""
} |
// Add stores the config data node cdn in the tree at namespace ns.
func (c *ConfigDataTree) Add(ns []string, cdn *ConfigDataNode) {
	c.cTree.Add(ns, cdn)
}
"resource": ""
} |
q15406 | Get | train | func (c *ConfigDataTree) Get(ns []string) *ConfigDataNode {
n := c.cTree.Get(ns)
if n == nil {
return nil
}
switch t := n.(type) {
case ConfigDataNode:
return &t
default:
return t.(*ConfigDataNode)
}
} | go | {
"resource": ""
} |
q15407 | CacheTTL | train | func CacheTTL(t time.Duration) metaOp {
return func(m *PluginMeta) {
m.CacheTTL = t
}
} | go | {
"resource": ""
} |
q15408 | NewPluginMeta | train | func NewPluginMeta(name string, version int, pluginType PluginType, acceptContentTypes, returnContentTypes []string, opts ...metaOp) *PluginMeta {
// An empty accepted content type default to "snap.*"
if len(acceptContentTypes) == 0 {
acceptContentTypes = append(acceptContentTypes, "snap.*")
}
// Validate content type formats
for _, s := range acceptContentTypes {
b, e := regexp.MatchString(`^[a-z0-9*]+\.[a-z0-9*]+$`, s)
if e != nil {
panic(e)
}
if !b {
panic(fmt.Sprintf("Bad accept content type [%s] for [%d] [%s]", name, version, s))
}
}
for _, s := range returnContentTypes {
b, e := regexp.MatchString(`^[a-z0-9*]+\.[a-z0-9*]+$`, s)
if e != nil {
panic(e)
}
if !b {
panic(fmt.Sprintf("Bad return content type [%s] for [%d] [%s]", name, version, s))
}
}
p := &PluginMeta{
Name: name,
Version: version,
Type: pluginType,
AcceptedContentTypes: acceptContentTypes,
ReturnedContentTypes: returnContentTypes,
//set the default for concurrency count to 1
ConcurrencyCount: 1,
}
for _, opt := range opts {
opt(p)
}
return p
} | go | {
"resource": ""
} |
// Select returns the available plugin bound to id, binding the first
// not-yet-busy plugin from aps when the id is new. It errors when every
// plugin in aps is already bound to some other id.
func (cb *configBased) Select(aps []AvailablePlugin, id string) (AvailablePlugin, error) {
	if ap, ok := cb.plugins[id]; ok && ap != nil {
		return ap, nil
	}
	// add first one in case it's new id
	for _, ap := range aps {
		available := true
		// linear scan over current bindings; fine for the small pool
		// sizes involved here
		for _, busyPlugin := range cb.plugins {
			if ap == busyPlugin {
				available = false
			}
		}
		if available {
			cb.plugins[id] = ap
			return ap, nil
		}
	}
	cb.logger.WithFields(log.Fields{
		"_block":   "findAvailablePlugin",
		"strategy": cb.String(),
		"error":    fmt.Sprintf("%v of %v plugins are available", len(aps)-len(cb.plugins), len(aps)),
	}).Error(ErrCouldNotSelect)
	return nil, ErrCouldNotSelect
}
"resource": ""
} |
q15410 | GobEncode | train | func (c *ConfigDataNode) GobEncode() ([]byte, error) {
w := new(bytes.Buffer)
encoder := gob.NewEncoder(w)
if err := encoder.Encode(&c.table); err != nil {
return nil, err
}
return w.Bytes(), nil
} | go | {
"resource": ""
} |
q15411 | GobDecode | train | func (c *ConfigDataNode) GobDecode(buf []byte) error {
r := bytes.NewBuffer(buf)
c.mutex = new(sync.Mutex)
decoder := gob.NewDecoder(r)
return decoder.Decode(&c.table)
} | go | {
"resource": ""
} |
// UnmarshalJSON decodes a flat JSON object into the node's table, mapping
// JSON strings, bools, and numbers to the corresponding ctypes values.
// Numbers are decoded via json.Number: integral values become
// ConfigValueInt, otherwise ConfigValueFloat. Any other JSON value type
// (arrays, nested objects, null) is an error. The mutex is re-initialized
// since it is not part of the serialized form.
func (c *ConfigDataNode) UnmarshalJSON(data []byte) error {
	t := map[string]interface{}{}
	c.table = map[string]ctypes.ConfigValue{}
	dec := json.NewDecoder(bytes.NewReader(data))
	// UseNumber avoids lossy float64 conversion for large integers.
	dec.UseNumber()
	if err := dec.Decode(&t); err != nil {
		return err
	}
	for k, i := range t {
		// note: this switch's t shadows the outer map t
		switch t := i.(type) {
		case string:
			c.table[k] = ctypes.ConfigValueStr{Value: t}
		case bool:
			c.table[k] = ctypes.ConfigValueBool{Value: t}
		case json.Number:
			if v, err := t.Int64(); err == nil {
				c.table[k] = ctypes.ConfigValueInt{Value: int(v)}
				continue
			}
			if v, err := t.Float64(); err == nil {
				c.table[k] = ctypes.ConfigValueFloat{Value: v}
				continue
			}
		default:
			return fmt.Errorf("Error Unmarshalling JSON ConfigDataNode. Key: %v Type: %v is unsupported.", k, t)
		}
	}
	c.mutex = new(sync.Mutex)
	return nil
}
"resource": ""
} |
q15413 | NewNode | train | func NewNode() *ConfigDataNode {
return &ConfigDataNode{
mutex: new(sync.Mutex),
table: make(map[string]ctypes.ConfigValue),
}
} | go | {
"resource": ""
} |
q15414 | AddItem | train | func (c *ConfigDataNode) AddItem(k string, v ctypes.ConfigValue) {
// And empty is a noop
if k == "" {
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.table[k] = v
} | go | {
"resource": ""
} |
q15415 | ReverseMergeInPlace | train | func (c *ConfigDataNode) ReverseMergeInPlace(n ctree.Node) ctree.Node {
cd := n.(*ConfigDataNode)
new_table := make(map[string]ctypes.ConfigValue)
// Lock here since we are modifying c.table
c.mutex.Lock()
defer c.mutex.Unlock()
t := cd.Table()
t2 := c.table
for k, v := range t {
new_table[k] = v
}
for k, v := range t2 {
new_table[k] = v
}
c.table = new_table
return c
} | go | {
"resource": ""
} |
q15416 | ReverseMerge | train | func (c *ConfigDataNode) ReverseMerge(n ctree.Node) *ConfigDataNode {
cd := n.(*ConfigDataNode)
copy := NewNode()
t2 := c.table
for k, v := range cd.Table() {
copy.table[k] = v
}
for k, v := range t2 {
copy.table[k] = v
}
return copy
} | go | {
"resource": ""
} |
q15417 | ApplyDefaults | train | func (c *ConfigDataNode) ApplyDefaults(defaults map[string]ctypes.ConfigValue) {
// Lock here since we are modifying c.table
c.mutex.Lock()
defer c.mutex.Unlock()
for name, def := range defaults {
if _, ok := c.table[name]; !ok {
c.table[name] = def
}
}
} | go | {
"resource": ""
} |
// DeleteItem removes key k from the table.
// NOTE(review): unlike the other mutating methods this one has a value
// receiver; it still works because mutex (a pointer) and table (a map)
// are reference types, but a pointer receiver would be more consistent —
// confirm no interface relies on the value method set before changing it.
func (c ConfigDataNode) DeleteItem(k string) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	delete(c.table, k)
}
"resource": ""
} |
q15419 | broadcast | train | func (t *tribe) broadcast(mt msgType, msg interface{}, notify chan<- struct{}) error {
raw, err := encodeMessage(mt, msg)
if err != nil {
return err
}
t.broadcasts.QueueBroadcast(&broadcast{
msg: raw,
notify: notify,
})
return nil
} | go | {
"resource": ""
} |
// GetDefaultConfig returns a REST API Config populated entirely from the
// package-level defaults.
func GetDefaultConfig() *Config {
	return &Config{
		Enable:           defaultEnable,
		Port:             defaultPort,
		Address:          defaultAddress,
		HTTPS:            defaultHTTPS,
		RestCertificate:  defaultRestCertificate,
		RestKey:          defaultRestKey,
		RestAuth:         defaultAuth,
		RestAuthPassword: defaultAuthPassword,
		portSetByConfig:  defaultPortSetByConfig,
		Pprof:            defaultPprof,
		Corsd:            defaultCorsd,
	}
}
"resource": ""
} |
// TaskDeadlineDuration returns a TaskOption that sets the task's deadline
// duration to v. The returned option itself returns an option restoring
// the previous value (undo pattern).
func TaskDeadlineDuration(v time.Duration) TaskOption {
	return func(t Task) TaskOption {
		previous := t.DeadlineDuration()
		t.SetDeadlineDuration(v)
		log.WithFields(log.Fields{
			"_module":                "core",
			"_block":                 "TaskDeadlineDuration",
			"task-id":                t.ID(),
			"task-name":              t.GetName(),
			"task deadline duration": t.DeadlineDuration(),
		}).Debug("Setting deadlineDuration on task")
		return TaskDeadlineDuration(previous)
	}
}
"resource": ""
} |
// OptionStopOnFailure returns a TaskOption that sets the number of
// consecutive failures after which a task is stopped. The returned
// option itself returns an option restoring the previous limit (undo
// pattern, mirroring TaskDeadlineDuration).
func OptionStopOnFailure(v int) TaskOption {
	return func(t Task) TaskOption {
		previous := t.GetStopOnFailure()
		t.SetStopOnFailure(v)
		log.WithFields(log.Fields{
			"_module":                   "core",
			"_block":                    "OptionStopOnFailure",
			"task-id":                   t.ID(),
			"task-name":                 t.GetName(),
			"consecutive failure limit": t.GetStopOnFailure(),
		}).Debug("Setting stop-on-failure limit for task")
		return OptionStopOnFailure(previous)
	}
}
"resource": ""
} |
// CacheExpiration returns a PluginControlOpt that sets the metric cache
// expiration. NOTE: it writes the package-level
// strategy.GlobalCacheExpiration, not a field on the receiving
// pluginControl, so it affects all controls in the process.
func CacheExpiration(t time.Duration) PluginControlOpt {
	return func(c *pluginControl) {
		strategy.GlobalCacheExpiration = t
	}
}
"resource": ""
} |
// OptSetConfig returns a PluginControlOpt that installs cfg on the
// control and propagates the plugin config and load timeout to the
// plugin manager and runner.
func OptSetConfig(cfg *Config) PluginControlOpt {
	return func(c *pluginControl) {
		c.Config = cfg
		c.pluginManager.SetPluginConfig(cfg.Plugins)
		c.pluginManager.SetPluginLoadTimeout(c.Config.PluginLoadTimeout)
		c.pluginRunner.SetPluginLoadTimeout(c.Config.PluginLoadTimeout)
	}
}
"resource": ""
} |
// OptSetTags returns a PluginControlOpt that hands the per-namespace tag
// map to the plugin manager.
func OptSetTags(tags map[string]map[string]string) PluginControlOpt {
	return func(c *pluginControl) {
		c.pluginManager.SetPluginTags(tags)
	}
}
"resource": ""
} |
// New constructs and wires a pluginControl from cfg: event manager,
// metric catalog, plugin manager, signing manager, runner, and
// subscription groups. Options derived from cfg are applied last, after
// all internal members exist (some options touch those members). Panics
// if the runner fails to start.
func New(cfg *Config) *pluginControl {
	// construct a slice of options from the input configuration
	opts := []PluginControlOpt{
		MaxRunningPlugins(cfg.MaxRunningPlugins),
		CacheExpiration(cfg.CacheExpiration.Duration),
		OptSetConfig(cfg),
		OptSetTags(cfg.Tags),
		MaxPluginRestarts(cfg),
	}
	c := &pluginControl{}
	c.Config = cfg
	// Initialize components
	// Event Manager
	c.eventManager = gomit.NewEventController()
	controlLogger.WithFields(log.Fields{
		"_block": "new",
	}).Debug("pevent controller created")
	// Metric Catalog
	c.metricCatalog = newMetricCatalog()
	controlLogger.WithFields(log.Fields{
		"_block": "new",
	}).Debug("metric catalog created")
	managerOpts := []pluginManagerOpt{
		OptSetPprof(cfg.Pprof),
		OptSetTempDirPath(cfg.TempDirPath),
	}
	runnerOpts := []pluginRunnerOpt{}
	if cfg.IsTLSEnabled() {
		// With CA cert paths configured, use the extended TLS setup
		// that loads the additional CA certificates.
		if cfg.CACertPaths != "" {
			certPaths := filepath.SplitList(cfg.CACertPaths)
			c.grpcSecurity = client.SecurityTLSExtended(cfg.TLSCertPath, cfg.TLSKeyPath, client.SecureClient, certPaths)
		} else {
			c.grpcSecurity = client.SecurityTLSEnabled(cfg.TLSCertPath, cfg.TLSKeyPath, client.SecureClient)
		}
		managerOpts = append(managerOpts, OptEnableManagerTLS(c.grpcSecurity))
		runnerOpts = append(runnerOpts, OptEnableRunnerTLS(c.grpcSecurity))
	}
	// Plugin Manager
	c.pluginManager = newPluginManager(managerOpts...)
	controlLogger.WithFields(log.Fields{
		"_block": "new",
	}).Debug("plugin manager created")
	// Plugin Manager needs a reference to the metric catalog
	c.pluginManager.SetMetricCatalog(c.metricCatalog)
	// Signing Manager
	c.signingManager = &psigning.SigningManager{}
	controlLogger.WithFields(log.Fields{
		"_block": "new",
	}).Debug("signing manager created")
	// Plugin Runner
	c.pluginRunner = newRunner(runnerOpts...)
	controlLogger.WithFields(log.Fields{
		"_block": "new",
	}).Debug("runner created")
	c.pluginRunner.AddDelegates(c.eventManager)
	c.pluginRunner.SetEmitter(c.eventManager)
	c.pluginRunner.SetMetricCatalog(c.metricCatalog)
	c.pluginRunner.SetPluginManager(c.pluginManager)
	// Pass runner events to control main module
	c.eventManager.RegisterHandler(c.Name(), c)
	// Create subscription group - used for managing a group of subscriptions
	c.subscriptionGroups = newSubscriptionGroups(c)
	// Start stuff
	err := c.pluginRunner.Start()
	if err != nil {
		panic(err)
	}
	// apply options
	// it is important that this happens last, as an option may
	// require that an internal member of c be constructed.
	for _, opt := range opts {
		opt(c)
	}
	return c
}
"resource": ""
} |
// Load loads the requested plugin into the control's plugin manager and
// emits a LoadPluginEvent on success. When the plugin arrives as a
// package, the temporary extraction directory is removed on return and
// the recorded ExecPath is cleared. Fails when the controller has not
// been started.
func (p *pluginControl) Load(rp *core.RequestedPlugin) (core.CatalogedPlugin, serror.SnapError) {
	f := map[string]interface{}{
		"_block": "load",
	}
	details, serr := p.returnPluginDetails(rp)
	if serr != nil {
		return nil, serr
	}
	if details.IsPackage {
		// clean up the unpacked package directory regardless of outcome
		defer os.RemoveAll(filepath.Dir(details.ExecPath))
	}
	controlLogger.WithFields(f).Info("plugin load called")
	if !p.Started {
		se := serror.New(ErrControllerNotStarted)
		se.SetFields(f)
		controlLogger.WithFields(f).Error(se)
		return nil, se
	}
	pl, se := p.pluginManager.LoadPlugin(details, p.eventManager)
	if se != nil {
		return nil, se
	}
	// If plugin was loaded from a package, remove ExecPath for
	// the temporary plugin that was used for load
	if pl.Details.IsPackage {
		pl.Details.ExecPath = ""
	}
	// defer sending event
	event := &control_event.LoadPluginEvent{
		Name:    pl.Meta.Name,
		Version: pl.Meta.Version,
		Type:    int(pl.Meta.Type),
		Signed:  pl.Details.Signed,
	}
	defer p.eventManager.Emit(event)
	return pl, nil
}
"resource": ""
} |
// SubscribeDeps registers a subscription group identified by id for the
// requested metrics, subscribed plugins, and config tree.
func (p *pluginControl) SubscribeDeps(id string, requested []core.RequestedMetric, plugins []core.SubscribedPlugin, configTree *cdata.ConfigDataTree) (serrs []serror.SnapError) {
	return p.subscriptionGroups.Add(id, requested, configTree, plugins)
}
"resource": ""
} |
// UnsubscribeDeps removes the subscription group identified by id,
// unsubscribing its plugins.
func (p *pluginControl) UnsubscribeDeps(id string) []serror.SnapError {
	// update view and unsubscribe to plugins
	return p.subscriptionGroups.Remove(id)
}
"resource": ""
} |
// SetMonitorOptions forwards the given monitor options to the plugin
// runner's monitor.
func (p *pluginControl) SetMonitorOptions(options ...monitorOption) {
	p.pluginRunner.Monitor().Option(options...)
}
"resource": ""
} |
// CollectMetrics collects all metrics for the subscription group id by
// fanning out one goroutine per plugin and gathering results/errors over
// channels; the WaitGroup guarantees every result is recorded before the
// channels are closed and the aggregated slices are returned. Standard
// and workflow tags from allTags are re-applied to every collected
// metric. Returns (nil, errs) when any error occurred.
func (p *pluginControl) CollectMetrics(id string, allTags map[string]map[string]string) (metrics []core.Metric, errs []error) {
	// If control is not started we don't want tasks to be able to
	// go through a workflow.
	if !p.Started {
		return nil, []error{ErrControllerNotStarted}
	}
	// Subscription groups are processed anytime a plugin is loaded/unloaded.
	pluginToMetricMap, serrs, err := p.subscriptionGroups.Get(id)
	if err != nil {
		// NOTE(review): "_block" value "CollectorMetrics" looks like a
		// typo for "CollectMetrics" — confirm before changing, log
		// consumers may match on it.
		controlLogger.WithFields(log.Fields{
			"_block":                "CollectorMetrics",
			"subscription-group-id": id,
		}).Error(err)
		errs = append(errs, err)
		return
	}
	// If We received errors when the requested metrics were last processed
	// against the metric catalog we need to return them to the caller.
	if serrs != nil {
		for _, e := range serrs {
			errs = append(errs, e)
		}
	}
	for ns, nsTags := range allTags {
		for k, v := range nsTags {
			log.WithFields(log.Fields{
				"_module": "control",
				"block":   "CollectMetrics",
				"type":    "pluginCollector",
				"ns":      ns,
				"tag-key": k,
				"tag-val": v,
			}).Debug("Tags in CollectMetrics")
		}
	}
	cMetrics := make(chan []core.Metric)
	cError := make(chan error)
	var wg sync.WaitGroup
	// For each available plugin call available plugin using RPC client and wait for response (goroutines)
	for pluginKey, pmt := range pluginToMetricMap {
		// merge global plugin config into the config for the metric
		for _, mt := range pmt.metricTypes {
			if mt.Config() != nil {
				mt.Config().ReverseMergeInPlace(p.Config.Plugins.getPluginConfigDataNode(core.CollectorPluginType, pmt.plugin.Name(), pmt.plugin.Version()))
			}
		}
		wg.Add(1)
		go func(pluginKey string, mt []core.Metric) {
			mts, err := p.pluginRunner.AvailablePlugins().collectMetrics(pluginKey, mt, id)
			if err != nil {
				cError <- err
			} else {
				cMetrics <- mts
			}
		}(pluginKey, pmt.metricTypes)
	}
	go func() {
		for m := range cMetrics {
			// Reapply standard tags after collection as a precaution. It is common for
			// plugin authors to inadvertently overwrite or not pass along the data
			// passed to CollectMetrics so we will help them out here.
			for i := range m {
				m[i] = p.pluginManager.AddStandardAndWorkflowTags(m[i], allTags)
			}
			metrics = append(metrics, m...)
			wg.Done()
		}
	}()
	go func() {
		for e := range cError {
			errs = append(errs, e)
			wg.Done()
		}
	}()
	wg.Wait()
	close(cMetrics)
	close(cError)
	if len(errs) > 0 {
		return nil, errs
	}
	return
}
"resource": ""
} |
// New constructs a scheduler from cfg, starts its work manager with the
// configured queue and pool sizes (applied uniformly to collect, process,
// and publish), and registers the scheduler as its own event handler.
func New(cfg *Config) *scheduler {
	schedulerLogger.WithFields(log.Fields{
		"_block": "New",
		"value":  cfg.WorkManagerQueueSize,
	}).Info("Setting work manager queue size")
	schedulerLogger.WithFields(log.Fields{
		"_block": "New",
		"value":  cfg.WorkManagerPoolSize,
	}).Info("Setting work manager pool size")
	opts := []workManagerOption{
		CollectQSizeOption(cfg.WorkManagerQueueSize),
		CollectWkrSizeOption(cfg.WorkManagerPoolSize),
		PublishQSizeOption(cfg.WorkManagerQueueSize),
		PublishWkrSizeOption(cfg.WorkManagerPoolSize),
		ProcessQSizeOption(cfg.WorkManagerQueueSize),
		ProcessWkrSizeOption(cfg.WorkManagerPoolSize),
	}
	s := &scheduler{
		tasks:           newTaskCollection(),
		eventManager:    gomit.NewEventController(),
		taskWatcherColl: newTaskWatcherCollection(),
	}
	// we are setting the size of the queue and number of workers for
	// collect, process and publish consistently for now
	s.workManager = newWorkManager(opts...)
	s.workManager.Start()
	s.eventManager.RegisterHandler(HandlerRegistrationName, s)
	return s
}
"resource": ""
} |
// CreateTask creates a task from the given schedule and workflow on
// behalf of a user (source "user"), optionally starting it immediately.
func (s *scheduler) CreateTask(sch schedule.Schedule, wfMap *wmap.WorkflowMap, startOnCreate bool, opts ...core.TaskOption) (core.Task, core.TaskErrors) {
	return s.createTask(sch, wfMap, startOnCreate, "user", opts...)
}
"resource": ""
} |
q15434 | GetTasks | train | func (s *scheduler) GetTasks() map[string]core.Task {
tasks := make(map[string]core.Task)
for id, t := range s.tasks.Table() {
tasks[id] = t
}
return tasks
} | go | {
"resource": ""
} |
q15435 | GetTask | train | func (s *scheduler) GetTask(id string) (core.Task, error) {
t, err := s.getTask(id)
if err != nil {
schedulerLogger.WithFields(log.Fields{
"_block": "get-task",
"_error": ErrTaskNotFound,
"task-id": id,
}).Error("error getting task")
return nil, err // We do this to send back an explicit nil on the interface
}
return t, nil
} | go | {
"resource": ""
} |
// StartTask starts the task with the given ID on behalf of a user
// (source "user").
func (s *scheduler) StartTask(id string) []serror.SnapError {
	return s.startTask(id, "user")
}
"resource": ""
} |
// StopTask stops the task with the given ID on behalf of a user
// (source "user").
func (s *scheduler) StopTask(id string) []serror.SnapError {
	return s.stopTask(id, "user")
}
"resource": ""
} |
// EnableTask re-enables a previously disabled task identified by id,
// logging and returning an error when the task is missing or cannot be
// enabled in its current state.
func (s *scheduler) EnableTask(id string) (core.Task, error) {
	t, e := s.getTask(id)
	if e != nil {
		schedulerLogger.WithFields(log.Fields{
			"_block":  "enable-task",
			"_error":  ErrTaskNotFound,
			"task-id": id,
		}).Error("error enabling task")
		return nil, e
	}
	err := t.Enable()
	if err != nil {
		schedulerLogger.WithFields(log.Fields{
			"_block":  "enable-task",
			"_error":  err.Error(),
			"task-id": id,
		}).Error("error enabling task")
		return nil, err
	}
	schedulerLogger.WithFields(log.Fields{
		"_block":     "enable-task",
		"task-id":    t.ID(),
		"task-state": t.State(),
	}).Info("task enabled")
	return t, nil
}
"resource": ""
} |
// SetMetricManager links the metric manager the scheduler uses for
// collection, processing, and publishing.
func (s *scheduler) SetMetricManager(mm managesMetrics) {
	s.metricManager = mm
	schedulerLogger.WithFields(log.Fields{
		"_block": "set-metric-manager",
	}).Debug("metric manager linked")
}
"resource": ""
} |
q15440 | HandleGomitEvent | train | func (s *scheduler) HandleGomitEvent(e gomit.Event) {
switch v := e.Body.(type) {
case *scheduler_event.MetricCollectedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
"metric-count": len(v.Metrics),
}).Debug("event received")
s.taskWatcherColl.handleMetricCollected(v.TaskID, v.Metrics)
case *scheduler_event.MetricCollectionFailedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
"errors-count": v.Errors,
}).Debug("event received")
case *scheduler_event.TaskStartedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
}).Debug("event received")
s.taskWatcherColl.handleTaskStarted(v.TaskID)
case *scheduler_event.TaskStoppedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
}).Debug("event received")
// We need to unsubscribe from deps when a task has stopped
task, _ := s.getTask(v.TaskID)
task.UnsubscribePlugins()
s.taskWatcherColl.handleTaskStopped(v.TaskID)
case *scheduler_event.TaskEndedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
}).Debug("event received")
// We need to unsubscribe from deps when a task has ended
task, _ := s.getTask(v.TaskID)
task.UnsubscribePlugins()
s.taskWatcherColl.handleTaskEnded(v.TaskID)
case *scheduler_event.TaskDisabledEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
"disabled-reason": v.Why,
}).Debug("event received")
// We need to unsubscribe from deps when a task goes disabled
task, _ := s.getTask(v.TaskID)
task.UnsubscribePlugins()
s.taskWatcherColl.handleTaskDisabled(v.TaskID, v.Why)
case *scheduler_event.PluginsUnsubscribedEvent:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
"task-id": v.TaskID,
}).Debug("event received")
default:
log.WithFields(log.Fields{
"_module": "scheduler-events",
"_block": "handle-events",
"event-namespace": e.Namespace(),
}).Debug("event received")
}
} | go | {
"resource": ""
} |
q15441 | enableTask | train | func (s *apiV1) enableTask(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
id := p.ByName("id")
tsk, err := s.taskManager.EnableTask(id)
if err != nil {
if strings.Contains(err.Error(), ErrTaskNotFound.Error()) {
rbody.Write(404, rbody.FromError(err), w)
return
}
rbody.Write(500, rbody.FromError(err), w)
return
}
task := &rbody.ScheduledTaskEnabled{}
task.AddScheduledTask = *rbody.AddSchedulerTaskFromTask(tsk)
rbody.Write(200, task, w)
} | go | {
"resource": ""
} |
q15442 | Manifest | train | func Manifest(f io.ReadSeeker) (*schema.ImageManifest, error) {
m, err := specaci.ManifestFromImage(f)
if err != nil {
return nil, err
}
return m, nil
} | go | {
"resource": ""
} |
q15443 | Extract | train | func Extract(f io.ReadSeeker) (string, error) {
fileMode := os.FileMode(0755)
tr, err := specaci.NewCompressedTarReader(f)
if err != nil {
return "", err
}
defer tr.Close()
// Extract archive to temporary directory
dir, err := ioutil.TempDir("", "")
if err != nil {
return "", err
}
aciLogger.WithField("directory", dir).Debugf(
"Extracting archive to temporary directory")
for {
hdr, err := tr.Reader.Next()
if err == io.EOF {
break
}
if err != nil {
return "", fmt.Errorf("%v\n%v", ErrNext, err)
}
file := filepath.Join(dir, hdr.Name)
switch hdr.Typeflag {
case tar.TypeReg:
w, err := os.Create(file)
if err != nil {
return "", fmt.Errorf("%v: %v\n%v", ErrCreatingFile, file, err)
}
_, err = io.Copy(w, tr)
if err != nil {
w.Close()
return "", fmt.Errorf("%v: %v\n%v", ErrCopyingFile, file, err)
}
w.Close()
err = os.Chmod(file, fileMode)
if err != nil {
return "", fmt.Errorf("%v: %v\n%v", ErrChmod, file, err)
}
case tar.TypeDir:
err = os.MkdirAll(file, fileMode)
if err != nil {
return "", fmt.Errorf("%v: %v\n%v", ErrMkdirAll, file, err)
}
case tar.TypeSymlink:
err := os.Symlink(
filepath.Join(dir, filepath.Dir(hdr.Name), hdr.Linkname),
filepath.Join(dir, hdr.Name))
if err != nil {
return "", fmt.Errorf("%v: name: %v Linkname: %v \n%v",
ErrCreatingSymLink, hdr.Name, hdr.Linkname, err)
}
default:
return "", fmt.Errorf("%v (type: %d): %v", ErrUntar, hdr.Typeflag, hdr.Name)
}
}
return dir, nil
} | go | {
"resource": ""
} |
q15444 | Validate | train | func Validate(f io.ReadSeeker) error {
tr, err := specaci.NewCompressedTarReader(f)
defer tr.Close()
if err != nil {
return err
}
if err := specaci.ValidateArchive(tr.Reader); err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
// Remove deletes the subscription group identified by id, holding the
// write lock for the duration of the removal.
func (s subscriptionGroups) Remove(id string) []serror.SnapError {
	s.Lock()
	defer s.Unlock()
	return s.remove(id)
}
"resource": ""
} |
// validatePluginUnloading checks every subscription group and collects an
// error for each group that would lose a required metric if
// pluginToUnload were unloaded. An empty result means unloading is safe.
func (s *subscriptionGroups) validatePluginUnloading(pluginToUnload *loadedPlugin) (errs []serror.SnapError) {
	s.Lock()
	defer s.Unlock()
	for id, group := range s.subscriptionMap {
		if err := group.validatePluginUnloading(id, pluginToUnload); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
"resource": ""
} |
q15447 | pluginIsSubscribed | train | func (s *subscriptionGroup) pluginIsSubscribed(plugin *loadedPlugin) bool {
// range over subscribed plugins to find if the plugin is there
for _, sp := range s.plugins {
if sp.TypeName() == plugin.TypeName() && sp.Name() == plugin.Name() && sp.Version() == plugin.Version() {
return true
}
}
return false
} | go | {
"resource": ""
} |
// validatePluginUnloading (single group) returns an error when unloading
// plgToUnload would leave any of the group's requested metrics with no
// plugin able to collect it. Groups that do not subscribe to the plugin
// are unaffected. For metrics pinned to a specific version, only plugins
// in that version count as replacements.
func (s *subscriptionGroup) validatePluginUnloading(id string, plgToUnload *loadedPlugin) (serr serror.SnapError) {
	impacted := false
	if !s.pluginIsSubscribed(plgToUnload) {
		// the plugin is not subscribed, so the task is not impacted by its unloading
		return nil
	}
	controlLogger.WithFields(log.Fields{
		"_block":           "subscriptionGroup.validatePluginUnloading",
		"task-id":          id,
		"plugin-to-unload": plgToUnload.Key(),
	}).Debug("validating impact of unloading the plugin")
	for _, requestedMetric := range s.requestedMetrics {
		// get all plugins exposing the requested metric
		plgs, _ := s.GetPlugins(requestedMetric.Namespace())
		// when requested version is fixed (greater than 0), take into account only plugins in the requested version
		if requestedMetric.Version() > 0 {
			// skip those which are not impacted by unloading (version different than plgToUnload.Version())
			if requestedMetric.Version() == plgToUnload.Version() {
				plgsInVer := []core.CatalogedPlugin{}
				for _, plg := range plgs {
					if plg.Version() == requestedMetric.Version() {
						plgsInVer = append(plgsInVer, plg)
					}
				}
				// set plugins only in the requested version
				plgs = plgsInVer
			}
		}
		if len(plgs) == 1 && plgs[0].Key() == plgToUnload.Key() {
			// the requested metric is exposed only by the single plugin and there is no replacement
			impacted = true
			controlLogger.WithFields(log.Fields{
				"_block":           "subscriptionGroup.validatePluginUnloading",
				"task-id":          id,
				"plugin-to-unload": plgToUnload.Key(),
				"requested-metric": fmt.Sprintf("%s:%d", requestedMetric.Namespace(), requestedMetric.Version()),
			}).Errorf("unloading the plugin would cause missing in collection the requested metric")
		}
	}
	if impacted {
		serr = serror.New(ErrPluginCannotBeUnloaded, map[string]interface{}{
			"task-id":          id,
			"plugin-to-unload": plgToUnload.Key(),
		})
	}
	return serr
}
"resource": ""
} |
q15449 | comparePlugins | train | func comparePlugins(newPlugins,
oldPlugins []core.SubscribedPlugin) (adds,
removes []core.SubscribedPlugin) {
newMap := make(map[string]int)
oldMap := make(map[string]int)
for _, n := range newPlugins {
newMap[key(n)]++
}
for _, o := range oldPlugins {
oldMap[key(o)]++
}
for _, n := range newPlugins {
if oldMap[key(n)] > 0 {
oldMap[key(n)]--
continue
}
adds = append(adds, n)
}
for _, o := range oldPlugins {
if newMap[key(o)] > 0 {
newMap[key(o)]--
continue
}
removes = append(removes, o)
}
return
} | go | {
"resource": ""
} |
// LoadPlugin uploads the plugin file(s) at paths p to the REST API and
// returns the loaded-plugin descriptions, or an error result when the
// upload fails, the API returns an error body, or the response type is
// unexpected.
func (c *Client) LoadPlugin(p []string) *LoadPluginResult {
	r := new(LoadPluginResult)
	resp, err := c.pluginUploadRequest(p)
	if err != nil {
		r.Err = serror.New(err)
		return r
	}
	switch resp.Meta.Type {
	case rbody.PluginsLoadedType:
		pl := resp.Body.(*rbody.PluginsLoaded)
		r.LoadedPlugins = convertLoadedPlugins(pl.LoadedPlugins)
	case rbody.ErrorType:
		// copy the error's fields into the serror's field map
		f := resp.Body.(*rbody.Error).Fields
		fields := make(map[string]interface{})
		for k, v := range f {
			fields[k] = v
		}
		r.Err = serror.New(resp.Body.(*rbody.Error), fields)
	default:
		r.Err = serror.New(ErrAPIResponseMetaType)
	}
	return r
}
"resource": ""
} |
// UnloadPlugin asks the REST API to unload the plugin identified by
// type, name, and version. The name is query-escaped before being placed
// in the URL path.
func (c *Client) UnloadPlugin(pluginType, name string, version int) *UnloadPluginResult {
	r := &UnloadPluginResult{}
	resp, err := c.do("DELETE", fmt.Sprintf("/plugins/%s/%s/%d", pluginType, url.QueryEscape(name), version), ContentTypeJSON)
	if err != nil {
		r.Err = err
		return r
	}
	switch resp.Meta.Type {
	case rbody.PluginUnloadedType:
		// Success
		up := resp.Body.(*rbody.PluginUnloaded)
		r = &UnloadPluginResult{up, nil}
	case rbody.ErrorType:
		r.Err = resp.Body.(*rbody.Error)
	default:
		r.Err = ErrAPIResponseMetaType
	}
	return r
}
"resource": ""
} |
// GetPlugins fetches the loaded (and, with details, available) plugins
// from the REST API.
func (c *Client) GetPlugins(details bool) *GetPluginsResult {
	r := &GetPluginsResult{}
	var path string
	if details {
		path = "/plugins?details"
	} else {
		path = "/plugins"
	}
	resp, err := c.do("GET", path, ContentTypeJSON)
	if err != nil {
		r.Err = err
		return r
	}
	switch resp.Meta.Type {
	// TODO change this to concrete const type when Joel adds it
	case rbody.PluginListType:
		// Success
		b := resp.Body.(*rbody.PluginList)
		r.LoadedPlugins = convertLoadedPlugins(b.LoadedPlugins)
		r.AvailablePlugins = convertAvailablePlugins(b.AvailablePlugins)
		return r
	case rbody.ErrorType:
		r.Err = resp.Body.(*rbody.Error)
	default:
		r.Err = ErrAPIResponseMetaType
	}
	return r
}
"resource": ""
} |
q15453 | GetPlugin | train | func (c *Client) GetPlugin(typ, name string, ver int) *GetPluginResult {
r := &GetPluginResult{}
path := "/plugins/" + typ + "/" + name + "/" + strconv.Itoa(ver)
resp, err := c.do("GET", path, ContentTypeJSON)
if err != nil {
r.Err = err
return r
}
switch resp.Meta.Type {
// TODO change this to concrete const type when Joel adds it
case rbody.PluginReturnedType:
// Success
b := resp.Body.(*rbody.PluginReturned)
r.ReturnedPlugin = ReturnedPlugin{b}
return r
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
q15454 | Close | train | func (t *TaskWatcher) Close() error {
for _, x := range t.taskIDs {
t.parent.rm(x, t)
}
return nil
} | go | {
"resource": ""
} |
q15455 | Insert | train | func (p *pool) Insert(a AvailablePlugin) error {
if a.Type() != plugin.CollectorPluginType && a.Type() != plugin.ProcessorPluginType && a.Type() != plugin.PublisherPluginType && a.Type() != plugin.StreamCollectorPluginType {
return ErrBadType
}
// If an empty pool is created, it does not have
// any available plugins from which to retrieve
// concurrency count or exclusivity. We ensure it
// is set correctly on an insert.
if len(p.plugins) == 0 {
if err := p.applyPluginMeta(a); err != nil {
return err
}
}
a.SetID(p.generatePID())
p.plugins[a.ID()] = a
return nil
} | go | {
"resource": ""
} |
// applyPluginMeta configures the pool from plugin a's metadata: pool
// size (1 for exclusive plugins), cache TTL (the larger of the plugin's
// TTL and the global default), concurrency count, and the routing/
// caching strategy. Sticky routing forces a concurrency count of 1.
// Returns ErrBadStrategy for an unknown routing strategy.
func (p *pool) applyPluginMeta(a AvailablePlugin) error {
	// Checking if plugin is exclusive
	// (only one instance should be running).
	if a.Exclusive() {
		p.max = 1
	}
	// Set the cache TTL
	cacheTTL := GlobalCacheExpiration
	// if the plugin exposes a default TTL greater than the global default, use it
	if a.CacheTTL() != 0 && a.CacheTTL() > GlobalCacheExpiration {
		cacheTTL = a.CacheTTL()
	}
	// Set the concurrency count
	p.concurrencyCount = a.ConcurrencyCount()
	// Set the routing and caching strategy
	switch a.RoutingStrategy() {
	case plugin.DefaultRouting:
		p.RoutingAndCaching = NewLRU(cacheTTL)
	case plugin.StickyRouting:
		p.RoutingAndCaching = NewSticky(cacheTTL)
		p.concurrencyCount = 1
	case plugin.ConfigRouting:
		p.RoutingAndCaching = NewConfigBased(cacheTTL)
	default:
		return ErrBadStrategy
	}
	return nil
}
"resource": ""
} |
q15457 | Subscribe | train | func (p *pool) Subscribe(taskID string) {
p.Lock()
defer p.Unlock()
if _, exists := p.subs[taskID]; !exists {
// Version is the last item in the key, so we split here
// to retrieve it for the subscription.
p.subs[taskID] = &subscription{
TaskID: taskID,
Version: p.version,
}
}
} | go | {
"resource": ""
} |
q15458 | Unsubscribe | train | func (p *pool) Unsubscribe(taskID string) {
p.Lock()
defer p.Unlock()
delete(p.subs, taskID)
} | go | {
"resource": ""
} |
q15459 | Eligible | train | func (p *pool) Eligible() bool {
p.RLock()
defer p.RUnlock()
// optimization: don't even bother with concurrency
// count if we have already reached pool max
if len(p.plugins) >= p.max {
return false
}
// Check if pool is eligible and number of plugins is less than maximum allowed
if len(p.subs) > p.concurrencyCount*len(p.plugins) {
return true
}
return false
} | go | {
"resource": ""
} |
q15460 | Kill | train | func (p *pool) Kill(id uint32, reason string) {
p.Lock()
defer p.Unlock()
ap, ok := p.plugins[id]
if ok {
ap.Kill(reason)
delete(p.plugins, id)
}
} | go | {
"resource": ""
} |
q15461 | KillAll | train | func (p *pool) KillAll(reason string) {
for id, rp := range p.plugins {
log.WithFields(log.Fields{
"_block": "KillAll",
"reason": reason,
}).Debug(fmt.Sprintf("handling 'KillAll' for pool '%v', killing plugin '%v:%v'", p.String(), rp.Name(), rp.Version()))
if err := rp.Stop(reason); err != nil {
log.WithFields(log.Fields{
"_block": "KillAll",
"reason": reason,
}).Error(err)
}
p.Kill(id, reason)
}
} | go | {
"resource": ""
} |
q15462 | SelectAndKill | train | func (p *pool) SelectAndKill(id, reason string) {
rp, err := p.Remove(p.plugins.Values(), id)
if err != nil {
log.WithFields(log.Fields{
"_block": "SelectAndKill",
"taskID": id,
"reason": reason,
}).Error(err)
return
}
if err := rp.Stop(reason); err != nil {
log.WithFields(log.Fields{
"_block": "SelectAndKill",
"taskID": id,
"reason": reason,
}).Error(err)
}
if err := rp.Kill(reason); err != nil {
log.WithFields(log.Fields{
"_block": "SelectAndKill",
"taskID": id,
"reason": reason,
}).Error(err)
}
p.remove(rp.ID())
} | go | {
"resource": ""
} |
q15463 | remove | train | func (p *pool) remove(id uint32) {
p.Lock()
defer p.Unlock()
delete(p.plugins, id)
} | go | {
"resource": ""
} |
q15464 | Count | train | func (p *pool) Count() int {
p.RLock()
defer p.RUnlock()
return len(p.plugins)
} | go | {
"resource": ""
} |
q15465 | SubscriptionCount | train | func (p *pool) SubscriptionCount() int {
p.RLock()
defer p.RUnlock()
return len(p.subs)
} | go | {
"resource": ""
} |
q15466 | SelectAP | train | func (p *pool) SelectAP(taskID string, config map[string]ctypes.ConfigValue) (AvailablePlugin, serror.SnapError) {
aps := p.plugins.Values()
var id string
switch p.Strategy().String() {
case "least-recently-used":
id = ""
case "sticky":
id = taskID
case "config-based":
id = idFromCfg(config)
default:
return nil, serror.New(ErrBadStrategy)
}
ap, err := p.Select(aps, id)
if err != nil {
return nil, serror.New(err)
}
return ap, nil
} | go | {
"resource": ""
} |
q15467 | generatePID | train | func (p *pool) generatePID() uint32 {
atomic.AddUint32(&p.pidCounter, 1)
return p.pidCounter
} | go | {
"resource": ""
} |
q15468 | CacheTTL | train | func (p *pool) CacheTTL(taskID string) (time.Duration, error) {
if len(p.plugins) == 0 {
return 0, ErrPoolEmpty
}
return p.RoutingAndCaching.CacheTTL(taskID)
} | go | {
"resource": ""
} |
q15469 | ValidateSignature | train | func (s *SigningManager) ValidateSignature(keyringFiles []string, signedFile string, signature []byte) error {
var signedby string
var e error
var checked *openpgp.Entity
signed, err := os.Open(signedFile)
if err != nil {
return fmt.Errorf("%v: %v\n%v", ErrSignedFileNotFound, signedFile, err)
}
defer signed.Close()
//Go through all the keyrings til either signature is valid or end of keyrings
for _, keyringFile := range keyringFiles {
keyringf, err := os.Open(keyringFile)
if err != nil {
return fmt.Errorf("%v: %v\n%v", ErrKeyringFileNotFound, keyringFile, err)
}
defer keyringf.Close()
//Read both armored and unarmored keyrings
keyring, err := openpgp.ReadArmoredKeyRing(keyringf)
if err != nil {
keyringf.Seek(0, 0)
keyring, err = openpgp.ReadKeyRing(keyringf)
if err != nil {
return fmt.Errorf("%v: %v\n%v", ErrUnableToReadKeyring, keyringFile, err)
}
}
//Check the armored detached signature
checked, e = openpgp.CheckArmoredDetachedSignature(keyring, signed, bytes.NewReader(signature))
if e == nil {
for k := range checked.Identities {
signedby = signedby + k
}
fmt.Printf("Signature made %v using RSA key ID %v\nGood signature from %v\n", time.Now().Format(time.RFC1123), checked.PrimaryKey.KeyIdShortString(), signedby)
return nil
}
signed.Seek(0, 0)
}
return fmt.Errorf("%v\n%v", ErrCheckSignature, e)
} | go | {
"resource": ""
} |
q15470 | SupportedTypes | train | func SupportedTypes() []string {
// This is kind of a hack but keeps the definition of types here in
// ctypes.go. If you create a new ConfigValue type be sure and add here
// to return the Type() response. This will cause any depedant components
// to acknowledge and use that type.
t := []string{
// String
ConfigValueStr{}.Type(),
// Integer
ConfigValueInt{}.Type(),
// Float
ConfigValueFloat{}.Type(),
// Bool
ConfigValueBool{}.Type(),
}
return t
} | go | {
"resource": ""
} |
q15471 | NewExecutablePlugin | train | func NewExecutablePlugin(a Arg, commands ...string) (*ExecutablePlugin, error) {
jsonArgs, err := json.Marshal(a)
if err != nil {
return nil, err
}
cmd := &exec.Cmd{
Path: commands[0],
Args: append(commands, string(jsonArgs)),
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
return &ExecutablePlugin{
cmd: &commandWrapper{cmd},
stdout: stdout,
stderr: stderr,
}, nil
} | go | {
"resource": ""
} |
q15472 | Run | train | func (e *ExecutablePlugin) Run(timeout time.Duration) (Response, error) {
// Run starts the plugin process, forwards its stderr/stdout into the snap
// log, and waits up to `timeout` for the plugin's handshake Response on
// stdout. On timeout or a failed handshake the plugin process is killed.
var (
respReceived bool
resp Response
err error
respBytes []byte
)
// doneChan signals that the handshake line has been read and decoded.
doneChan := make(chan struct{})
stdOutScanner := bufio.NewScanner(e.stdout)
// Start the command and begin reading its output.
if err = e.cmd.Start(); err != nil {
return resp, err
}
e.captureStderr()
go func() {
for {
for stdOutScanner.Scan() {
// The first chunk from the scanner is the plugin's response to the
// handshake. Once we've received that, we can begin to forward
// logs on to snapteld's log.
if !respReceived {
respBytes = stdOutScanner.Bytes()
err = json.Unmarshal(respBytes, &resp)
respReceived = true
close(doneChan)
} else {
execLogger.WithFields(log.Fields{
"plugin": e.name,
"io": "stdout",
}).Debug(stdOutScanner.Text())
}
}
// Scan returned false: either EOF or a scanner error (for example a
// line longer than the scanner's buffer). On error, fall back to a
// plain buffered read so the output line is not silently lost.
if errScanner := stdOutScanner.Err(); errScanner != nil {
reader := bufio.NewReader(e.stdout)
log, errRead := reader.ReadString('\n')
if errRead == io.EOF {
break
}
execLogger.
WithField("plugin", path.Base(e.cmd.Path())).
WithField("io", "stdout").
WithField("scanner_err", errScanner).
WithField("read_string_err", errRead).
Warn(log)
continue //scanner finished with errors so try to scan once again
}
break //scanner finished scanning without errors so break the loop
}
}()
// Wait until:
// a) We receive a signal that the plugin has responded
// OR
// b) The timeout expires
select {
case <-doneChan:
case <-time.After(timeout):
// We timed out waiting for the plugin's response. Set err.
err = fmt.Errorf("timed out waiting for plugin %s", path.Base(e.cmd.Path()))
}
if err != nil {
execLogger.WithFields(log.Fields{
"received_response": string(respBytes),
}).Error("error loading plugin")
// Kill the plugin if we failed to load it.
e.Kill()
}
// Plugin names are normalized to lowercase; warn when the plugin
// advertised a name containing uppercase characters.
lowerName := strings.ToLower(resp.Meta.Name)
if lowerName != resp.Meta.Name {
execLogger.WithFields(log.Fields{
"plugin-name": resp.Meta.Name,
"plugin-version": resp.Meta.Version,
"plugin-type": resp.Type.String(),
}).Warning("uppercase plugin name")
}
resp.Meta.Name = lowerName
return resp, err
} | go | {
"resource": ""
} |
q15473 | Strings | train | func (n Namespace) Strings() []string {
var ns []string
for _, namespaceElement := range n {
ns = append(ns, namespaceElement.Value)
}
return ns
} | go | {
"resource": ""
} |
q15474 | getSeparator | train | func (n Namespace) getSeparator() string {
smap := initSeparatorMap()
for _, e := range n {
// look at each char
for _, r := range e.Value {
ch := fmt.Sprintf("%c", r)
if v, ok := smap[ch]; ok && !v {
smap[ch] = true
}
}
}
// Go through our separator list
for _, s := range nsPriorityList {
if v, ok := smap[s]; ok && !v {
return s
}
}
return Separator
} | go | {
"resource": ""
} |
q15475 | initSeparatorMap | train | func initSeparatorMap() map[string]bool {
m := map[string]bool{}
for _, s := range nsPriorityList {
m[s] = false
}
return m
} | go | {
"resource": ""
} |
q15476 | NewNamespace | train | func NewNamespace(ns ...string) Namespace {
n := make([]NamespaceElement, len(ns))
for i, ns := range ns {
n[i] = NamespaceElement{Value: ns}
}
return n
} | go | {
"resource": ""
} |
q15477 | AddDynamicElement | train | func (n Namespace) AddDynamicElement(name, description string) Namespace {
nse := NamespaceElement{Name: name, Description: description, Value: "*"}
return append(n, nse)
} | go | {
"resource": ""
} |
q15478 | AddStaticElement | train | func (n Namespace) AddStaticElement(value string) Namespace {
nse := NamespaceElement{Value: value}
return append(n, nse)
} | go | {
"resource": ""
} |
q15479 | AddStaticElements | train | func (n Namespace) AddStaticElements(values ...string) Namespace {
for _, value := range values {
n = append(n, NamespaceElement{Value: value})
}
return n
} | go | {
"resource": ""
} |
q15480 | start | train | func (w *worker) start() {
// start runs the worker loop: it pulls queued jobs from w.rcv and executes
// each one unless its deadline has already passed, then completes the job's
// promise with any accumulated errors. The loop exits when either this
// worker's private kamikaze channel or the global workerKillChan fires.
for {
select {
case q := <-w.rcv:
// assert that deadline is not exceeded
if chrono.Chrono.Now().Before(q.Job().Deadline()) {
q.Job().Run()
} else {
// the deadline was exceeded and this job will not run
q.Job().AddErrors(errors.New("Worker refused to run overdue job."))
}
// mark the job complete
q.Promise().Complete(q.Job().Errors())
// the single kill-channel -- used when resizing worker pools
case <-w.kamikaze:
return
//the broadcast that kills all workers
case <-workerKillChan:
return
}
}
} | go | {
"resource": ""
} |
q15481 | FetchMetrics | train | func (c *Client) FetchMetrics(ns string, ver int) *GetMetricsResult {
r := &GetMetricsResult{}
q := fmt.Sprintf("/metrics?ns=%s&ver=%d", ns, ver)
resp, err := c.do("GET", q, ContentTypeJSON)
if err != nil {
return &GetMetricsResult{Err: err}
}
switch resp.Meta.Type {
case rbody.MetricsReturnedType:
mc := resp.Body.(*rbody.MetricsReturned)
r.Catalog = convertCatalog(mc)
case rbody.MetricReturnedType:
mc := resp.Body.(*rbody.MetricReturned)
r.Catalog = []*rbody.Metric{mc.Metric}
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
q15482 | GetMetricVersions | train | func (c *Client) GetMetricVersions(ns string) *GetMetricsResult {
r := &GetMetricsResult{}
q := fmt.Sprintf("/metrics?ns=%s", ns)
resp, err := c.do("GET", q, ContentTypeJSON)
if err != nil {
return &GetMetricsResult{Err: err}
}
switch resp.Meta.Type {
case rbody.MetricsReturnedType:
mc := resp.Body.(*rbody.MetricsReturned)
r.Catalog = convertCatalog(mc)
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
// GetFirstChar returns the first UTF-8 rune of s as a string, or the empty
// string when s is empty.
func GetFirstChar(s string) string {
	for _, r := range s {
		// Return directly instead of assigning to an accumulator and
		// breaking; string(r) re-encodes the rune as UTF-8, equivalent to
		// fmt.Sprintf("%c", r) without the boxing/reflection cost.
		return string(r)
	}
	return ""
}
"resource": ""
} |
q15484 | GobEncode | train | func (c *ConfigTree) GobEncode() ([]byte, error) {
//todo throw an error if not frozen
w := new(bytes.Buffer)
encoder := gob.NewEncoder(w)
if c.root == nil {
c.root = &node{}
// c.root.setKeys([]string{})
}
if err := encoder.Encode(c.root); err != nil {
return nil, err
}
return w.Bytes(), nil
} | go | {
"resource": ""
} |
q15485 | GobDecode | train | func (c *ConfigTree) GobDecode(buf []byte) error {
r := bytes.NewBuffer(buf)
decoder := gob.NewDecoder(r)
if err := decoder.Decode(&c.root); err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
q15486 | MarshalJSON | train | func (c *ConfigTree) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Root *node `json:"root"`
}{
Root: c.root,
})
} | go | {
"resource": ""
} |
q15487 | Add | train | func (c *ConfigTree) Add(ns []string, inNode Node) {
c.log(fmt.Sprintf("Adding %v at %s\n", inNode, ns))
if len(ns) == 0 {
c.log(fmt.Sprintln("ns is empty - returning with no change to tree"))
return
}
f, remain := ns[0], ns[1:]
c.log(fmt.Sprintf("first ns (%s) remain (%s)", f, remain))
if c.root == nil {
// Create node at root
c.root = new(node)
c.root.setKeys([]string{f})
c.log(fmt.Sprintf("Root now = %v\n", c.root.keys))
// If remain is empty then the inNode belongs at this root level
if len(remain) == 0 {
c.log(fmt.Sprintf("adding node at root level\n"))
c.root.Node = inNode
// And return since we are done
return
}
} else {
if f != c.root.keys[0] {
panic("Can't add a new root namespace")
}
}
c.root.add(remain, inNode)
} | go | {
"resource": ""
} |
q15488 | Get | train | func (c *ConfigTree) Get(ns []string) Node {
c.log(fmt.Sprintf("Get on ns (%s)\n", ns))
retNodes := new([]Node)
// Return if no root exists (no tree without a root)
if c.root == nil {
c.log(fmt.Sprintln("ctree: no root - returning nil"))
return nil
}
if len(c.root.keys) == 0 {
//This will be the case when a plugin returns an empty configPolicyTree
return nil
}
rootKeyLength := len(c.root.keys)
if len(ns) < rootKeyLength {
c.log(fmt.Sprintln("ns less than root key length - returning nil"))
return nil
}
match, remain := ns[:rootKeyLength], ns[rootKeyLength:]
if bytes.Compare(nsToByteArray(match), c.root.keysBytes) != 0 {
c.log(fmt.Sprintf("no match versus root key (match:'%s' != root:'%s')\n", string(nsToByteArray(match)), string(c.root.keysBytes)))
return nil
}
c.log(fmt.Sprintf("Match root key (match:'%s' == root:'%s')\n", string(nsToByteArray(match)), string(c.root.keysBytes)))
if c.root.Node != nil {
c.log(fmt.Sprintf("adding root node (not nil) to nodes to merge (%v)\n", c.root.Node))
*retNodes = append(*retNodes, c.root.Node)
}
c.log(fmt.Sprintf("children to get from (%d)\n", len(c.root.nodes)))
for _, child := range c.root.nodes {
childNodes := child.get(remain)
*retNodes = append(*retNodes, *childNodes...)
}
if len(*retNodes) == 0 {
// There are no child nodes with configs so we return
return nil
}
c.log(fmt.Sprintf("nodes to merge count (%d)\n", len(*retNodes)))
// Call Node.Merge() sequentially on the retNodes
rn := (*retNodes)[0]
for _, n := range (*retNodes)[1:] {
rn = rn.Merge(n)
}
return rn
} | go | {
"resource": ""
} |
q15489 | MarshalJSON | train | func (n *node) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Nodes []*node `json:"nodes"`
Keys []string `json:"keys"`
KeysBytes []byte `json:"keysbytes"`
Node Node `json:"node"`
}{
Nodes: n.nodes,
Keys: n.keys,
KeysBytes: n.keysBytes,
Node: n.Node,
})
} | go | {
"resource": ""
} |
q15490 | GobEncode | train | func (n *node) GobEncode() ([]byte, error) {
w := new(bytes.Buffer)
encoder := gob.NewEncoder(w)
if err := encoder.Encode(n.nodes); err != nil {
return nil, err
}
if err := encoder.Encode(n.keys); err != nil {
return nil, err
}
if err := encoder.Encode(n.keysBytes); err != nil {
return nil, err
}
if err := encoder.Encode(&n.Node); err != nil {
return nil, err
}
return w.Bytes(), nil
} | go | {
"resource": ""
} |
q15491 | GobDecode | train | func (n *node) GobDecode(buf []byte) error {
r := bytes.NewBuffer(buf)
decoder := gob.NewDecoder(r)
if err := decoder.Decode(&n.nodes); err != nil {
return err
}
if err := decoder.Decode(&n.keys); err != nil {
return err
}
if err := decoder.Decode(&n.keysBytes); err != nil {
return err
}
return decoder.Decode(&n.Node)
} | go | {
"resource": ""
} |
q15492 | MarshalJSON | train | func (f *FloatRule) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Key string `json:"key"`
Required bool `json:"required"`
Default ctypes.ConfigValue `json:"default,omitempty"`
Minimum ctypes.ConfigValue `json:"minimum,omitempty"`
Maximum ctypes.ConfigValue `json:"maximum,omitempty"`
Type string `json:"type"`
}{
Key: f.key,
Required: f.required,
Default: f.Default(),
Minimum: f.Minimum(),
Maximum: f.Maximum(),
Type: FloatType,
})
} | go | {
"resource": ""
} |
q15493 | GobEncode | train | func (f *FloatRule) GobEncode() ([]byte, error) {
w := new(bytes.Buffer)
encoder := gob.NewEncoder(w)
if err := encoder.Encode(f.key); err != nil {
return nil, err
}
if err := encoder.Encode(f.required); err != nil {
return nil, err
}
if f.default_ == nil {
encoder.Encode(false)
} else {
encoder.Encode(true)
if err := encoder.Encode(&f.default_); err != nil {
return nil, err
}
}
if f.minimum == nil {
encoder.Encode(false)
} else {
encoder.Encode(true)
if err := encoder.Encode(f.minimum); err != nil {
return nil, err
}
}
if f.maximum == nil {
encoder.Encode(false)
} else {
encoder.Encode(true)
if err := encoder.Encode(f.maximum); err != nil {
return nil, err
}
}
return w.Bytes(), nil
} | go | {
"resource": ""
} |
q15494 | Default | train | func (f *FloatRule) Default() ctypes.ConfigValue {
if f.default_ != nil {
return ctypes.ConfigValueFloat{Value: *f.default_}
}
return nil
} | go | {
"resource": ""
} |
q15495 | parseURL | train | func parseURL(url string) error {
if !govalidator.IsURL(url) || !strings.HasPrefix(url, "http") {
return fmt.Errorf("URL %s is not in the format of http(s)://<ip>:<port>", url)
}
return nil
} | go | {
"resource": ""
} |
q15496 | Password | train | func Password(p string) metaOp {
return func(c *Client) {
c.Password = strings.TrimSpace(p)
}
} | go | {
"resource": ""
} |
q15497 | Timeout | train | func Timeout(t time.Duration) metaOp {
return func(c *Client) {
c.http.Timeout = t
}
} | go | {
"resource": ""
} |
q15498 | New | train | func New(url, ver string, insecure bool, opts ...metaOp) (*Client, error) {
if err := parseURL(url); err != nil {
return nil, err
}
if ver == "" {
ver = "v1"
}
var t *http.Transport
if insecure {
t = insecureTransport
} else {
t = secureTransport
}
c := &Client{
URL: url,
Version: ver,
http: &http.Client{
Transport: t,
},
}
for _, opt := range opts {
opt(c)
}
c.prefix = url + "/" + ver
return c, nil
} | go | {
"resource": ""
} |
q15499 | TribeRequest | train | func (c *Client) TribeRequest() (*http.Response, error) {
req, err := http.NewRequest("GET", c.URL, nil)
if err != nil {
return nil, err
}
addAuth(req, "snap", c.Password)
rsp, err := c.http.Do(req)
if err != nil {
return nil, err
}
return rsp, nil
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.