_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q15300 | Spin | train | func (t *task) Spin() {
// Spin starts the task running: streaming tasks launch stream(); scheduled
// tasks launch spin(), but only from a stopped/ended state.
// We need to lock long enough to change state
t.Lock()
defer t.Unlock()
// if this task is a streaming task
if t.isStream {
t.state = core.TaskSpinning
t.killChan = make(chan struct{})
go t.stream()
return
}
// Reset the lastFireTime at each Spin.
// This ensures misses are tracked only forward of the point
// in time that a task starts spinning. E.g. stopping a task,
// waiting a period of time, and starting the task won't show
// misses for the interval while stopped.
t.lastFireTime = time.Time{}
if t.state == core.TaskStopped || t.state == core.TaskEnded {
t.state = core.TaskSpinning
t.killChan = make(chan struct{})
// spin in a goroutine
go t.spin()
}
} | go | {
"resource": ""
} |
q15301 | stream | train | func (t *task) stream() {
// stream drives a streaming task: it (re)establishes the metrics stream,
// forwards metric batches into the workflow, and disables the task after
// too many consecutive failures. Runs until killed or disabled.
var consecutiveFailures int
resetTime := time.Second * 3
for {
metricsChan, errChan, err := t.metricsManager.StreamMetrics(
t.id,
t.workflow.tags,
t.maxCollectDuration,
t.maxMetricsBuffer)
if err != nil {
consecutiveFailures++
// check task failures
if t.stopOnFailure >= 0 && consecutiveFailures >= t.stopOnFailure {
taskLogger.WithFields(log.Fields{
"_block": "stream",
"task-id": t.id,
"task-name": t.name,
"consecutive failures": consecutiveFailures,
"error": t.lastFailureMessage,
}).Error(ErrTaskDisabledOnFailures)
// disable the task
t.disable(t.lastFailureMessage)
return
}
// If we are unsuccessful at setting up the stream
// wait for a second and then try again until either
// the connection is successful or we pass the
// acceptable number of consecutive failures
time.Sleep(resetTime)
continue
} else {
consecutiveFailures = 0
}
// Inner loop: consume metrics/errors until the stream breaks (done)
// or the task is killed.
done := false
for !done {
// NOTE(review): errChan is never reset to nil inside this loop, so
// this guard appears unreachable after a successful setup — confirm
// against StreamMetrics semantics before relying on it.
if errChan == nil {
break
}
select {
case <-t.killChan:
t.Lock()
t.state = core.TaskStopped
t.Unlock()
done = true
event := new(scheduler_event.TaskStoppedEvent)
event.TaskID = t.id
defer t.eventEmitter.Emit(event)
return
case mts, ok := <-metricsChan:
if !ok {
// Channel closed: nil it so this select case blocks from now on.
metricsChan = nil
break
}
if len(mts) == 0 {
continue
}
t.hitCount++
consecutiveFailures = 0
t.workflow.StreamStart(t, mts)
case err := <-errChan:
taskLogger.WithFields(log.Fields{
"_block": "stream",
"task-id": t.id,
"task-name": t.name,
}).Error("Error: " + err.Error())
consecutiveFailures++
if err.Error() == "connection broken" {
// Wait here before trying to reconnect to allow time
// for plugin restarts.
time.Sleep(resetTime)
done = true
}
// check task failures
if t.stopOnFailure >= 0 && consecutiveFailures >= t.stopOnFailure {
taskLogger.WithFields(log.Fields{
"_block": "stream",
"task-id": t.id,
"task-name": t.name,
"consecutive failures": consecutiveFailures,
"error": t.lastFailureMessage,
}).Error(ErrTaskDisabledOnFailures)
// disable the task
t.disable(t.lastFailureMessage)
return
}
}
}
}
} | go | {
"resource": ""
} |
q15302 | UnsubscribePlugins | train | func (t *task) UnsubscribePlugins() []serror.SnapError {
// UnsubscribePlugins releases the task's plugin dependencies on every
// remote manager and returns any errors that occurred; all errors are
// also logged before returning.
depGroups := getWorkflowPlugins(t.workflow.processNodes, t.workflow.publishNodes, t.workflow.metrics)
var errs []serror.SnapError
for k := range depGroups {
event := &scheduler_event.PluginsUnsubscribedEvent{
TaskID: t.ID(),
Plugins: depGroups[k].subscribedPlugins,
}
// NOTE(review): defer inside a loop — all events are emitted only when
// this function returns, in reverse order. Presumably intentional so
// events fire after every unsubscription completes; confirm before
// refactoring this into a direct Emit.
defer t.eventEmitter.Emit(event)
mgr, err := t.RemoteManagers.Get(k)
if err != nil {
errs = append(errs, serror.New(err))
} else {
uerrs := mgr.UnsubscribeDeps(t.ID())
if len(uerrs) > 0 {
errs = append(errs, uerrs...)
}
}
}
for _, err := range errs {
taskLogger.WithFields(log.Fields{
"_block": "UnsubscribePlugins",
"task-id": t.id,
"task-name": t.name,
"task-state": t.state,
}).Error(err)
}
return errs
} | go | {
"resource": ""
} |
q15303 | SubscribePlugins | train | func (t *task) SubscribePlugins() ([]string, []serror.SnapError) {
// SubscribePlugins subscribes the task's plugin dependencies on every
// remote manager. On any failure it rolls back the managers already
// subscribed and returns the accumulated errors; on success it returns
// the list of manager keys that were subscribed.
depGroups := getWorkflowPlugins(t.workflow.processNodes, t.workflow.publishNodes, t.workflow.metrics)
var subbedDeps []string
for k := range depGroups {
var errs []serror.SnapError
mgr, err := t.RemoteManagers.Get(k)
if err != nil {
errs = append(errs, serror.New(err))
} else {
errs = mgr.SubscribeDeps(t.ID(), depGroups[k].requestedMetrics, depGroups[k].subscribedPlugins, t.workflow.configTree)
}
// If there are errors with subscribing any deps, go through and unsubscribe all other
// deps that may have already been subscribed then return the errors.
if len(errs) > 0 {
for _, key := range subbedDeps {
mgr, err := t.RemoteManagers.Get(key)
if err != nil {
errs = append(errs, serror.New(err))
} else {
// sending empty mts to unsubscribe to indicate task should not start
uerrs := mgr.UnsubscribeDeps(t.ID())
errs = append(errs, uerrs...)
}
}
return nil, errs
}
// If subscribed successfully add to subbedDeps
subbedDeps = append(subbedDeps, k)
}
return subbedDeps, nil
} | go | {
"resource": ""
} |
q15304 | Enable | train | func (t *task) Enable() error {
	// Enable transitions a disabled task back to the stopped state so it can
	// be started again; only disabled tasks may be enabled.
	t.Lock()
	defer t.Unlock()
	if t.state == core.TaskDisabled {
		t.state = core.TaskStopped
		return nil
	}
	return ErrTaskNotDisabled
} | go | {
"resource": ""
} |
q15305 | disable | train | func (t *task) disable(failureMsg string) {
	// disable marks the task disabled under the lock, then emits a
	// TaskDisabledEvent explaining why.
	t.Lock()
	t.state = core.TaskDisabled
	t.Unlock()
	// Send task disabled event. The original deferred this Emit even though
	// it was the last statement, which only obscured the control flow; call
	// it directly instead (behavior is identical).
	event := new(scheduler_event.TaskDisabledEvent)
	event.TaskID = t.id
	event.Why = fmt.Sprintf("Task disabled with error: %s", failureMsg)
	t.eventEmitter.Emit(event)
} | go | {
"resource": ""
} |
q15306 | RecordFailure | train | func (t *task) RecordFailure(e []error) {
	// RecordFailure bumps the failure counters and remembers the most recent
	// error message.
	// We synchronize this update to ensure it is atomic
	t.failureMutex.Lock()
	defer t.failureMutex.Unlock()
	t.failedRuns++
	t.lastFailureTime = t.lastFireTime
	// Guard against an empty error slice so we never index out of range;
	// the original panicked on len(e) == 0.
	if len(e) > 0 {
		t.lastFailureMessage = e[len(e)-1].Error()
	}
} | go | {
"resource": ""
} |
q15307 | Get | train | func (t *taskCollection) Get(id string) *task {
	// Get looks up a task by id, returning nil when no such task exists.
	t.Lock()
	defer t.Unlock()
	found, ok := t.table[id]
	if !ok {
		return nil
	}
	return found
} | go | {
"resource": ""
} |
q15308 | add | train | func (t *taskCollection) add(task *task) error {
	// add stores a task in the collection, rejecting duplicates by id.
	t.Lock()
	defer t.Unlock()
	if _, exists := t.table[task.id]; exists {
		taskLogger.WithFields(log.Fields{
			"_module": "scheduler-taskCollection",
			"_block":  "add",
			"task id": task.id,
		}).Error(ErrTaskHasAlreadyBeenAdded.Error())
		return ErrTaskHasAlreadyBeenAdded
	}
	// Not present yet; save it.
	t.table[task.id] = task
	return nil
} | go | {
"resource": ""
} |
q15309 | remove | train | func (t *taskCollection) remove(task *task) error {
	// remove deletes a task from the collection; the task must exist and be
	// in a terminal state (stopped, disabled, or ended).
	t.Lock()
	defer t.Unlock()
	if _, ok := t.table[task.id]; !ok {
		taskLogger.WithFields(log.Fields{
			"_block":  "remove",
			"task id": task.id,
		}).Error(ErrTaskNotFound)
		return ErrTaskNotFound
	}
	switch task.state {
	case core.TaskStopped, core.TaskDisabled, core.TaskEnded:
		// Terminal state: safe to drop.
		delete(t.table, task.id)
		return nil
	}
	taskLogger.WithFields(log.Fields{
		"_block":  "remove",
		"task id": task.id,
	}).Error(ErrTaskNotStopped)
	return ErrTaskNotStopped
} | go | {
"resource": ""
} |
q15310 | Table | train | func (t *taskCollection) Table() map[string]*task {
	// Table returns a shallow copy of the task table so callers can iterate
	// without holding the collection lock.
	t.Lock()
	defer t.Unlock()
	snapshot := make(map[string]*task, len(t.table))
	for id, tsk := range t.table {
		snapshot[id] = tsk
	}
	return snapshot
} | go | {
"resource": ""
} |
q15311 | createTaskClients | train | func createTaskClients(mgrs *managers, wf *schedulerWorkflow) error {
// createTaskClients walks the workflow's process and publish nodes to set
// up the remote manager clients each node needs.
return walkWorkflow(wf.processNodes, wf.publishNodes, mgrs)
} | go | {
"resource": ""
} |
q15312 | Select | train | func (s *sticky) Select(aps []AvailablePlugin, taskID string) (AvailablePlugin, error) {
	// Sticky routing: reuse the plugin already bound to this task when one
	// exists, otherwise pick (and remember) a new one.
	if bound := s.plugins[taskID]; bound != nil {
		return bound, nil
	}
	return s.selectPlugin(aps, taskID)
} | go | {
"resource": ""
} |
q15313 | OptEnableRunnerTLS | train | func OptEnableRunnerTLS(grpcSecurity client.GRPCSecurity) pluginRunnerOpt {
	// OptEnableRunnerTLS returns a runner option that installs the given
	// gRPC TLS security settings on the runner.
	return func(pr *runner) {
		pr.grpcSecurity = grpcSecurity
	}
} | go | {
"resource": ""
} |
q15314 | Start | train | func (r *runner) Start() error {
// Start registers the runner as a handler on every delegate and starts the
// plugin-health monitor. Fails fast when no delegates were added.
// Delegates must be added before starting if none exist
// then this Runner can do nothing and should not start.
if len(r.delegates) == 0 {
return errors.New("No delegates added before called Start()")
}
// For each delegate register needed handlers
for _, del := range r.delegates {
e := del.RegisterHandler(HandlerRegistrationName, r)
if e != nil {
return e
}
}
// Start the monitor
r.monitor.Start(r.availablePlugins)
runnerLog.WithFields(log.Fields{
"_block": "start",
}).Debug("started")
return nil
} | go | {
"resource": ""
} |
q15315 | Stop | train | func (r *runner) Stop() []error {
	// Stop halts the monitor and unregisters the runner's handlers from all
	// delegates, collecting any unregister errors.
	var errs []error
	// Stop the monitor
	r.monitor.Stop()
	// TODO: Actually stop the plugins
	// For each delegate unregister needed handlers
	for _, del := range r.delegates {
		e := del.UnregisterHandler(HandlerRegistrationName)
		if e != nil {
			errs = append(errs, e)
		}
	}
	// Fixed: the log field previously read "start-plugin", which mislabeled
	// this function in the logs; it is the stop path. The defer was also
	// unnecessary on the final statement.
	runnerLog.WithFields(log.Fields{
		"_block": "stop",
	}).Debug("stopped")
	return errs
} | go | {
"resource": ""
} |
q15316 | HandleGomitEvent | train | func (r *runner) HandleGomitEvent(e gomit.Event) {
	// HandleGomitEvent reacts to control events: it restarts (or disables)
	// dead available plugins and tears down pools on plugin unsubscription.
	switch v := e.Body.(type) {
	case *control_event.DeadAvailablePluginEvent:
		runnerLog.WithFields(log.Fields{
			"_block":  "handle-events",
			"event":   v.Namespace(),
			"aplugin": v.String,
		}).Warning("handling dead available plugin event")
		pool, err := r.availablePlugins.getPool(v.Key)
		if err != nil {
			runnerLog.WithFields(log.Fields{
				"_block":  "handle-events",
				"aplugin": v.String,
			}).Error(err.Error())
			return
		}
		// Bug fix: the original guarded Kill with a nil check but then
		// called pool.Eligible() unconditionally, which would panic on a
		// nil pool. Bail out early when there is no pool.
		if pool == nil {
			return
		}
		pool.Kill(v.Id, "plugin dead")
		if pool.Eligible() {
			// Restart until the restart budget is exhausted (-1 = unlimited).
			if pool.RestartCount() < MaxPluginRestartCount || MaxPluginRestartCount == -1 {
				e := r.restartPlugin(v.Key)
				if e != nil {
					runnerLog.WithFields(log.Fields{
						"_block":  "handle-events",
						"aplugin": v.String,
					}).Error(e.Error())
					return
				}
				pool.IncRestartCount()
				runnerLog.WithFields(log.Fields{
					"_block":        "handle-events",
					"aplugin":       v.String,
					"restart-count": pool.RestartCount(),
				}).Warning("plugin restarted")
				r.emitter.Emit(&control_event.RestartedAvailablePluginEvent{
					Id:      v.Id,
					Name:    v.Name,
					Version: v.Version,
					Key:     v.Key,
					Type:    v.Type,
				})
			} else {
				runnerLog.WithFields(log.Fields{
					"_block":  "handle-events",
					"aplugin": v.String,
				}).Warning("plugin disabled due to exceeding restart limit: ", MaxPluginRestartCount)
				r.emitter.Emit(&control_event.MaxPluginRestartsExceededEvent{
					Id:      v.Id,
					Name:    v.Name,
					Version: v.Version,
					Key:     v.Key,
					Type:    v.Type,
				})
			}
		}
	case *control_event.PluginUnsubscriptionEvent:
		runnerLog.WithFields(log.Fields{
			"_block":         "subscribe-pool",
			"event":          v.Namespace(),
			"plugin-name":    v.PluginName,
			"plugin-version": v.PluginVersion,
			"plugin-type":    core.PluginType(v.PluginType).String(),
		}).Debug("handling plugin unsubscription event")
		err := r.handleUnsubscription(core.PluginType(v.PluginType).String(), v.PluginName, v.PluginVersion, v.TaskId)
		if err != nil {
			return
		}
	default:
		runnerLog.WithFields(log.Fields{
			"_block": "handle-events",
			"event":  v.Namespace(),
		}).Info("Nothing to do for this event")
	}
} | go | {
"resource": ""
} |
q15317 | Start | train | func (q *queue) Start() {
	// Start launches the queue's worker goroutine, but only from the stopped
	// state; Start on a running queue is a no-op.
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.status != queueStopped {
		return
	}
	q.status = queueRunning
	go q.start()
} | go | {
"resource": ""
} |
q15318 | Stop | train | func (q *queue) Stop() {
	// Stop signals the worker goroutine to exit by closing the kill channel;
	// stopping an already-stopped queue is a no-op.
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.status == queueStopped {
		return
	}
	close(q.kill)
	q.status = queueStopped
} | go | {
"resource": ""
} |
q15319 | IsUri | train | func IsUri(url string) bool {
	// IsUri reports whether url is a well-formed URL using an http(s)
	// scheme. (Idiom fix: the original spelled this as
	// `if !cond { return false }; return true`.)
	return govalidator.IsURL(url) && strings.HasPrefix(url, "http")
} | go | {
"resource": ""
} |
q15320 | MarshalJSON | train | func (i *IntRule) MarshalJSON() ([]byte, error) {
// MarshalJSON serializes the integer rule with its key, required flag,
// optional default/minimum/maximum, and a type discriminator.
return json.Marshal(&struct {
Key string `json:"key"`
Required bool `json:"required"`
Default ctypes.ConfigValue `json:"default,omitempty"`
Minimum ctypes.ConfigValue `json:"minimum,omitempty"`
Maximum ctypes.ConfigValue `json:"maximum,omitempty"`
Type string `json:"type"`
}{
Key: i.key,
Required: i.required,
Default: i.Default(),
Minimum: i.Minimum(),
Maximum: i.Maximum(),
Type: IntegerType,
})
} | go | {
"resource": ""
} |
q15321 | GobDecode | train | func (i *IntRule) GobDecode(buf []byte) error {
// GobDecode restores an IntRule from its gob encoding: key, required,
// then flag-prefixed optional default, minimum, and maximum values.
r := bytes.NewBuffer(buf)
decoder := gob.NewDecoder(r)
if err := decoder.Decode(&i.key); err != nil {
return err
}
if err := decoder.Decode(&i.required); err != nil {
return err
}
var is_default_set bool
// NOTE(review): the Decode error on this flag is ignored, and the early
// return below skips decoding minimum/maximum entirely when a default is
// present. That is correct only if GobEncode also stops after the default
// — confirm against IntRule.GobEncode before changing either side.
decoder.Decode(&is_default_set)
if is_default_set {
return decoder.Decode(&i.default_)
}
var is_minimum_set bool
decoder.Decode(&is_minimum_set)
if is_minimum_set {
if err := decoder.Decode(&i.minimum); err != nil {
return err
}
}
var is_maximum_set bool
decoder.Decode(&is_maximum_set)
if is_maximum_set {
if err := decoder.Decode(&i.maximum); err != nil {
return err
}
}
return nil
} | go | {
"resource": ""
} |
q15322 | Default | train | func (i *IntRule) Default() ctypes.ConfigValue {
	// Default returns the configured default wrapped as a config value, or
	// nil when no default was set.
	if i.default_ == nil {
		return nil
	}
	return ctypes.ConfigValueInt{Value: *i.default_}
} | go | {
"resource": ""
} |
q15323 | readConfig | train | func readConfig(cfg *Config, fpath string) {
// readConfig loads the global configuration into cfg from fpath, or from
// the default path when one exists. Silently returns when neither is
// present; any parse/validation error is fatal.
var path string
if !defaultConfigFile() && fpath == "" {
return
}
if defaultConfigFile() && fpath == "" {
path = defaultConfigPath
}
if fpath != "" {
// An explicit path must exist and must be a regular file.
f, err := os.Stat(fpath)
if err != nil {
log.Fatal(err)
}
if f.IsDir() {
log.Fatal("configuration path provided must be a file")
}
path = fpath
}
serrs := cfgfile.Read(path, &cfg, CONFIG_CONSTRAINTS)
if serrs != nil {
for _, serr := range serrs {
log.WithFields(serr.Fields()).Error(serr.Error())
}
log.Fatal("Errors found while parsing global configuration file")
}
} | go | {
"resource": ""
} |
q15324 | setBoolVal | train | func setBoolVal(field bool, ctx runtimeFlagsContext, flagName string, inverse ...bool) bool {
	// setBoolVal returns the flag's value when it was explicitly set (CLI or
	// env) or is simply true, optionally inverted for negated-sense flags;
	// otherwise the existing field value is kept.
	flagVal := ctx.Bool(flagName)
	if !ctx.IsSet(flagName) && !flagVal {
		return field
	}
	if len(inverse) > 0 {
		return !flagVal
	}
	return flagVal
} | go | {
"resource": ""
} |
q15325 | checkCmdLineFlags | train | func checkCmdLineFlags(ctx runtimeFlagsContext) (int, bool, error) {
// checkCmdLineFlags validates TLS and API address/port flags from the
// command line, returning the resolved port and whether the port was
// embedded in the address string.
tlsCert := ctx.String("tls-cert")
tlsKey := ctx.String("tls-key")
if _, err := checkTLSEnabled(tlsCert, tlsKey, commandLineErrorPrefix); err != nil {
return -1, false, err
}
// Check to see if the API address is specified (either via the CLI or through
// the associated environment variable); if so, grab the port and check that the
// address and port against the constraints (above)
addr := ctx.String("api-addr")
port := ctx.Int("api-port")
if ctx.IsSet("api-addr") || addr != "" {
portInAddr, err := checkHostPortVals(addr, &port, commandLineErrorPrefix)
if err != nil {
return -1, portInAddr, err
}
return port, portInAddr, nil
}
return port, false, nil
} | go | {
"resource": ""
} |
q15326 | checkCfgSettings | train | func checkCfgSettings(cfg *Config) (int, bool, error) {
// checkCfgSettings validates the TLS and REST API settings from the
// configuration file, mirroring checkCmdLineFlags for file-sourced values.
tlsCert := cfg.Control.TLSCertPath
tlsKey := cfg.Control.TLSKeyPath
if _, err := checkTLSEnabled(tlsCert, tlsKey, configFileErrorPrefix); err != nil {
return -1, false, err
}
addr := cfg.RestAPI.Address
var port int
// -1 signals "port not set by config file" to checkHostPortVals.
if cfg.RestAPI.PortSetByConfigFile() {
port = cfg.RestAPI.Port
} else {
port = -1
}
portInAddr, err := checkHostPortVals(addr, &port, configFileErrorPrefix)
if err != nil {
return -1, portInAddr, err
}
return port, portInAddr, nil
} | go | {
"resource": ""
} |
q15327 | setMaxProcs | train | func setMaxProcs(maxProcs int) {
	// setMaxProcs clamps the requested GOMAXPROCS to [1, NumCPU], applies
	// it, and verifies the runtime accepted the value. (Fix: the original
	// assigned _maxProcs = 1 twice in the invalid-value branch.)
	var _maxProcs int
	numProcs := runtime.NumCPU()
	switch {
	case maxProcs <= 0:
		// We prefer sane values for GOMAXPROCS
		log.WithFields(
			log.Fields{
				"_block":   "main",
				"_module":  logModule,
				"maxprocs": maxProcs,
			}).Error("Trying to set GOMAXPROCS to an invalid value")
		_maxProcs = 1
		log.WithFields(
			log.Fields{
				"_block":   "main",
				"_module":  logModule,
				"maxprocs": _maxProcs,
			}).Warning("Setting GOMAXPROCS to 1")
	case maxProcs <= numProcs:
		_maxProcs = maxProcs
	default:
		log.WithFields(
			log.Fields{
				"_block":   "main",
				"_module":  logModule,
				"maxprocs": maxProcs,
			}).Error("Trying to set GOMAXPROCS larger than number of CPUs available on system")
		_maxProcs = numProcs
		log.WithFields(
			log.Fields{
				"_block":   "main",
				"_module":  logModule,
				"maxprocs": _maxProcs,
			}).Warning("Setting GOMAXPROCS to number of CPUs on host")
	}
	log.Info("setting GOMAXPROCS to: ", _maxProcs, " core(s)")
	runtime.GOMAXPROCS(_maxProcs)
	// Verify setting worked
	actualNumProcs := runtime.GOMAXPROCS(0)
	if actualNumProcs != _maxProcs {
		log.WithFields(
			log.Fields{
				"block":          "main",
				"_module":        logModule,
				"given maxprocs": _maxProcs,
				"real maxprocs":  actualNumProcs,
			}).Warning("not using given maxprocs")
	}
} | go | {
"resource": ""
} |
q15328 | New | train | func New(e error, fields ...map[string]interface{}) *snapError {
	// New wraps a plain error in a snapError carrying structured fields.
	// Refuse to nest snapErrors; panic so the programmer fixes the call site.
	if _, already := e.(SnapError); already {
		panic("You are trying to wrap a snapError around a snapError. Don't do this.")
	}
	se := &snapError{
		err:    e,
		fields: make(map[string]interface{}),
	}
	// Merge every provided field map; later maps win on key collisions.
	for _, fieldMap := range fields {
		for key, value := range fieldMap {
			se.fields[key] = value
		}
	}
	return se
} | go | {
"resource": ""
} |
q15329 | MonitorDurationOption | train | func MonitorDurationOption(v time.Duration) monitorOption {
// MonitorDurationOption sets the monitor's health-check interval and
// returns an option that restores the previous value (undo pattern).
return func(m *monitor) monitorOption {
previous := m.duration
m.duration = v
return MonitorDurationOption(previous)
}
} | go | {
"resource": ""
} |
q15330 | Start | train | func (m *monitor) Start(availablePlugins *availablePlugins) {
//start a routine that will be fired every X duration looping
//over available plugins and firing a health check routine
ticker := time.NewTicker(m.duration)
m.quit = make(chan struct{})
go func() {
for {
select {
case <-ticker.C:
go func() {
// Snapshot under read lock; health checks run concurrently and
// only for local (non-remote) plugins.
availablePlugins.RLock()
for _, ap := range availablePlugins.all() {
if !ap.IsRemote() {
go ap.CheckHealth()
}
}
availablePlugins.RUnlock()
}()
case <-m.quit:
ticker.Stop()
m.State = MonitorStopped
return
}
}
}()
// NOTE(review): m.State is written here and in the goroutine above without
// visible locking — confirm callers serialize access to it.
m.State = MonitorStarted
} | go | {
"resource": ""
} |
q15331 | GetConfigPolicy | train | func (f *Mock) GetConfigPolicy() (plugin.ConfigPolicy, error) {
// GetConfigPolicy returns the mock collector's policy: an optional "name"
// (default "bob") and a required "password", on namespaces that include
// special characters to exercise escaping.
p := plugin.NewConfigPolicy()
err := p.AddNewStringRule([]string{"intel", "mock", "test%>"}, "name", false, plugin.SetDefaultString("bob"))
if err != nil {
return *p, err
}
err = p.AddNewStringRule([]string{"intel", "mock", "/foo=㊽"}, "password", true)
return *p, err
} | go | {
"resource": ""
} |
q15332 | GetPluginConfig | train | func (c *Client) GetPluginConfig(pluginType, name, version string) *GetPluginConfigResult {
// GetPluginConfig fetches the config items for a plugin identified by
// type/name/version from the REST API; transport and API errors are
// surfaced on the result's Err field.
r := &GetPluginConfigResult{}
resp, err := c.do("GET", fmt.Sprintf("/plugins/%s/%s/%s/config", pluginType, url.QueryEscape(name), version), ContentTypeJSON)
if err != nil {
r.Err = err
return r
}
switch resp.Meta.Type {
case rbody.PluginConfigItemType:
// Success
config := resp.Body.(*rbody.PluginConfigItem)
r = &GetPluginConfigResult{config, nil}
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
q15333 | SetPluginConfig | train | func (c *Client) SetPluginConfig(pluginType, name, version string, key string, value ctypes.ConfigValue) *SetPluginConfigResult {
// SetPluginConfig sets a single key/value config item on a plugin via a
// PUT to the REST API; transport and API errors are surfaced on Err.
r := &SetPluginConfigResult{}
b, err := json.Marshal(map[string]ctypes.ConfigValue{key: value})
if err != nil {
r.Err = err
return r
}
resp, err := c.do("PUT", fmt.Sprintf("/plugins/%s/%s/%s/config", pluginType, url.QueryEscape(name), version), ContentTypeJSON, b)
if err != nil {
r.Err = err
return r
}
switch resp.Meta.Type {
case rbody.SetPluginConfigItemType:
// Success
config := resp.Body.(*rbody.SetPluginConfigItem)
r = &SetPluginConfigResult{config, nil}
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
q15334 | DeletePluginConfig | train | func (c *Client) DeletePluginConfig(pluginType, name, version string, key string) *DeletePluginConfigResult {
// DeletePluginConfig removes a single config key from a plugin via a
// DELETE to the REST API; transport and API errors are surfaced on Err.
r := &DeletePluginConfigResult{}
b, err := json.Marshal([]string{key})
if err != nil {
r.Err = err
return r
}
resp, err := c.do("DELETE", fmt.Sprintf("/plugins/%s/%s/%s/config", pluginType, url.QueryEscape(name), version), ContentTypeJSON, b)
if err != nil {
r.Err = err
return r
}
switch resp.Meta.Type {
case rbody.DeletePluginConfigItemType:
// Success
config := resp.Body.(*rbody.DeletePluginConfigItem)
r = &DeletePluginConfigResult{config, nil}
case rbody.ErrorType:
r.Err = resp.Body.(*rbody.Error)
default:
r.Err = ErrAPIResponseMetaType
}
return r
} | go | {
"resource": ""
} |
q15335 | init | train | func init() {
// Cache the host name once at startup for metric tagging; fall back to a
// sentinel when it cannot be determined.
host, err := os.Hostname()
if err != nil {
log.WithFields(log.Fields{
"_module": "control",
"_file": "metrics.go,",
"_block": "addStandardAndWorkflowTags",
"error": err.Error(),
}).Error("Unable to determine hostname")
host = "not_found"
}
hostnameReader = &hostnameReaderType{hostname: host, hostnameRefreshTTL: time.Hour, lastRefresh: time.Now()}
} | go | {
"resource": ""
} |
q15336 | RmUnloadedPluginMetrics | train | func (mc *metricCatalog) RmUnloadedPluginMetrics(lp *loadedPlugin) {
	// Drop every metric the unloaded plugin contributed, then rebuild the
	// catalog's key index from the metrics that remain.
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	mc.tree.DeleteByPlugin(lp)
	remaining := mc.tree.gatherMetricTypes()
	keys := make([]string, 0, len(remaining))
	for _, mt := range remaining {
		keys = append(keys, mt.Namespace().String())
	}
	mc.keys = keys
} | go | {
"resource": ""
} |
q15337 | Add | train | func (mc *metricCatalog) Add(m *metricType) {
	// Add catalogs the metric and records its namespace key, deduplicated
	// against the existing key index.
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	mc.keys = appendIfMissing(mc.keys, m.Namespace().String())
	mc.tree.Add(m)
} | go | {
"resource": ""
} |
q15338 | GetMetric | train | func (mc *metricCatalog) GetMetric(requested core.Namespace, version int) (*metricType, error) {
// GetMetric resolves a cataloged metric by namespace and version. For
// dynamic metrics it returns a copy whose dynamic elements carry the
// concrete values from the requested namespace.
// NOTE(review): this read path takes the full mutex rather than a read
// lock — confirm whether mc.mutex is an RWMutex before changing.
mc.mutex.Lock()
defer mc.mutex.Unlock()
var ns core.Namespace
catalogedmt, err := mc.tree.GetMetric(requested.Strings(), version)
if err != nil {
log.WithFields(log.Fields{
"_module": "control",
"_file": "metrics.go,",
"_block": "get-metric",
"error": err,
}).Error("error getting metric")
return nil, err
}
ns = catalogedmt.Namespace()
if isDynamic, _ := ns.IsDynamic(); isDynamic {
// when namespace is dynamic and the cataloged namespace (e.g. ns=/intel/mock/*/bar) is different than
// the requested (e.g. requested=/intel/mock/host0/bar), than specify an instance of dynamic element,
// so as a result the dynamic element will have set a value (e.g. ns[2].Value equals "host0")
if ns.String() != requested.String() {
ns = specifyInstanceOfDynamicMetric(ns, requested)
}
}
// Return a fresh metricType so the caller cannot mutate the cataloged one.
returnedmt := &metricType{
Plugin: catalogedmt.Plugin,
namespace: ns,
version: catalogedmt.Version(),
lastAdvertisedTime: catalogedmt.LastAdvertisedTime(),
tags: catalogedmt.Tags(),
policy: catalogedmt.Plugin.Policy().Get(catalogedmt.Namespace().Strings()),
config: catalogedmt.Config(),
unit: catalogedmt.Unit(),
description: catalogedmt.Description(),
subscriptions: catalogedmt.SubscriptionCount(),
}
return returnedmt, nil
} | go | {
"resource": ""
} |
q15339 | GetVersions | train | func (mc *metricCatalog) GetVersions(ns core.Namespace) ([]*metricType, error) {
// GetVersions returns every cataloged version of the metric(s) matching
// the given namespace.
mc.mutex.Lock()
defer mc.mutex.Unlock()
mts, err := mc.tree.GetVersions(ns.Strings())
if err != nil {
log.WithFields(log.Fields{
"_module": "control",
"_file": "metrics.go,",
"_block": "get-versions",
"error": err,
}).Error("error getting plugin version")
return nil, err
}
return mts, nil
} | go | {
"resource": ""
} |
q15340 | Remove | train | func (mc *metricCatalog) Remove(ns core.Namespace) {
// Remove deletes the metric subtree rooted at the given namespace from
// the catalog tree.
mc.mutex.Lock()
defer mc.mutex.Unlock()
mc.tree.Remove(ns.Strings())
} | go | {
"resource": ""
} |
q15341 | Subscribe | train | func (mc *metricCatalog) Subscribe(ns []string, version int) error {
// Subscribe increments the subscription count on the cataloged metric
// identified by namespace and version.
mc.mutex.Lock()
defer mc.mutex.Unlock()
m, err := mc.tree.GetMetric(ns, version)
if err != nil {
log.WithFields(log.Fields{
"_module": "control",
"_file": "metrics.go,",
"_block": "subscribe",
"error": err,
}).Error("error getting metric")
return err
}
m.Subscribe()
return nil
} | go | {
"resource": ""
} |
q15342 | containsTuple | train | func containsTuple(nsElement string) (bool, []string) {
	// containsTuple reports whether the namespace element is a tuple, e.g.
	// "(host0;host1)", returning its whitespace-trimmed members when it is.
	if !isTuple(nsElement) {
		return false, nil
	}
	if strings.ContainsAny(nsElement, "*") {
		// An asterisk subsumes every other tuple member (e.g.
		// "(host0;host1;*)"), so report just "*" to avoid retrieving the
		// same metric more than once.
		return true, []string{"*"}
	}
	inner := strings.TrimSuffix(strings.TrimPrefix(nsElement, core.TuplePrefix), core.TupleSuffix)
	members := []string{}
	for _, raw := range strings.Split(inner, core.TupleSeparator) {
		// Strip leading and trailing whitespace from each member.
		members = append(members, strings.TrimSpace(raw))
	}
	return true, members
} | go | {
"resource": ""
} |
q15343 | specifyInstanceOfDynamicMetric | train | func specifyInstanceOfDynamicMetric(catalogedNamespace core.Namespace, requestedNamespace core.Namespace) core.Namespace {
	// Copy the cataloged namespace and fill each dynamic element with the
	// concrete value the caller requested (e.g. "*" becomes "host0").
	specified := make(core.Namespace, len(catalogedNamespace))
	copy(specified, catalogedNamespace)
	_, dynamicIndexes := catalogedNamespace.IsDynamic()
	for _, idx := range dynamicIndexes {
		// Only fill positions the requested namespace actually covers.
		if idx < len(requestedNamespace) {
			specified[idx].Value = requestedNamespace[idx].Value
		}
	}
	return specified
} | go | {
"resource": ""
} |
q15344 | validateMetricNamespace | train | func validateMetricNamespace(ns core.Namespace) error {
// validateMetricNamespace enforces the rules for advertised metric
// namespaces: static elements have no name, dynamic elements ("*") must
// be named, tuples are forbidden, and the namespace may not end with "*".
value := ""
for _, i := range ns {
// A dynamic element requires the name while a static element does not.
if i.Name != "" && i.Value != "*" {
return errorMetricStaticElementHasName(i.Value, i.Name, ns.String())
}
if i.Name == "" && i.Value == "*" {
return errorMetricDynamicElementHasNoName(i.Value, ns.String())
}
if isTuple(i.Value) {
return errorMetricElementHasTuple(i.Value, ns.String())
}
value += i.Value
}
// plugin should NOT advertise metrics ending with a wildcard
if strings.HasSuffix(value, "*") {
return errorMetricEndsWithAsterisk(ns.String())
}
return nil
} | go | {
"resource": ""
} |
q15345 | MarshalJSON | train | func (b *BoolRule) MarshalJSON() ([]byte, error) {
// MarshalJSON serializes the boolean rule with its key, required flag,
// optional default, and a type discriminator.
return json.Marshal(&struct {
Key string `json:"key"`
Required bool `json:"required"`
Default ctypes.ConfigValue `json:"default,omitempty"`
Type string `json:"type"`
}{
Key: b.key,
Required: b.required,
Default: b.Default(),
Type: BoolType,
})
} | go | {
"resource": ""
} |
q15346 | GobEncode | train | func (b *BoolRule) GobEncode() ([]byte, error) {
	// GobEncode serializes the rule as: key, required, then a presence flag
	// followed (when true) by the default value. (Fix: the original ignored
	// the Encode errors on the presence flag.)
	w := new(bytes.Buffer)
	encoder := gob.NewEncoder(w)
	if err := encoder.Encode(b.key); err != nil {
		return nil, err
	}
	if err := encoder.Encode(b.required); err != nil {
		return nil, err
	}
	if b.default_ == nil {
		if err := encoder.Encode(false); err != nil {
			return nil, err
		}
	} else {
		if err := encoder.Encode(true); err != nil {
			return nil, err
		}
		if err := encoder.Encode(&b.default_); err != nil {
			return nil, err
		}
	}
	return w.Bytes(), nil
} | go | {
"resource": ""
} |
q15347 | GobDecode | train | func (b *BoolRule) GobDecode(buf []byte) error {
// GobDecode restores a BoolRule from its gob encoding: key, required,
// then a presence flag followed (when true) by the default value.
r := bytes.NewBuffer(buf)
decoder := gob.NewDecoder(r)
if err := decoder.Decode(&b.key); err != nil {
return err
}
if err := decoder.Decode(&b.required); err != nil {
return err
}
var isDefaultSet bool
// NOTE(review): the Decode error on the presence flag is ignored; a short
// buffer would silently leave the default unset — confirm acceptable.
decoder.Decode(&isDefaultSet)
if isDefaultSet {
return decoder.Decode(&b.default_)
}
return nil
} | go | {
"resource": ""
} |
q15348 | Validate | train | func (b *BoolRule) Validate(cv ctypes.ConfigValue) error {
	// Validate accepts only boolean-typed config values.
	if cv.Type() == BoolType {
		return nil
	}
	return wrongType(b.key, cv.Type(), BoolType)
} | go | {
"resource": ""
} |
q15349 | Default | train | func (b *BoolRule) Default() ctypes.ConfigValue {
	// Default returns the configured default wrapped as a config value, or
	// nil when no default was set.
	if b.default_ == nil {
		return nil
	}
	return ctypes.ConfigValueBool{Value: *b.default_}
} | go | {
"resource": ""
} |
q15350 | GetConfigPolicy | train | func (s *SessionState) GetConfigPolicy(args []byte, reply *[]byte) error {
	// GetConfigPolicy is the RPC handler that fetches the plugin's config
	// policy and encodes it into reply.
	defer catchPluginPanic(s.Logger())
	s.logger.Debug("GetConfigPolicy called")
	policy, err := s.plugin.GetConfigPolicy()
	if err != nil {
		// Idiom fix: fmt.Errorf replaces errors.New(fmt.Sprintf(...));
		// the message is unchanged.
		return fmt.Errorf("GetConfigPolicy call error : %s", err.Error())
	}
	r := GetConfigPolicyReply{Policy: policy}
	*reply, err = s.Encode(r)
	if err != nil {
		return err
	}
	return nil
} | go | {
"resource": ""
} |
q15351 | Ping | train | func (s *SessionState) Ping(arg []byte, reply *[]byte) error {
// Ping is the RPC liveness probe: it resets the heartbeat timer and
// replies with an empty payload.
// For now we return nil. We can return an error if we are shutting
// down or otherwise in a state we should signal poor health.
// Reply should contain any context.
s.ResetHeartbeat()
s.logger.Debug("Ping received")
*reply = []byte{}
return nil
} | go | {
"resource": ""
} |
q15352 | Kill | train | func (s *SessionState) Kill(args []byte, reply *[]byte) error {
// Kill is the RPC shutdown request: it logs the agent's reason and
// schedules process exit shortly after, so this reply can still be sent.
a := &KillArgs{}
err := s.Decode(args, a)
if err != nil {
return err
}
s.logger.Debugf("Kill called by agent, reason: %s\n", a.Reason)
go func() {
// Delay the kill signal so the RPC response has time to flush.
time.Sleep(time.Second * 2)
s.killChan <- 0
}()
*reply = []byte{}
return nil
} | go | {
"resource": ""
} |
q15353 | DeleteByPlugin | train | func (m *MTTrie) DeleteByPlugin(cp core.CatalogedPlugin) {
	// DeleteByPlugin removes every metric provided by the given plugin,
	// matching on the type/name/version key. The plugin's key is computed
	// once outside the loop since it never changes.
	cpKey := fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d", cp.TypeName(), cp.Name(), cp.Version())
	for _, mt := range m.gatherMetricTypes() {
		mtKey := fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d", mt.Plugin.TypeName(), mt.Plugin.Name(), mt.Plugin.Version())
		if mtKey == cpKey {
			m.RemoveMetric(mt)
		}
	}
} | go | {
"resource": ""
} |
q15354 | RemoveMetric | train | func (m *MTTrie) RemoveMetric(mt metricType) {
	// RemoveMetric deletes the matching version of the metric from its trie
	// node, if the node exists.
	node, _ := m.find(mt.Namespace().Strings())
	if node == nil {
		return
	}
	for version, existing := range node.mts {
		if existing.Version() == mt.Version() {
			delete(node.mts, version)
		}
	}
} | go | {
"resource": ""
} |
q15355 | Add | train | func (mtt *mttNode) Add(mt *metricType) {
// Add inserts the metric into the trie, creating any missing nodes along
// its namespace path and storing the metric under its version.
ns := mt.Namespace()
node, index := mtt.walk(ns.Strings())
if index == len(ns) {
// Full path already exists; just record this version.
if node.mts == nil {
node.mts = make(map[int]*metricType)
}
node.mts[mt.Version()] = mt
return
}
// walk through the remaining namespace and build out the
// new branch in the trie.
for _, n := range ns[index:] {
if node.children == nil {
node.children = make(map[string]*mttNode)
}
node.children[n.Value] = &mttNode{}
node = node.children[n.Value]
}
node.mts = make(map[int]*metricType)
node.mts[mt.Version()] = mt
} | go | {
"resource": ""
} |
q15356 | Fetch | train | func (mtt *mttNode) Fetch(ns []string) ([]*metricType, error) {
	// Fetch returns every metric at or below the given namespace; a
	// non-empty namespace that matches nothing yields an error.
	var mts []*metricType
	for _, node := range mtt.fetch(ns) {
		for _, mt := range node.mts {
			mts = append(mts, mt)
		}
	}
	if len(mts) == 0 && len(ns) > 0 {
		return nil, errorMetricsNotFound("/" + strings.Join(ns, "/"))
	}
	return mts, nil
} | go | {
"resource": ""
} |
q15357 | Remove | train | func (mtt *mttNode) Remove(ns []string) error {
	// Remove deletes the node at the given namespace by unlinking it from
	// its parent; errors when the node (or its parent) cannot be found.
	_, err := mtt.find(ns)
	if err != nil {
		return err
	}
	parent, err := mtt.find(ns[:len(ns)-1])
	if err != nil {
		return err
	}
	// remove node from parent (idiom fix: the original indexed the last
	// element via the convoluted ns[len(ns)-1:][0]).
	delete(parent.children, ns[len(ns)-1])
	return nil
} | go | {
"resource": ""
} |
q15358 | GetVersions | train | func (mtt *mttNode) GetVersions(ns []string) ([]*metricType, error) {
// GetVersions returns every version of every metric whose namespace
// matches ns (wildcards supported via search); errors on an empty
// namespace or when nothing matches.
var nodes []*mttNode
var mts []*metricType
if len(ns) == 0 {
return nil, errorEmptyNamespace()
}
nodes = mtt.search(nodes, ns)
for _, node := range nodes {
// concatenates metric types in ALL versions into a single slice
for _, mt := range node.mts {
mts = append(mts, mt)
}
}
if len(mts) == 0 {
return nil, errorMetricNotFound("/" + strings.Join(ns, "/"))
}
return mts, nil
} | go | {
"resource": ""
} |
q15359 | fetch | train | func (mtt *mttNode) fetch(ns []string) []*mttNode {
	// fetch collects the node at ns plus all of its metric-bearing
	// descendants; nil when the namespace does not resolve.
	node, err := mtt.find(ns)
	if err != nil {
		return nil
	}
	var result []*mttNode
	if node.mts != nil {
		result = append(result, node)
	}
	if node.children != nil {
		result = gatherDescendants(result, node)
	}
	return result
} | go | {
"resource": ""
} |
q15360 | search | train | func (mtt *mttNode) search(nodes []*mttNode, ns []string) []*mttNode {
// search recursively collects trie nodes matching the namespace, where
// each element may be a literal or "*"; a trailing "*" matches the whole
// subtree.
parent := mtt
var children []*mttNode
if parent.children == nil {
return nodes
}
if len(ns) == 1 {
// the last element of ns is under searching process
switch ns[0] {
case "*":
// fetch all descendants when wildcard ends namespace
children = parent.fetch([]string{})
default:
children = parent.gatherChildren(ns[0])
}
nodes = append(nodes, children...)
return nodes
}
// Not at the last element: descend into each matching child.
children = parent.gatherChildren(ns[0])
for _, child := range children {
nodes = child.search(nodes, ns[1:])
}
return nodes
} | go | {
"resource": ""
} |
q15361 | gatherDescendants | train | func gatherDescendants(descendants []*mttNode, node *mttNode) []*mttNode {
for _, child := range node.children {
if child.mts != nil {
descendants = append(descendants, child)
}
if child.children != nil {
descendants = gatherDescendants(descendants, child)
}
}
return descendants
} | go | {
"resource": ""
} |
q15362 | MarshalJSON | train | func (s *StringRule) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Key string `json:"key"`
Required bool `json:"required"`
Default ctypes.ConfigValue `json:"default"`
Type string `json:"type"`
}{
Key: s.key,
Required: s.required,
Default: s.Default(),
Type: StringType,
})
} | go | {
"resource": ""
} |
q15363 | GobDecode | train | func (s *StringRule) GobDecode(buf []byte) error {
r := bytes.NewBuffer(buf)
decoder := gob.NewDecoder(r)
if err := decoder.Decode(&s.key); err != nil {
return err
}
if err := decoder.Decode(&s.required); err != nil {
return err
}
var is_default_set bool
decoder.Decode(&is_default_set)
if is_default_set {
return decoder.Decode(&s.default_)
}
return nil
} | go | {
"resource": ""
} |
q15364 | Validate | train | func (s *StringRule) Validate(cv ctypes.ConfigValue) error {
// Check that type is correct
if cv.Type() != StringType {
return wrongType(s.key, cv.Type(), StringType)
}
return nil
} | go | {
"resource": ""
} |
q15365 | Default | train | func (s *StringRule) Default() ctypes.ConfigValue {
if s.default_ != nil {
return ctypes.ConfigValueStr{Value: *s.default_}
}
return nil
} | go | {
"resource": ""
} |
q15366 | Stop | train | func (a *availablePlugin) Stop(r string) error {
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "stop",
"plugin_name": a,
}).Info("stopping available plugin")
if a.IsRemote() {
return a.client.Close()
}
return a.client.Kill(r)
} | go | {
"resource": ""
} |
q15367 | Kill | train | func (a *availablePlugin) Kill(r string) error {
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "kill",
"plugin_name": a,
}).Info("hard killing available plugin")
if a.fromPackage {
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "kill",
"plugin_name": a,
"pluginPath": a.execPath,
}).Debug("deleting available plugin package")
os.RemoveAll(filepath.Dir(a.execPath))
}
// If it's a streaming plugin, we need to signal the scheduler that
// this plugin is being killed.
if c, ok := a.client.(client.PluginStreamCollectorClient); ok {
c.Killed()
}
if a.ePlugin != nil {
return a.ePlugin.Kill()
}
return nil
} | go | {
"resource": ""
} |
q15368 | CheckHealth | train | func (a *availablePlugin) CheckHealth() {
go func() {
a.healthChan <- a.client.Ping()
}()
select {
case err := <-a.healthChan:
if err == nil {
if a.failedHealthChecks > 0 {
// only log on first ok health check
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "check-health",
"plugin_name": a,
}).Debug("health is ok")
}
a.failedHealthChecks = 0
} else {
a.healthCheckFailed()
}
case <-time.After(DefaultHealthCheckTimeout):
a.healthCheckFailed()
}
} | go | {
"resource": ""
} |
q15369 | healthCheckFailed | train | func (a *availablePlugin) healthCheckFailed() {
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "check-health",
"plugin_name": a,
}).Warning("heartbeat missed")
a.failedHealthChecks++
if a.failedHealthChecks >= DefaultHealthCheckFailureLimit {
log.WithFields(log.Fields{
"_module": "control-aplugin",
"block": "check-health",
"plugin_name": a,
}).Warning("heartbeat failed")
pde := &control_event.DeadAvailablePluginEvent{
Name: a.name,
Version: a.version,
Type: int(a.pluginType),
Key: a.key,
Id: a.ID(),
String: a.String(),
}
defer a.emitter.Emit(pde)
}
hcfe := &control_event.HealthCheckFailedEvent{
Name: a.name,
Version: a.version,
Type: int(a.pluginType),
}
defer a.emitter.Emit(hcfe)
} | go | {
"resource": ""
} |
q15370 | CreateTask | train | func (c *Client) CreateTask(s *Schedule, wf *wmap.WorkflowMap, name string, deadline string, startTask bool, maxFailures int) *CreateTaskResult {
t := core.TaskCreationRequest{
Schedule: &core.Schedule{
Type: s.Type,
Interval: s.Interval,
StartTimestamp: s.StartTimestamp,
StopTimestamp: s.StopTimestamp,
Count: s.Count,
},
Workflow: wf,
Start: startTask,
MaxFailures: maxFailures,
}
if name != "" {
t.Name = name
}
if deadline != "" {
t.Deadline = deadline
}
// Marshal to JSON for request body
j, err := json.Marshal(t)
if err != nil {
return &CreateTaskResult{Err: err}
}
resp, err := c.do("POST", "/tasks", ContentTypeJSON, j)
if err != nil {
return &CreateTaskResult{Err: err}
}
switch resp.Meta.Type {
case rbody.AddScheduledTaskType:
// Success
return &CreateTaskResult{resp.Body.(*rbody.AddScheduledTask), nil}
case rbody.ErrorType:
return &CreateTaskResult{Err: resp.Body.(*rbody.Error)}
default:
return &CreateTaskResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15371 | WatchTask | train | func (c *Client) WatchTask(id string) *WatchTasksResult {
// during watch we don't want to have a timeout
// Store the old timeout so we can restore when we are through
oldTimeout := c.http.Timeout
c.http.Timeout = time.Duration(0)
r := &WatchTasksResult{
EventChan: make(chan *rbody.StreamedTaskEvent),
DoneChan: make(chan struct{}),
}
url := fmt.Sprintf("%s/tasks/%v/watch", c.prefix, id)
req, err := http.NewRequest("GET", url, nil)
addAuth(req, c.Username, c.Password)
if err != nil {
r.Err = err
r.Close()
return r
}
resp, err := c.http.Do(req)
if err != nil {
if strings.Contains(err.Error(), "tls: oversized record") || strings.Contains(err.Error(), "malformed HTTP response") {
r.Err = fmt.Errorf("error connecting to API URI: %s. Do you have an http/https mismatch?", c.URL)
} else {
r.Err = err
}
r.Close()
return r
}
if resp.StatusCode != 200 {
ar, err := httpRespToAPIResp(resp)
if err != nil {
r.Err = err
} else {
r.Err = errors.New(ar.Meta.Message)
}
r.Close()
return r
}
// Start watching
go func() {
reader := bufio.NewReader(resp.Body)
defer func() { c.http.Timeout = oldTimeout }()
for {
select {
case <-r.DoneChan:
resp.Body.Close()
return
default:
line, _ := reader.ReadBytes('\n')
sline := string(line)
if sline == "" || sline == "\n" {
continue
}
if strings.HasPrefix(sline, "data:") {
sline = strings.TrimPrefix(sline, "data:")
line = []byte(sline)
}
ste := &rbody.StreamedTaskEvent{}
err := json.Unmarshal(line, ste)
if err != nil {
r.Err = err
r.Close()
return
}
switch ste.EventType {
case rbody.TaskWatchTaskDisabled:
r.EventChan <- ste
r.Close()
case rbody.TaskWatchTaskStopped, rbody.TaskWatchTaskEnded, rbody.TaskWatchTaskStarted, rbody.TaskWatchMetricEvent:
r.EventChan <- ste
}
}
}
}()
return r
} | go | {
"resource": ""
} |
q15372 | GetTasks | train | func (c *Client) GetTasks() *GetTasksResult {
resp, err := c.do("GET", "/tasks", ContentTypeJSON, nil)
if err != nil {
return &GetTasksResult{Err: err}
}
switch resp.Meta.Type {
case rbody.ScheduledTaskListReturnedType:
// Success
return &GetTasksResult{resp.Body.(*rbody.ScheduledTaskListReturned), nil}
case rbody.ErrorType:
return &GetTasksResult{Err: resp.Body.(*rbody.Error)}
default:
return &GetTasksResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15373 | GetTask | train | func (c *Client) GetTask(id string) *GetTaskResult {
resp, err := c.do("GET", fmt.Sprintf("/tasks/%v", id), ContentTypeJSON, nil)
if err != nil {
return &GetTaskResult{Err: err}
}
switch resp.Meta.Type {
case rbody.ScheduledTaskReturnedType:
// Success
return &GetTaskResult{resp.Body.(*rbody.ScheduledTaskReturned), nil}
case rbody.ErrorType:
return &GetTaskResult{Err: resp.Body.(*rbody.Error)}
default:
return &GetTaskResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15374 | StartTask | train | func (c *Client) StartTask(id string) *StartTasksResult {
resp, err := c.do("PUT", fmt.Sprintf("/tasks/%v/start", id), ContentTypeJSON)
if err != nil {
return &StartTasksResult{Err: err}
}
switch resp.Meta.Type {
case rbody.ScheduledTaskStartedType:
// Success
return &StartTasksResult{resp.Body.(*rbody.ScheduledTaskStarted), nil}
case rbody.ErrorType:
return &StartTasksResult{Err: resp.Body.(*rbody.Error)}
default:
return &StartTasksResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15375 | StopTask | train | func (c *Client) StopTask(id string) *StopTasksResult {
resp, err := c.do("PUT", fmt.Sprintf("/tasks/%v/stop", id), ContentTypeJSON)
if err != nil {
return &StopTasksResult{Err: err}
}
if resp == nil {
return nil
}
switch resp.Meta.Type {
case rbody.ScheduledTaskStoppedType:
// Success
return &StopTasksResult{resp.Body.(*rbody.ScheduledTaskStopped), nil}
case rbody.ErrorType:
return &StopTasksResult{Err: resp.Body.(*rbody.Error)}
default:
return &StopTasksResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15376 | RemoveTask | train | func (c *Client) RemoveTask(id string) *RemoveTasksResult {
resp, err := c.do("DELETE", fmt.Sprintf("/tasks/%v", id), ContentTypeJSON)
if err != nil {
return &RemoveTasksResult{Err: err}
}
switch resp.Meta.Type {
case rbody.ScheduledTaskRemovedType:
// Success
return &RemoveTasksResult{resp.Body.(*rbody.ScheduledTaskRemoved), nil}
case rbody.ErrorType:
return &RemoveTasksResult{Err: resp.Body.(*rbody.Error)}
default:
return &RemoveTasksResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15377 | EnableTask | train | func (c *Client) EnableTask(id string) *EnableTaskResult {
resp, err := c.do("PUT", fmt.Sprintf("/tasks/%v/enable", id), ContentTypeJSON)
if err != nil {
return &EnableTaskResult{Err: err}
}
switch resp.Meta.Type {
case rbody.ScheduledTaskEnabledType:
return &EnableTaskResult{resp.Body.(*rbody.ScheduledTaskEnabled), nil}
case rbody.ErrorType:
return &EnableTaskResult{Err: resp.Body.(*rbody.Error)}
default:
return &EnableTaskResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15378 | UnmarshalJSON | train | func (c *ConfigPolicyNode) UnmarshalJSON(data []byte) error {
m := map[string]interface{}{}
decoder := json.NewDecoder(bytes.NewReader(data))
if err := decoder.Decode(&m); err != nil {
return err
}
if rs, ok := m["rules"]; ok {
if rules, ok := rs.(map[string]interface{}); ok {
addRulesToConfigPolicyNode(rules, c)
}
}
return nil
} | go | {
"resource": ""
} |
q15379 | Add | train | func (p *ConfigPolicyNode) Add(rules ...Rule) {
p.mutex.Lock()
defer p.mutex.Unlock()
for _, r := range rules {
p.rules[r.Key()] = r
}
} | go | {
"resource": ""
} |
q15380 | Process | train | func (c *ConfigPolicyNode) Process(m map[string]ctypes.ConfigValue) (*map[string]ctypes.ConfigValue, *ProcessingErrors) {
c.mutex.Lock()
defer c.mutex.Unlock()
pErrors := NewProcessingErrors()
// Loop through each rule and process
for key, rule := range c.rules {
// items exists for rule
if cv, ok := m[key]; ok {
// Validate versus matching data
e := rule.Validate(cv)
if e != nil {
pErrors.AddError(e)
}
} else {
// If it was required add error
if rule.Required() {
e := fmt.Errorf("required key missing (%s)", key)
pErrors.AddError(e)
} else {
// If default returns we should add it
cv := rule.Default()
if cv != nil {
m[key] = cv
}
}
}
}
if pErrors.HasErrors() {
return nil, pErrors
}
return &m, pErrors
} | go | {
"resource": ""
} |
q15381 | addRulesToConfigPolicyNode | train | func addRulesToConfigPolicyNode(rules map[string]interface{}, cpn *ConfigPolicyNode) error {
for k, rule := range rules {
if rule, ok := rule.(map[string]interface{}); ok {
req, _ := rule["required"].(bool)
switch rule["type"] {
case "integer":
r, _ := NewIntegerRule(k, req)
if d, ok := rule["default"]; ok {
// json encoding an int results in a float when decoding
def_, _ := d.(float64)
def := int(def_)
r.default_ = &def
}
if m, ok := rule["minimum"]; ok {
min_, _ := m.(float64)
min := int(min_)
r.minimum = &min
}
if m, ok := rule["maximum"]; ok {
max_, _ := m.(float64)
max := int(max_)
r.maximum = &max
}
cpn.Add(r)
case "string":
r, _ := NewStringRule(k, req)
if d, ok := rule["default"]; ok {
def, _ := d.(string)
if def != "" {
r.default_ = &def
}
}
cpn.Add(r)
case "bool":
r, _ := NewBoolRule(k, req)
if d, ok := rule["default"]; ok {
def, _ := d.(bool)
r.default_ = &def
}
cpn.Add(r)
case "float":
r, _ := NewFloatRule(k, req)
if d, ok := rule["default"]; ok {
def, _ := d.(float64)
r.default_ = &def
}
if m, ok := rule["minimum"]; ok {
min, _ := m.(float64)
r.minimum = &min
}
if m, ok := rule["maximum"]; ok {
max, _ := m.(float64)
r.maximum = &max
}
cpn.Add(r)
default:
return errors.New("unknown type")
}
}
}
return nil
} | go | {
"resource": ""
} |
q15382 | SetCertPath | train | func (a Arg) SetCertPath(certPath string) Arg {
a.CertPath = certPath
return a
} | go | {
"resource": ""
} |
q15383 | SetKeyPath | train | func (a Arg) SetKeyPath(keyPath string) Arg {
a.KeyPath = keyPath
return a
} | go | {
"resource": ""
} |
q15384 | SetTLSEnabled | train | func (a Arg) SetTLSEnabled(tlsEnabled bool) Arg {
a.TLSEnabled = tlsEnabled
return a
} | go | {
"resource": ""
} |
q15385 | SetCACertPaths | train | func (a Arg) SetCACertPaths(caCertPaths string) Arg {
a.CACertPaths = caCertPaths
return a
} | go | {
"resource": ""
} |
q15386 | NewArg | train | func NewArg(logLevel int, pprof bool) Arg {
return Arg{
LogLevel: log.Level(logLevel),
PingTimeoutDuration: PingTimeoutDurationDefault,
Pprof: pprof,
}
} | go | {
"resource": ""
} |
q15387 | NewWindowedSchedule | train | func NewWindowedSchedule(i time.Duration, start *time.Time, stop *time.Time, count uint) *WindowedSchedule {
// if stop and count were both defined, ignore the `count`
if count != 0 && stop != nil {
count = 0
// log about ignoring the `count`
logger.WithFields(log.Fields{
"_block": "NewWindowedSchedule",
}).Warning("The window stop timestamp and the count cannot be specified simultaneously. The parameter `count` has been ignored.")
}
return &WindowedSchedule{
Interval: i,
StartTime: start,
StopTime: stop,
Count: count,
}
} | go | {
"resource": ""
} |
q15388 | setStopOnTime | train | func (w *WindowedSchedule) setStopOnTime() {
if w.StopTime == nil && w.Count != 0 {
// determine the window stop based on the `count` and `interval`
var newStop time.Time
// if start is not set or points in the past,
// use the current time to calculate stopOnTime
if w.StartTime != nil && time.Now().Before(*w.StartTime) {
newStop = w.StartTime.Add(time.Duration(w.Count) * w.Interval)
} else {
// set a new stop timestamp from this point in time
newStop = time.Now().Add(time.Duration(w.Count) * w.Interval)
}
// set calculated new stop
w.stopOnTime = &newStop
return
}
// stopOnTime is determined by StopTime
w.stopOnTime = w.StopTime
} | go | {
"resource": ""
} |
q15389 | Validate | train | func (w *WindowedSchedule) Validate() error {
// if the stop time was set but it is in the past, return an error
if w.StopTime != nil && time.Now().After(*w.StopTime) {
return ErrInvalidStopTime
}
// if the start and stop time were both set and the stop time is before
// the start time, return an error
if w.StopTime != nil && w.StartTime != nil && w.StopTime.Before(*w.StartTime) {
return ErrStopBeforeStart
}
// if the interval is less than zero, return an error
if w.Interval <= 0 {
return ErrInvalidInterval
}
// the schedule passed validation, set as active
w.state = Active
return nil
} | go | {
"resource": ""
} |
q15390 | Wait | train | func (w *WindowedSchedule) Wait(last time.Time) Response {
// If within the window we wait our interval and return
// otherwise we exit with a completed state.
var m uint
if (last == time.Time{}) {
// the first waiting in cycles, so
// set the `stopOnTime` determining the right-window boundary
w.setStopOnTime()
}
// Do we even have a specific start time?
if w.StartTime != nil {
// Wait till it is time to start if before the window start
if time.Now().Before(*w.StartTime) {
wait := w.StartTime.Sub(time.Now())
logger.WithFields(log.Fields{
"_block": "windowed-wait",
"sleep-duration": wait,
}).Debug("Waiting for window to start")
time.Sleep(wait)
}
}
// Do we even have a stop time?
if w.stopOnTime != nil {
if time.Now().Before(*w.stopOnTime) {
logger.WithFields(log.Fields{
"_block": "windowed-wait",
"time-before-stop": w.stopOnTime.Sub(time.Now()),
}).Debug("Within window, calling interval")
m, _ = waitOnInterval(last, w.Interval)
// check if the schedule should be ended after waiting on interval
if time.Now().After(*w.stopOnTime) {
logger.WithFields(log.Fields{
"_block": "windowed-wait",
}).Debug("schedule has ended")
w.state = Ended
}
} else {
logger.WithFields(log.Fields{
"_block": "windowed-wait",
}).Debug("schedule has ended")
w.state = Ended
m = 0
}
} else {
// This has no end like a simple schedule
m, _ = waitOnInterval(last, w.Interval)
}
return &WindowedScheduleResponse{
state: w.GetState(),
missed: m,
lastTime: time.Now(),
}
} | go | {
"resource": ""
} |
q15391 | GetIP | train | func GetIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return "127.0.0.1"
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
return "127.0.0.1"
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return "127.0.0.1"
} | go | {
"resource": ""
} |
q15392 | add | train | func (l *loadedPlugins) add(lp *loadedPlugin) serror.SnapError {
l.Lock()
defer l.Unlock()
if _, exists := l.table[lp.Key()]; exists {
return serror.New(ErrPluginAlreadyLoaded, map[string]interface{}{
"plugin-name": lp.Meta.Name,
"plugin-version": lp.Meta.Version,
"plugin-type": lp.Type.String(),
})
}
l.table[lp.Key()] = lp
return nil
} | go | {
"resource": ""
} |
q15393 | get | train | func (l *loadedPlugins) get(key string) (*loadedPlugin, error) {
l.RLock()
defer l.RUnlock()
lp, ok := l.table[key]
if !ok {
tnv := strings.Split(key, core.Separator)
if len(tnv) != 3 {
return nil, ErrBadKey
}
v, err := strconv.Atoi(tnv[2])
if err != nil {
return nil, ErrBadKey
}
if v < 1 {
pmLogger.Info("finding latest plugin")
return l.findLatest(tnv[0], tnv[1])
}
return nil, ErrPluginNotFound
}
return lp, nil
} | go | {
"resource": ""
} |
q15394 | Key | train | func (lp *loadedPlugin) Key() string {
return fmt.Sprintf("%s"+core.Separator+"%s"+core.Separator+"%d", lp.TypeName(), lp.Name(), lp.Version())
} | go | {
"resource": ""
} |
q15395 | OptEnableManagerTLS | train | func OptEnableManagerTLS(grpcSecurity client.GRPCSecurity) pluginManagerOpt {
return func(p *pluginManager) {
p.grpcSecurity = grpcSecurity
}
} | go | {
"resource": ""
} |
q15396 | OptSetPluginTags | train | func OptSetPluginTags(tags map[string]map[string]string) pluginManagerOpt {
return func(p *pluginManager) {
p.pluginTags = tags
}
} | go | {
"resource": ""
} |
q15397 | SetPluginTags | train | func (p *pluginManager) SetPluginTags(tags map[string]map[string]string) {
p.pluginTags = tags
} | go | {
"resource": ""
} |
q15398 | GenerateArgs | train | func (p *pluginManager) GenerateArgs(logLevel int) plugin.Arg {
return plugin.NewArg(logLevel, p.pprof)
} | go | {
"resource": ""
} |
q15399 | New | train | func New(cfg *Config) (*Server, error) {
// pull a few parameters from the configuration passed in by snapteld
s := &Server{
err: make(chan error),
killChan: make(chan struct{}),
addrString: cfg.Address,
pprof: cfg.Pprof,
}
if cfg.HTTPS {
var err error
s.snapTLS, err = newtls(cfg.RestCertificate, cfg.RestKey)
if err != nil {
return nil, err
}
protocolPrefix = "https"
}
restLogger.Info(fmt.Sprintf("Configuring REST API with HTTPS set to: %v", cfg.HTTPS))
s.apis = []api.API{
v1.New(&s.wg, s.killChan, protocolPrefix),
v2.New(&s.wg, s.killChan, protocolPrefix),
}
s.n = negroni.New(
NewLogger(),
negroni.NewRecovery(),
negroni.HandlerFunc(s.authMiddleware),
)
s.r = httprouter.New()
// CORS has to be turned on explicitly in the global config.
// Otherwise, it defauts to the same origin.
origins, err := s.getAllowedOrigins(cfg.Corsd)
if err != nil {
return nil, err
}
if len(origins) > 0 {
c := cors.New(cors.Options{
AllowedOrigins: origins,
AllowedMethods: []string{allowedMethods},
AllowedHeaders: []string{allowedHeaders},
MaxAge: maxAge,
})
s.n.Use(c)
}
// Use negroni to handle routes
s.n.UseHandler(s.r)
return s, nil
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.