_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q13000 | redirectHandler | train | // redirectHandler returns an httpcli option that wraps the default
// response handler: when the master replies "not leader" it extracts the
// new leader's Location header and surfaces it as a mesosRedirectionError
// so the caller can re-target the leading master.
func (cli *client) redirectHandler() httpcli.Opt {
return httpcli.HandleResponse(func(hres *http.Response, rc mesosclient.ResponseClass, err error) (mesos.Response, error) {
resp, err := cli.HandleResponse(hres, rc, err) // default response handler
// anything other than a "not leader" error passes through unmodified
if err == nil || !apierrors.CodeNotLeader.Matches(err) {
return resp, err
}
// TODO(jdef) for now, we're tightly coupled to the httpcli package's Response type
res, ok := resp.(*httpcli.Response)
if !ok {
if resp != nil {
resp.Close() // avoid leaking the response
}
return nil, errNotHTTPCli
}
if debug {
log.Println("master changed?")
}
// derive the new master endpoint from the redirect Location header
location, ok := buildNewEndpoint(res.Header.Get("Location"), cli.Endpoint())
if !ok {
return nil, errBadLocation
}
res.Close()
// report the redirection as an error; upstream logic retries against it
return nil, &mesosRedirectionError{location}
})
} | go | {
"resource": ""
} |
q13001 | Filters | train | // Filters returns a CallOpt that applies the given filter options to the
// Filters field of the call variant matching the call's type.
// It panics for call types that carry no Filters field, since using it on
// such a call indicates a programming error.
func Filters(fo ...mesos.FilterOpt) scheduler.CallOpt {
return func(c *scheduler.Call) {
switch c.Type {
case scheduler.Call_ACCEPT:
c.Accept.Filters = mesos.OptionalFilters(fo...)
case scheduler.Call_ACCEPT_INVERSE_OFFERS:
c.AcceptInverseOffers.Filters = mesos.OptionalFilters(fo...)
case scheduler.Call_DECLINE:
c.Decline.Filters = mesos.OptionalFilters(fo...)
case scheduler.Call_DECLINE_INVERSE_OFFERS:
c.DeclineInverseOffers.Filters = mesos.OptionalFilters(fo...)
default:
panic("filters not supported for type " + c.Type.String())
}
}
} | go | {
"resource": ""
} |
q13002 | RefuseSecondsWithJitter | train | func RefuseSecondsWithJitter(r *rand.Rand, d time.Duration) scheduler.CallOpt {
return Filters(func(f *mesos.Filters) {
s := time.Duration(r.Int63n(int64(d))).Seconds()
f.RefuseSeconds = &s
})
} | go | {
"resource": ""
} |
q13003 | RefuseSeconds | train | func RefuseSeconds(d time.Duration) scheduler.CallOpt {
asFloat := d.Seconds()
return Filters(func(f *mesos.Filters) {
f.RefuseSeconds = &asFloat
})
} | go | {
"resource": ""
} |
q13004 | Subscribe | train | func Subscribe(info *mesos.FrameworkInfo) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_SUBSCRIBE,
FrameworkID: info.GetID(),
Subscribe: &scheduler.Call_Subscribe{FrameworkInfo: info},
}
} | go | {
"resource": ""
} |
q13005 | Accept | train | func Accept(ops ...AcceptOpt) *scheduler.Call {
ab := &acceptBuilder{
offerIDs: make(map[mesos.OfferID]struct{}, len(ops)),
}
for _, op := range ops {
op(ab)
}
offerIDs := make([]mesos.OfferID, 0, len(ab.offerIDs))
for id := range ab.offerIDs {
offerIDs = append(offerIDs, id)
}
return &scheduler.Call{
Type: scheduler.Call_ACCEPT,
Accept: &scheduler.Call_Accept{
OfferIDs: offerIDs,
Operations: ab.operations,
},
}
} | go | {
"resource": ""
} |
q13006 | AcceptInverseOffers | train | func AcceptInverseOffers(offerIDs ...mesos.OfferID) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_ACCEPT_INVERSE_OFFERS,
AcceptInverseOffers: &scheduler.Call_AcceptInverseOffers{
InverseOfferIDs: offerIDs,
},
}
} | go | {
"resource": ""
} |
q13007 | DeclineInverseOffers | train | func DeclineInverseOffers(offerIDs ...mesos.OfferID) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_DECLINE_INVERSE_OFFERS,
DeclineInverseOffers: &scheduler.Call_DeclineInverseOffers{
InverseOfferIDs: offerIDs,
},
}
} | go | {
"resource": ""
} |
q13008 | OpLaunch | train | func OpLaunch(ti ...mesos.TaskInfo) mesos.Offer_Operation {
return mesos.Offer_Operation{
Type: mesos.Offer_Operation_LAUNCH,
Launch: &mesos.Offer_Operation_Launch{
TaskInfos: ti,
},
}
} | go | {
"resource": ""
} |
q13009 | ReviveWith | train | func ReviveWith(roles []string) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_REVIVE,
Revive: &scheduler.Call_Revive{Roles: roles},
}
} | go | {
"resource": ""
} |
q13010 | SuppressWith | train | func SuppressWith(roles []string) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_SUPPRESS,
Suppress: &scheduler.Call_Suppress{Roles: roles},
}
} | go | {
"resource": ""
} |
q13011 | Decline | train | func Decline(offerIDs ...mesos.OfferID) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_DECLINE,
Decline: &scheduler.Call_Decline{
OfferIDs: offerIDs,
},
}
} | go | {
"resource": ""
} |
q13012 | Kill | train | func Kill(taskID, agentID string) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_KILL,
Kill: &scheduler.Call_Kill{
TaskID: mesos.TaskID{Value: taskID},
AgentID: optionalAgentID(agentID),
},
}
} | go | {
"resource": ""
} |
q13013 | Shutdown | train | func Shutdown(executorID, agentID string) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_SHUTDOWN,
Shutdown: &scheduler.Call_Shutdown{
ExecutorID: mesos.ExecutorID{Value: executorID},
AgentID: mesos.AgentID{Value: agentID},
},
}
} | go | {
"resource": ""
} |
q13014 | Acknowledge | train | func Acknowledge(agentID, taskID string, uuid []byte) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_ACKNOWLEDGE,
Acknowledge: &scheduler.Call_Acknowledge{
AgentID: mesos.AgentID{Value: agentID},
TaskID: mesos.TaskID{Value: taskID},
UUID: uuid,
},
}
} | go | {
"resource": ""
} |
q13015 | Reconcile | train | func Reconcile(opts ...scheduler.ReconcileOpt) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_RECONCILE,
Reconcile: (&scheduler.Call_Reconcile{}).With(opts...),
}
} | go | {
"resource": ""
} |
q13016 | Message | train | func Message(agentID, executorID string, data []byte) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_MESSAGE,
Message: &scheduler.Call_Message{
AgentID: mesos.AgentID{Value: agentID},
ExecutorID: mesos.ExecutorID{Value: executorID},
Data: data,
},
}
} | go | {
"resource": ""
} |
q13017 | Request | train | func Request(requests ...mesos.Request) *scheduler.Call {
return &scheduler.Call{
Type: scheduler.Call_REQUEST,
Request: &scheduler.Call_Request{
Requests: requests,
},
}
} | go | {
"resource": ""
} |
q13018 | ReconcileOperations | train | func ReconcileOperations(req []ReconcileOperationRequest) *scheduler.Call {
var operations []scheduler.Call_ReconcileOperations_Operation
for i := range req {
operations = append(operations, scheduler.Call_ReconcileOperations_Operation{
OperationID: mesos.OperationID{Value: req[i].OperationID},
AgentID: optionalAgentID(req[i].AgentID),
ResourceProviderID: optionalResourceProviderID(req[i].ResourceProviderID),
})
}
return &scheduler.Call{
Type: scheduler.Call_RECONCILE_OPERATIONS,
ReconcileOperations: &scheduler.Call_ReconcileOperations{
Operations: operations,
},
}
} | go | {
"resource": ""
} |
q13019 | Accept | train | // Accept implements Filter; a nil FilterFunc accepts every offer.
func (f FilterFunc) Accept(o *mesos.Offer) bool {
	if f != nil {
		return f(o)
	}
	return true
} | go | {
"resource": ""
} |
q13020 | ByHostname | train | // ByHostname returns a Filter that accepts offers advertising the given
// hostname. An empty hostname yields a filter that accepts everything
// (nil FilterFunc semantics).
func ByHostname(hostname string) Filter {
	if hostname == "" {
		return FilterFunc(nil)
	}
	match := func(o *mesos.Offer) bool {
		return o.Hostname == hostname
	}
	return FilterFunc(match)
} | go | {
"resource": ""
} |
q13021 | ByAttributes | train | func ByAttributes(f func(attr []mesos.Attribute) bool) Filter {
if f == nil {
return FilterFunc(nil)
}
return FilterFunc(func(o *mesos.Offer) bool {
return f(o.Attributes)
})
} | go | {
"resource": ""
} |
q13022 | ContainsResources | train | func ContainsResources(wanted mesos.Resources) Filter {
return FilterFunc(func(o *mesos.Offer) bool {
return resources.ContainsAll(resources.Flatten(mesos.Resources(o.Resources)), wanted)
})
} | go | {
"resource": ""
} |
q13023 | Error | train | // Error returns an *Error for error codes, with the table message
// optionally annotated by details; non-error codes yield nil.
func (code Code) Error(details string) error {
	if !code.IsError() {
		return nil
	}
	msg := ErrorTable[code]
	if details != "" {
		msg += ": " + details
	}
	return &Error{code: code, message: msg}
} | go | {
"resource": ""
} |
q13024 | Temporary | train | // Temporary reports whether the error is likely transient, i.e. the
// caller may reasonably retry the operation.
func (e *Error) Temporary() bool {
switch e.code {
// TODO(jdef): NotFound **could** be a temporary error because there's a race at mesos startup in which the
// HTTP server responds before the internal listeners have been initialized. But it could also be reported
// because the client is accessing an invalid endpoint; as of right now, a client cannot distinguish between
// these cases.
// https://issues.apache.org/jira/browse/MESOS-7697
case CodeRateLimitExceeded, CodeMesosUnavailable:
return true
default:
return false
}
} | go | {
"resource": ""
} |
q13025 | SubscriptionLoss | train | func (e *Error) SubscriptionLoss() (result bool) {
_, result = CodesIndicatingSubscriptionLoss[e.code]
return
} | go | {
"resource": ""
} |
q13026 | Matches | train | // Matches reports whether err corresponds to this code: a nil error
// matches only non-error codes; otherwise err must be an *Error carrying
// the same code.
func (code Code) Matches(err error) bool {
	if err == nil {
		return !code.IsError()
	}
	if apiErr, ok := err.(*Error); ok {
		return apiErr.code == code
	}
	return false
} | go | {
"resource": ""
} |
q13027 | context | train | func (driver *MesosSchedulerDriver) context() context.Context {
// set a "session" attribute so that the messenger can see it
// and use it for reporting delivery errors.
return sessionid.NewContext(context.TODO(), driver.connection.String())
} | go | {
"resource": ""
} |
q13028 | handleMasterChanged | train | // handleMasterChanged reacts to an internally routed master-change
// notification: it drops connection/authentication state, notifies the
// scheduler of a disconnect when one existed, records the new master's
// PID (if any), and schedules an authentication attempt against it.
func (driver *MesosSchedulerDriver) handleMasterChanged(_ context.Context, from *upid.UPID, pbMsg proto.Message) {
if driver.status == mesos.Status_DRIVER_ABORTED {
log.Info("Ignoring master change because the driver is aborted.")
return
} else if !from.Equal(driver.self) {
// this message is internal; only the driver itself may send it
log.Errorf("ignoring master changed message received from upid '%v'", from)
return
}
// Reconnect every time a master is detected.
wasConnected := driver.connected
driver.connected = false
driver.authenticated = false
alertScheduler := false
if wasConnected {
log.V(3).Info("Disconnecting scheduler.")
driver.masterPid = nil
alertScheduler = true
}
msg := pbMsg.(*mesos.InternalMasterChangeDetected)
master := msg.Master
if master != nil {
log.Infof("New master %s detected\n", master.GetPid())
pid, err := upid.Parse(master.GetPid())
if err != nil {
panic("Unable to parse Master's PID value.") // this should not happen.
}
driver.masterPid = pid // save for downstream ops.
// deferred so authentication starts only after the scheduler has been
// told about the disconnect (below)
defer driver.tryAuthentication()
} else {
log.Infoln("No master detected.")
}
if alertScheduler {
driver.withScheduler(func(s Scheduler) { s.Disconnected(driver) })
}
} | go | {
"resource": ""
} |
q13029 | tryAuthentication | train | // tryAuthentication kicks off (or re-kicks) authentication against the
// currently detected master. It must only be invoked while the driver is
// unauthenticated; when no credentials are configured it skips straight
// to (re)registration without authenticating.
func (driver *MesosSchedulerDriver) tryAuthentication() {
if driver.authenticated {
// programming error
panic("already authenticated")
}
masterPid := driver.masterPid // save for referencing later in goroutine
if masterPid == nil {
log.Info("skipping authentication attempt because we lost the master")
return
}
if driver.authenticating.inProgress() {
// authentication is in progress, try to cancel it (we may too late already)
driver.authenticating.cancel()
driver.reauthenticate = true
return
}
if driver.credential != nil {
// authentication can block and we don't want to hold up the messenger loop
authenticating := &authenticationAttempt{done: make(chan struct{})}
go func() {
defer authenticating.cancel()
result := &mesos.InternalAuthenticationResult{
//TODO(jdef): is this really needed?
Success: proto.Bool(false),
Completed: proto.Bool(false),
Pid: proto.String(masterPid.String()),
}
// don't reference driver.authenticating here since it may have changed
if err := driver.authenticate(masterPid, authenticating); err != nil {
log.Errorf("Scheduler failed to authenticate: %v\n", err)
if err == auth.AuthenticationFailed {
result.Completed = proto.Bool(true)
}
} else {
result.Completed = proto.Bool(true)
result.Success = proto.Bool(true)
}
// route the outcome back through the messenger so it is processed
// serially with the driver's other internal events
pid := driver.messenger.UPID()
driver.messenger.Route(context.TODO(), &pid, result)
}()
driver.authenticating = authenticating
} else {
log.Infoln("No credentials were provided. " +
"Attempting to register scheduler without authentication.")
driver.authenticated = true
go driver.doReliableRegistration(float64(registrationBackoffFactor))
}
} | go | {
"resource": ""
} |
q13030 | Running | train | func (driver *MesosSchedulerDriver) Running() bool {
driver.eventLock.RLock()
defer driver.eventLock.RUnlock()
return driver.status == mesos.Status_DRIVER_RUNNING
} | go | {
"resource": ""
} |
q13031 | statusUpdated | train | // statusUpdated handles a StatusUpdateMessage: it validates the sender,
// normalizes the status UUID per the 0.23+ acknowledgement rules, sends
// an ACK back to the master when required, and finally forwards the
// status to the registered Scheduler.
func (driver *MesosSchedulerDriver) statusUpdated(ctx context.Context, from *upid.UPID, pbMsg proto.Message) {
msg := pbMsg.(*mesos.StatusUpdateMessage)
if driver.status != mesos.Status_DRIVER_RUNNING {
log.V(1).Infoln("Ignoring StatusUpdate message, the driver is not running!")
return
}
if !from.Equal(driver.self) {
// updates not generated by the driver itself must arrive from the
// leading master over an established connection
if !driver.connected {
log.V(1).Infoln("Ignoring StatusUpdate message, the driver is not connected!")
return
}
if !driver.masterPid.Equal(from) {
log.Warningf("ignoring status message because it was sent from '%v' instead of leading master '%v'", from, driver.masterPid)
return
}
}
log.V(2).Infof("Received status update from %q status source %q", from.String(), msg.GetPid())
status := msg.Update.GetStatus()
// see https://github.com/apache/mesos/blob/master/src/sched/sched.cpp#L887
// If the update does not have a 'uuid', it does not need
// acknowledging. However, prior to 0.23.0, the update uuid
// was required and always set. We also don't want to ACK updates
// that were internally generated. In 0.24.0, we can rely on the
// update uuid check here, until then we must still check for
// this being sent from the driver (from == UPID()) or from
// the master (pid == UPID()).
// TODO(vinod): Get rid of this logic in 0.25.0 because master
// and slave correctly set task status in 0.24.0.
if clearUUID := len(msg.Update.Uuid) == 0 || from.Equal(driver.self) || msg.GetPid() == driver.self.String(); clearUUID {
status.Uuid = nil
} else {
status.Uuid = msg.Update.Uuid
}
if driver.status == mesos.Status_DRIVER_ABORTED {
log.V(1).Infoln("Not sending StatusUpdate ACK, the driver is aborted!")
} else {
// Send StatusUpdate Acknowledgement; see above for the rules.
// Only send ACK if update was not from this driver and spec'd a UUID; this is compat w/ 0.23+
ackRequired := len(msg.Update.Uuid) > 0 && !from.Equal(driver.self) && msg.GetPid() != driver.self.String()
if ackRequired {
ackMsg := &mesos.StatusUpdateAcknowledgementMessage{
SlaveId: msg.Update.SlaveId,
FrameworkId: driver.frameworkInfo.Id,
TaskId: msg.Update.Status.TaskId,
Uuid: msg.Update.Uuid,
}
log.V(2).Infof("Sending ACK for status update %+v to %q", *msg.Update, from.String())
if err := driver.send(ctx, driver.masterPid, ackMsg); err != nil {
log.Errorf("Failed to send StatusUpdate ACK message: %v", err)
}
} else {
log.V(2).Infof("Not sending ACK, update is not from slave %q", from.String())
}
}
// hand the (possibly UUID-stripped) status to the user's scheduler
driver.withScheduler(func(s Scheduler) { s.StatusUpdate(driver, status) })
} | go | {
"resource": ""
} |
q13032 | start | train | // start transitions the driver from NOT_STARTED to RUNNING: it starts the
// messenger, records the driver's own UPID, and (when configured) begins
// master detection. It fails when invoked twice or from any status other
// than DRIVER_NOT_STARTED.
func (driver *MesosSchedulerDriver) start() (mesos.Status, error) {
select {
case <-driver.started:
return driver.status, errors.New("Unable to Start: driver has already been started once.")
default: // proceed
}
log.Infoln("Starting the scheduler driver...")
if driver.status != mesos.Status_DRIVER_NOT_STARTED {
return driver.status, fmt.Errorf("Unable to Start, expecting driver status %s, but is %s:", mesos.Status_DRIVER_NOT_STARTED, driver.status)
}
// Start the messenger.
if err := driver.messenger.Start(); err != nil {
log.Errorf("Scheduler failed to start the messenger: %v\n", err)
return driver.status, err
}
pid := driver.messenger.UPID()
driver.self = &pid
driver.status = mesos.Status_DRIVER_RUNNING
close(driver.started)
log.Infof("Mesos scheduler driver started with PID=%v", driver.self)
// master-change callbacks are routed back through the messenger so they
// are serialized with the rest of the driver's event handling
listener := detector.OnMasterChanged(func(m *mesos.MasterInfo) {
driver.messenger.Route(context.TODO(), driver.self, &mesos.InternalMasterChangeDetected{
Master: m,
})
})
if driver.masterDetector != nil {
// register with Detect() AFTER we have a self pid from the messenger, otherwise things get ugly
// because our internal messaging depends on it. detector callbacks are routed over the messenger
// bus, maintaining serial (concurrency-safe) callback execution.
log.V(1).Infof("starting master detector %T: %+v", driver.masterDetector, driver.masterDetector)
driver.masterDetector.Detect(listener)
log.V(2).Infoln("master detector started")
}
return driver.status, nil
} | go | {
"resource": ""
} |
q13033 | join | train | // join blocks until the driver terminates (driver.done closes), polling
// on a one-second timer. The event lock is released for the duration of
// the wait and re-acquired before the final driver status is returned.
// Callers must hold driver.eventLock.
func (driver *MesosSchedulerDriver) join() (stat mesos.Status, err error) {
if stat = driver.status; stat != mesos.Status_DRIVER_RUNNING {
err = fmt.Errorf("Unable to Join, expecting driver status %s, but is %s", mesos.Status_DRIVER_RUNNING, stat)
return
}
timeout := 1 * time.Second
t := time.NewTimer(timeout)
defer t.Stop()
// release the lock while blocked so event handlers can make progress;
// the deferred re-lock also refreshes stat with the terminal status
driver.eventLock.Unlock()
defer func() {
driver.eventLock.Lock()
stat = driver.status
}()
waitForDeath:
for {
select {
case <-driver.done:
break waitForDeath
case <-t.C:
}
t.Reset(timeout)
}
return
} | go | {
"resource": ""
} |
q13034 | Run | train | func (driver *MesosSchedulerDriver) Run() (mesos.Status, error) {
driver.eventLock.Lock()
defer driver.eventLock.Unlock()
return driver.run(driver.context())
} | go | {
"resource": ""
} |
q13035 | run | train | func (driver *MesosSchedulerDriver) run(ctx context.Context) (mesos.Status, error) {
stat, err := driver.start()
if err != nil {
return driver.stop(ctx, err, false)
}
if stat != mesos.Status_DRIVER_RUNNING {
return stat, fmt.Errorf("Unable to Run, expecting driver status %s, but is %s:", mesos.Status_DRIVER_RUNNING, driver.status)
}
log.Infoln("Scheduler driver running. Waiting to be stopped.")
return driver.join()
} | go | {
"resource": ""
} |
q13036 | Stop | train | func (driver *MesosSchedulerDriver) Stop(failover bool) (mesos.Status, error) {
driver.eventLock.Lock()
defer driver.eventLock.Unlock()
return driver.stop(driver.context(), nil, failover)
} | go | {
"resource": ""
} |
q13037 | abort | train | func (driver *MesosSchedulerDriver) abort(ctx context.Context, cause error) (stat mesos.Status, err error) {
if driver.masterDetector != nil {
defer driver.masterDetector.Cancel()
}
log.Infof("Aborting framework [%+v]", driver.frameworkInfo.Id)
if driver.connected {
_, err = driver.stop(ctx, cause, true)
} else {
driver._stop(cause, mesos.Status_DRIVER_ABORTED)
}
stat = mesos.Status_DRIVER_ABORTED
driver.status = stat
return
} | go | {
"resource": ""
} |
q13038 | pushLostTask | train | // pushLostTask synthesizes a TASK_LOST status update for the given task
// (with the supplied reason text) and feeds it through the driver's own
// status-update handler so the attached Scheduler observes the loss.
func (driver *MesosSchedulerDriver) pushLostTask(ctx context.Context, taskInfo *mesos.TaskInfo, why string) {
msg := &mesos.StatusUpdateMessage{
Update: &mesos.StatusUpdate{
FrameworkId: driver.frameworkInfo.Id,
Status: &mesos.TaskStatus{
TaskId: taskInfo.TaskId,
State: mesos.TaskState_TASK_LOST.Enum(),
Source: mesos.TaskStatus_SOURCE_MASTER.Enum(),
Message: proto.String(why),
Reason: mesos.TaskStatus_REASON_MASTER_DISCONNECTED.Enum(),
},
SlaveId: taskInfo.SlaveId,
ExecutorId: taskInfo.Executor.ExecutorId,
Timestamp: proto.Float64(float64(time.Now().Unix())),
},
Pid: proto.String(driver.self.String()),
}
// put it on internal channel
// will cause handler to push to attached Scheduler
driver.statusUpdated(ctx, driver.self, msg)
} | go | {
"resource": ""
} |
q13039 | fatal | train | func (driver *MesosSchedulerDriver) fatal(ctx context.Context, err string) {
if driver.status == mesos.Status_DRIVER_ABORTED {
log.V(3).Infoln("Ignoring error message, the driver is aborted!")
return
}
driver.abort(ctx, &ErrDriverAborted{Reason: err})
} | go | {
"resource": ""
} |
q13040 | LoginProviderFrom | train | // LoginProviderFrom extracts the login provider name stored in ctx;
// ok is false when none was recorded.
func LoginProviderFrom(ctx context.Context) (name string, ok bool) {
	v := ctx.Value(loginProviderNameKey)
	name, ok = v.(string)
	return
} | go | {
"resource": ""
} |
q13041 | LoginProvider | train | // LoginProvider returns the login provider name from ctx, or "" when
// none was recorded.
func LoginProvider(ctx context.Context) string {
	if name, ok := LoginProviderFrom(ctx); ok {
		return name
	}
	return ""
} | go | {
"resource": ""
} |
q13042 | Registered | train | func (e *Executor) Registered(executor.ExecutorDriver, *mesosproto.ExecutorInfo, *mesosproto.FrameworkInfo, *mesosproto.SlaveInfo) {
e.Called()
} | go | {
"resource": ""
} |
q13043 | NewEncoder | train | // NewEncoder returns an Encoder that marshals each message to JSON and
// writes the result as a single frame on the writer obtained from the sink.
func NewEncoder(s encoding.Sink) encoding.Encoder {
	frameWriter := s()
	encode := func(m encoding.Marshaler) error {
		data, err := json.Marshal(m)
		if err != nil {
			return err
		}
		return frameWriter.WriteFrame(data)
	}
	return encoding.EncoderFunc(encode)
} | go | {
"resource": ""
} |
q13044 | NewDecoder | train | func NewDecoder(s encoding.Source) encoding.Decoder {
r := s()
dec := framing.NewDecoder(r, json.Unmarshal)
return encoding.DecoderFunc(func(u encoding.Unmarshaler) error { return dec.Decode(u) })
} | go | {
"resource": ""
} |
q13045 | AttachContainerInput | train | func AttachContainerInput(cid mesos.ContainerID) *agent.Call {
return &agent.Call{
Type: agent.Call_ATTACH_CONTAINER_INPUT,
AttachContainerInput: &agent.Call_AttachContainerInput{
Type: agent.Call_AttachContainerInput_CONTAINER_ID,
ContainerID: &cid,
},
}
} | go | {
"resource": ""
} |
q13046 | NewSlaveHealthChecker | train | // NewSlaveHealthChecker creates a slave health checker with the given
// failure threshold, check interval, and per-request HTTP timeout.
// Zero (or, for threshold, non-positive) arguments fall back to the
// package defaults.
func NewSlaveHealthChecker(slaveUPID *upid.UPID, threshold int, checkDuration time.Duration, timeout time.Duration) *SlaveHealthChecker {
tr := &http.Transport{}
checker := &SlaveHealthChecker{
slaveUPID: slaveUPID,
client: &http.Client{Timeout: timeout, Transport: tr},
threshold: int32(threshold),
checkDuration: checkDuration,
stop: make(chan struct{}),
ch: make(chan time.Time, 1),
tr: tr,
}
// apply defaults for unset/invalid arguments
if timeout == 0 {
checker.client.Timeout = defaultTimeout
}
if checkDuration == 0 {
checker.checkDuration = defaultCheckDuration
}
if threshold <= 0 {
checker.threshold = defaultThreshold
}
return checker
} | go | {
"resource": ""
} |
q13047 | Start | train | // Start launches the background health-check loop and returns the channel
// on which unhealthy events are reported. The loop ticks every
// checkDuration, skips checks while paused, snapshots the slave UPID
// under the read lock, and exits when the stop channel is closed.
func (s *SlaveHealthChecker) Start() <-chan time.Time {
go func() {
t := time.NewTicker(s.checkDuration)
defer t.Stop()
for {
select {
case <-t.C:
// prefer stopping over running another check when both are ready
select {
case <-s.stop:
return
default:
// continue
}
if paused, slavepid := func() (x bool, y upid.UPID) {
s.RLock()
defer s.RUnlock()
x = s.paused
if s.slaveUPID != nil {
y = *s.slaveUPID
}
return
}(); !paused {
s.doCheck(slavepid)
}
case <-s.stop:
return
}
}
}()
return s.ch
} | go | {
"resource": ""
} |
q13048 | Pause | train | // Pause suspends health checking until Continue is called.
func (s *SlaveHealthChecker) Pause() {
	s.Lock()
	s.paused = true
	s.Unlock()
} | go | {
"resource": ""
} |
q13049 | Continue | train | // Continue resumes health checking against the given slave UPID.
func (s *SlaveHealthChecker) Continue(slaveUPID *upid.UPID) {
	s.Lock()
	s.paused = false
	s.slaveUPID = slaveUPID
	s.Unlock()
} | go | {
"resource": ""
} |
q13050 | AckStatusUpdatesF | train | // AckStatusUpdatesF returns a Rule that automatically acknowledges task
// status updates carrying a non-empty UUID, using the Caller produced by
// callerLookup. On ACK failure the event is dropped (not propagated down
// the chain) and the failure is surfaced as an AckError.
func AckStatusUpdatesF(callerLookup func() calls.Caller) Rule {
return func(ctx context.Context, e *scheduler.Event, err error, chain Chain) (context.Context, *scheduler.Event, error) {
// aggressively attempt to ack updates: even if there's pre-existing error state attempt
// to acknowledge all status updates.
origErr := err
if e.GetType() == scheduler.Event_UPDATE {
var (
s = e.GetUpdate().GetStatus()
uuid = s.GetUUID()
)
// only ACK non-empty UUID's, as per mesos scheduler spec
if len(uuid) > 0 {
ack := calls.Acknowledge(
s.GetAgentID().GetValue(),
s.TaskID.Value,
uuid,
)
err = calls.CallNoData(ctx, callerLookup(), ack)
if err != nil {
// TODO(jdef): not sure how important this is; if there's an error ack'ing
// because we became disconnected, then we'll just reconnect later and
// Mesos will ask us to ACK anyway -- why pay special attention to these
// call failures vs others?
err = &calls.AckError{Ack: ack, Cause: err}
return ctx, e, Error2(origErr, err) // drop (do not propagate to chain)
}
}
}
return chain(ctx, e, origErr)
}
} | go | {
"resource": ""
} |
q13051 | DefaultEventLogger | train | // DefaultEventLogger returns a function that logs scheduler events,
// prefixed with eventLabel when it is non-empty.
func DefaultEventLogger(eventLabel string) func(*scheduler.Event) {
	if eventLabel != "" {
		return func(e *scheduler.Event) { log.Println(eventLabel, e) }
	}
	return func(e *scheduler.Event) { log.Println(e) }
} | go | {
"resource": ""
} |
q13052 | LogEvents | train | func LogEvents(f func(*scheduler.Event)) Rule {
if f == nil {
f = DefaultEventLogger(DefaultEventLabel)
}
return Rule(func(ctx context.Context, e *scheduler.Event, err error, chain Chain) (context.Context, *scheduler.Event, error) {
f(e)
return chain(ctx, e, err)
})
} | go | {
"resource": ""
} |
q13053 | AckOperationUpdatesF | train | func AckOperationUpdatesF(callerLookup func() calls.Caller) Rule {
return func(ctx context.Context, e *scheduler.Event, err error, chain Chain) (context.Context, *scheduler.Event, error) {
// aggressively attempt to ack updates: even if there's pre-existing error state attempt
// to acknowledge all offer operation status updates.
origErr := err
if e.GetType() == scheduler.Event_UPDATE_OPERATION_STATUS {
var (
s = e.GetUpdateOperationStatus().GetStatus()
uuid = s.GetUUID().GetValue()
)
// only ACK non-empty UUID's, as per mesos scheduler spec.
if len(uuid) > 0 {
// the fact that we're receiving this offer operation status update means that the
// framework supplied an operation_id to the master when executing the offer operation,
// therefore the operation_id included in the status object here should be non-empty.
opID := s.GetOperationID().GetValue()
if opID == "" {
panic("expected non-empty offer operation ID for offer operation status update")
}
// try to extract a resource provider ID; we can safely assume that all converted resources
// are for the same provider ID (including a non-specified one).
rpID := ""
conv := s.GetConvertedResources()
for i := range conv {
id := conv[i].GetProviderID().GetValue()
if id != "" {
rpID = id
break
}
}
ack := calls.AcknowledgeOperationStatus(
"", // agentID: optional
rpID, // optional
uuid,
opID,
)
err = calls.CallNoData(ctx, callerLookup(), ack)
if err != nil {
// TODO(jdef): not sure how important this is; if there's an error ack'ing
// because we became disconnected, then we'll just reconnect later and
// Mesos will ask us to ACK anyway -- why pay special attention to these
// call failures vs others?
err = &calls.AckError{Ack: ack, Cause: err}
return ctx, e, Error2(origErr, err) // drop (do not propagate to chain)
}
}
}
return chain(ctx, e, origErr)
}
} | go | {
"resource": ""
} |
q13054 | WithSubscriptionTerminated | train | func WithSubscriptionTerminated(handler func(error)) Option {
return func(c *Config) Option {
old := c.subscriptionTerminated
c.subscriptionTerminated = handler
return WithSubscriptionTerminated(old)
}
} | go | {
"resource": ""
} |
q13055 | Run | train | func Run(ctx context.Context, framework *mesos.FrameworkInfo, caller calls.Caller, options ...Option) (lastErr error) {
var config Config
for _, opt := range options {
if opt != nil {
opt(&config)
}
}
if config.handler == nil {
config.handler = DefaultHandler
}
subscribe := calls.Subscribe(framework)
subscribe.Subscribe.SuppressedRoles = config.initSuppressRoles
for !isDone(ctx) {
frameworkID := config.tryFrameworkID()
if framework.GetFailoverTimeout() > 0 && frameworkID != "" {
subscribe.With(calls.SubscribeTo(frameworkID))
}
if config.registrationTokens != nil {
select {
case _, ok := <-config.registrationTokens:
if !ok {
// re-registration canceled, exit Run loop
return
}
case <-ctx.Done():
return ctx.Err()
}
}
resp, err := caller.Call(ctx, subscribe)
lastErr = processSubscription(ctx, config, resp, err)
if config.subscriptionTerminated != nil {
config.subscriptionTerminated(lastErr)
}
}
return
} | go | {
"resource": ""
} |
q13056 | Send | train | // Send implements Sender by invoking the function itself.
func (f SenderFunc) Send(ctx context.Context, r Request) (mesos.Response, error) {
return f(ctx, r)
} | go | {
"resource": ""
} |
q13057 | IgnoreResponse | train | // IgnoreResponse decorates a Sender so that any response produced by a
// send is immediately closed and discarded; callers receive only the
// error (the returned response is always nil).
func IgnoreResponse(s Sender) SenderFunc {
	return func(ctx context.Context, r Request) (mesos.Response, error) {
		res, sendErr := s.Send(ctx, r)
		if res != nil {
			res.Close()
		}
		return nil, sendErr
	}
} | go | {
"resource": ""
} |
q13058 | SendNoData | train | // SendNoData submits the request via the sender and discards any
// response, returning only the error.
func SendNoData(ctx context.Context, sender Sender, r Request) error {
	_, err := IgnoreResponse(sender).Send(ctx, r)
	return err
} | go | {
"resource": ""
} |
q13059 | NewMessenger | train | func NewMessenger() *Messenger {
return &Messenger{
messageQueue: make(chan *message, 1),
handlers: make(map[string]messenger.MessageHandler),
stop: make(chan struct{}),
}
} | go | {
"resource": ""
} |
q13060 | Install | train | func (m *Messenger) Install(handler messenger.MessageHandler, msg proto.Message) error {
m.handlers[reflect.TypeOf(msg).Elem().Name()] = handler
return m.Called().Error(0)
} | go | {
"resource": ""
} |
q13061 | Send | train | func (m *Messenger) Send(ctx context.Context, upid *upid.UPID, msg proto.Message) error {
return m.Called().Error(0)
} | go | {
"resource": ""
} |
q13062 | Stop | train | // Stop signals shutdown by closing the stop channel (idempotently) and
// returns the mocked error value.
func (m *Messenger) Stop() error {
	select {
	case <-m.stop:
		// already stopped; nothing to do
	default:
		close(m.stop)
	}
	return m.Called().Error(0)
} | go | {
"resource": ""
} |
q13063 | UPID | train | func (m *Messenger) UPID() upid.UPID {
return m.Called().Get(0).(upid.UPID)
} | go | {
"resource": ""
} |
q13064 | Recv | train | func (m *Messenger) Recv(from *upid.UPID, msg proto.Message) {
m.messageQueue <- &message{from, msg}
} | go | {
"resource": ""
} |
q13065 | Call | train | func (r Rule) Call(ctx context.Context, c *executor.Call) (mesos.Response, error) {
if r == nil {
return nil, nil
}
_, _, resp, err := r(ctx, c, nil, nil, ChainIdentity)
return resp, err
} | go | {
"resource": ""
} |
q13066 | Call | train | func (rs Rules) Call(ctx context.Context, c *executor.Call) (mesos.Response, error) {
return Rule(rs.Eval).Call(ctx, c)
} | go | {
"resource": ""
} |
q13067 | Eval | train | // Eval applies the rule when it is non-nil, otherwise forwards directly
// to the next link in the chain.
func (r Rule) Eval(ctx context.Context, e *scheduler.Event, err error, ch Chain) (context.Context, *scheduler.Event, error) {
	if r == nil {
		return ch(ctx, e, err)
	}
	return r(ctx, e, err, ch)
} | go | {
"resource": ""
} |
q13068 | IsStrictSubroleOf | train | // IsStrictSubroleOf reports whether left is a proper sub-role of right,
// i.e. left begins with right followed immediately by a '/' separator.
func IsStrictSubroleOf(left, right string) bool {
	if len(left) <= len(right) {
		return false
	}
	return strings.HasPrefix(left, right) && left[len(right)] == '/'
} | go | {
"resource": ""
} |
q13069 | Framework | train | func Framework(id string) executor.CallOpt {
return func(c *executor.Call) {
c.FrameworkID = mesos.FrameworkID{Value: id}
}
} | go | {
"resource": ""
} |
q13070 | Executor | train | func Executor(id string) executor.CallOpt {
return func(c *executor.Call) {
c.ExecutorID = mesos.ExecutorID{Value: id}
}
} | go | {
"resource": ""
} |
q13071 | Subscribe | train | func Subscribe(unackdTasks []mesos.TaskInfo, unackdUpdates []executor.Call_Update) *executor.Call {
return &executor.Call{
Type: executor.Call_SUBSCRIBE,
Subscribe: &executor.Call_Subscribe{
UnacknowledgedTasks: unackdTasks,
UnacknowledgedUpdates: unackdUpdates,
},
}
} | go | {
"resource": ""
} |
q13072 | Update | train | func Update(status mesos.TaskStatus) *executor.Call {
return &executor.Call{
Type: executor.Call_UPDATE,
Update: &executor.Call_Update{
Status: status,
},
}
} | go | {
"resource": ""
} |
q13073 | Message | train | func Message(data []byte) *executor.Call {
return &executor.Call{
Type: executor.Call_MESSAGE,
Message: &executor.Call_Message{
Data: data,
},
}
} | go | {
"resource": ""
} |
q13074 | Equivalent | train | // Equivalent reports whether two labels carry the same key and value,
// where a nil value is equal only to another nil value.
func (left Label) Equivalent(right Label) bool {
	if left.Key != right.Key {
		return false
	}
	if left.Value == nil || right.Value == nil {
		return left.Value == nil && right.Value == nil
	}
	return *left.Value == *right.Value
} | go | {
"resource": ""
} |
q13075 | NewDecoder | train | func NewDecoder(r Reader, uf UnmarshalFunc) DecoderFunc {
return func(m interface{}) error {
// Note: the buf returned by ReadFrame will change over time, it can't be sub-sliced
// and then those sub-slices retained. Examination of generated proto code seems to indicate
// that byte buffers are copied vs. referenced by sub-slice (gogo protoc).
frame, err := r.ReadFrame()
if err != nil {
return err
}
return uf(frame, m)
}
} | go | {
"resource": ""
} |
q13076 | With | train | func (c *Call) With(opts ...CallOpt) *Call {
for _, opt := range opts {
opt(c)
}
return c
} | go | {
"resource": ""
} |
q13077 | IDs | train | func (offers Slice) IDs() []mesos.OfferID {
ids := make([]mesos.OfferID, len(offers))
for i := range offers {
ids[i] = offers[i].ID
}
return ids
} | go | {
"resource": ""
} |
q13078 | IDs | train | func (offers Index) IDs() []mesos.OfferID {
ids := make([]mesos.OfferID, 0, len(offers))
for _, offer := range offers {
ids = append(ids, offer.GetID())
}
return ids
} | go | {
"resource": ""
} |
q13079 | Filter | train | func (offers Slice) Filter(filter Filter) (result Slice) {
if sz := len(result); sz > 0 {
result = make(Slice, 0, sz)
for i := range offers {
if filter.Accept(&offers[i]) {
result = append(result, offers[i])
}
}
}
return
} | go | {
"resource": ""
} |
q13080 | Filter | train | func (offers Index) Filter(filter Filter) (result Index) {
if sz := len(result); sz > 0 {
result = make(Index, sz)
for id, offer := range offers {
if filter.Accept(offer) {
result[id] = offer
}
}
}
return
} | go | {
"resource": ""
} |
q13081 | ToSlice | train | func (offers Index) ToSlice() (slice Slice) {
if sz := len(offers); sz > 0 {
slice = make(Slice, 0, sz)
for _, offer := range offers {
slice = append(slice, *offer)
}
}
return
} | go | {
"resource": ""
} |
q13082 | Push | train | func Push(r RequestStreaming, c ...*master.Call) RequestStreamingFunc {
return func() *master.Call {
if len(c) == 0 {
return r.Call()
}
head := c[0]
c = c[1:]
return head
}
} | go | {
"resource": ""
} |
q13083 | SenderWith | train | func SenderWith(s Sender, opts ...executor.CallOpt) SenderFunc {
if len(opts) == 0 {
return s.Send
}
return func(ctx context.Context, r Request) (mesos.Response, error) {
f := func() (c *executor.Call) {
if c = r.Call(); c != nil {
c = c.With(opts...)
}
return
}
switch r.(type) {
case RequestStreaming:
return s.Send(ctx, RequestStreamingFunc(f))
default:
return s.Send(ctx, RequestFunc(f))
}
}
} | go | {
"resource": ""
} |
q13084 | RequestURI | train | func (m *Message) RequestURI() string {
if m.isV1API() {
return fmt.Sprintf("/api/v1/%s", m.Name)
}
return fmt.Sprintf("/%s/%s", m.UPID.ID, m.Name)
} | go | {
"resource": ""
} |
q13085 | AddressList | train | func (h *Header) AddressList(key string) ([]*Address, error) {
v := h.Get(key)
if v == "" {
return nil, nil
}
return parseAddressList(v)
} | go | {
"resource": ""
} |
q13086 | SetAddressList | train | func (h *Header) SetAddressList(key string, addrs []*Address) {
h.Set(key, formatAddressList(addrs))
} | go | {
"resource": ""
} |
q13087 | Date | train | func (h *Header) Date() (time.Time, error) {
return mail.ParseDate(h.Get("Date"))
} | go | {
"resource": ""
} |
q13088 | SetDate | train | func (h *Header) SetDate(t time.Time) {
h.Set("Date", t.Format(dateLayout))
} | go | {
"resource": ""
} |
q13089 | Filename | train | func (h *AttachmentHeader) Filename() (string, error) {
_, params, err := h.ContentDisposition()
filename, ok := params["filename"]
if !ok {
// Using "name" in Content-Type is discouraged
_, params, err = h.ContentType()
filename = params["name"]
}
return filename, err
} | go | {
"resource": ""
} |
q13090 | SetFilename | train | func (h *AttachmentHeader) SetFilename(filename string) {
dispParams := map[string]string{"filename": filename}
h.SetContentDisposition("attachment", dispParams)
} | go | {
"resource": ""
} |
q13091 | headerToMap | train | func headerToMap(h textproto.Header) stdtextproto.MIMEHeader {
m := make(stdtextproto.MIMEHeader)
fields := h.Fields()
for fields.Next() {
m.Add(fields.Key(), fields.Value())
}
return m
} | go | {
"resource": ""
} |
q13092 | SetContentType | train | func (h *Header) SetContentType(t string, params map[string]string) {
h.Set("Content-Type", formatHeaderWithParams(t, params))
} | go | {
"resource": ""
} |
q13093 | ContentDisposition | train | func (h *Header) ContentDisposition() (disp string, params map[string]string, err error) {
return parseHeaderWithParams(h.Get("Content-Disposition"))
} | go | {
"resource": ""
} |
q13094 | SetContentDisposition | train | func (h *Header) SetContentDisposition(disp string, params map[string]string) {
h.Set("Content-Disposition", formatHeaderWithParams(disp, params))
} | go | {
"resource": ""
} |
q13095 | Text | train | func (h *Header) Text(k string) (string, error) {
return decodeHeader(h.Get(k))
} | go | {
"resource": ""
} |
q13096 | SetText | train | func (h *Header) SetText(k, v string) {
h.Set(k, encodeHeader(v))
} | go | {
"resource": ""
} |
q13097 | New | train | func New(header Header, body io.Reader) (*Entity, error) {
var err error
enc := header.Get("Content-Transfer-Encoding")
if decoded, encErr := encodingReader(enc, body); encErr != nil {
err = unknownEncodingError{encErr}
} else {
body = decoded
}
mediaType, mediaParams, _ := header.ContentType()
if ch, ok := mediaParams["charset"]; ok {
if converted, charsetErr := charsetReader(ch, body); charsetErr != nil {
err = unknownCharsetError{charsetErr}
} else {
body = converted
}
}
return &Entity{
Header: header,
Body: body,
mediaType: mediaType,
mediaParams: mediaParams,
}, err
} | go | {
"resource": ""
} |
q13098 | Read | train | func Read(r io.Reader) (*Entity, error) {
br := bufio.NewReader(r)
h, err := textproto.ReadHeader(br)
if err != nil {
return nil, err
}
return New(Header{h}, br)
} | go | {
"resource": ""
} |
q13099 | MultipartReader | train | func (e *Entity) MultipartReader() MultipartReader {
if !strings.HasPrefix(e.mediaType, "multipart/") {
return nil
}
if mb, ok := e.Body.(*multipartBody); ok {
return mb
}
return &multipartReader{multipart.NewReader(e.Body, e.mediaParams["boundary"])}
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.