_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q12900 | Timeout | train | func Timeout(d time.Duration) ConfigOpt {
return func(c *Config) {
c.transport.ResponseHeaderTimeout = d
c.transport.TLSHandshakeTimeout = d
c.dialer.Timeout = d
}
} | go | {
"resource": ""
} |
q12901 | RoundTripper | train | func RoundTripper(rt http.RoundTripper) ConfigOpt {
return func(c *Config) {
c.client.Transport = rt
}
} | go | {
"resource": ""
} |
q12902 | TLSConfig | train | func TLSConfig(tc *tls.Config) ConfigOpt {
return func(c *Config) {
c.transport.TLSClientConfig = tc
}
} | go | {
"resource": ""
} |
q12903 | Transport | train | func Transport(modifyTransport func(*http.Transport)) ConfigOpt {
return func(c *Config) {
if modifyTransport != nil {
modifyTransport(c.transport)
}
}
} | go | {
"resource": ""
} |
q12904 | WrapRoundTripper | train | func WrapRoundTripper(f func(http.RoundTripper) http.RoundTripper) ConfigOpt {
return func(c *Config) {
if f != nil {
if rt := f(c.client.Transport); rt != nil {
c.client.Transport = rt
}
}
}
} | go | {
"resource": ""
} |
q12905 | Minus | train | func (resources Resources) Minus(that ...Resource) Resources {
x := resources.Clone()
return x.Subtract(that...)
} | go | {
"resource": ""
} |
q12906 | Plus | train | func (resources Resources) Plus(that ...Resource) Resources {
x := resources.Clone()
return x.Add(that...)
} | go | {
"resource": ""
} |
q12907 | Minus1 | train | func (resources *Resources) Minus1(that Resource) Resources {
x := resources.Clone()
return x.Subtract1(that)
} | go | {
"resource": ""
} |
// Addable reports whether right may be merged into left by Add. The two
// resources must agree on name, type, role/reservation metadata, shared-ness,
// allocation, disk identity, resource provider, and revocability. A nil left
// is addable to anything (it acts as the identity).
func (left *Resource) Addable(right Resource) bool {
	if left == nil {
		return true
	}
	if left.GetName() != right.GetName() ||
		left.GetType() != right.GetType() {
		return false
	}
	// Without the RESERVATION_REFINEMENT capability, role is a first-class
	// field of Resource and must match exactly.
	if !IsCapabilityReservationRefinementEnabled() && left.GetRole() != right.GetRole() {
		return false
	}
	if a, b := left.GetShared(), right.GetShared(); (a == nil) != (b == nil) {
		// shared has no fields
		return false
	} else if a != nil {
		// For shared resources, they can be added only if left == right.
		return left.Equivalent(right)
	}
	if a, b := left.GetAllocationInfo(), right.GetAllocationInfo(); !a.Equivalent(b) {
		return false
	}
	if !left.GetReservation().Equivalent(right.GetReservation()) {
		return false
	}
	// Refined reservation stacks must match element-wise, in order.
	if a, b := left.Reservations, right.Reservations; len(a) != len(b) {
		return false
	} else {
		for i := range a {
			aa := &a[i]
			if !aa.Equivalent(&b[i]) {
				return false
			}
		}
	}
	if !left.GetDisk().Equivalent(right.GetDisk()) {
		return false
	}
	if ls := left.GetDisk().GetSource(); ls != nil {
		switch ls.GetType() {
		case Resource_DiskInfo_Source_PATH:
			// Two PATH resources can be added if their disks are identical
		case Resource_DiskInfo_Source_BLOCK,
			Resource_DiskInfo_Source_MOUNT:
			// Two resources that represent exclusive 'MOUNT' or 'BLOCK' disks
			// cannot be added together; this would defeat the exclusivity.
			return false
		case Resource_DiskInfo_Source_RAW:
			// We can only add resources representing 'RAW' disks if
			// they have no identity.
			if ls.GetID() != "" {
				return false
			}
		case Resource_DiskInfo_Source_UNKNOWN:
			panic("unreachable")
		}
	}
	// from apache/mesos: src/common/resources.cpp
	// TODO(jieyu): Even if two Resource objects with DiskInfo have the
	// same persistence ID, they cannot be added together. In fact, this
	// shouldn't happen if we do not add resources from different
	// namespaces (e.g., across slave). Consider adding a warning.
	if left.GetDisk().GetPersistence() != nil {
		return false
	}
	// Revocable and non-revocable resources never combine.
	if (left.GetRevocable() == nil) != (right.GetRevocable() == nil) {
		return false
	}
	if a, b := left.GetProviderID(), right.GetProviderID(); (a == nil) != (b == nil) {
		return false
	} else if a != nil && a.Value != b.Value {
		return false
	}
	return true
}
"resource": ""
} |
// Contains reports whether right is wholly held within left: the two
// resources must be subtractable (see Subtractable), and right's value must
// compare as <= left's value for the resource's value type. Unknown value
// types conservatively report non-containment.
func (left Resource) Contains(right Resource) bool {
	if !left.Subtractable(right) {
		return false
	}
	switch left.GetType() {
	case SCALAR:
		return right.GetScalar().Compare(left.GetScalar()) <= 0
	case RANGES:
		return right.GetRanges().Compare(left.GetRanges()) <= 0
	case SET:
		return right.GetSet().Compare(left.GetSet()) <= 0
	default:
		return false
	}
}
"resource": ""
} |
q12910 | Subtract | train | func (left *Resource) Subtract(right Resource) {
switch right.checkType(left.GetType()) {
case SCALAR:
left.Scalar = left.GetScalar().Subtract(right.GetScalar())
case RANGES:
left.Ranges = left.GetRanges().Subtract(right.GetRanges())
case SET:
left.Set = left.GetSet().Subtract(right.GetSet())
}
} | go | {
"resource": ""
} |
q12911 | checkType | train | func (left *Resource) checkType(t Value_Type) Value_Type {
if left != nil && left.GetType() != t {
panic(fmt.Sprintf("expected type %v instead of %v", t, left.GetType()))
}
return t
} | go | {
"resource": ""
} |
q12912 | IsEmpty | train | func (left *Resource) IsEmpty() bool {
if left == nil {
return true
}
switch left.GetType() {
case SCALAR:
return left.GetScalar().GetValue() == 0
case RANGES:
return len(left.GetRanges().GetRange()) == 0
case SET:
return len(left.GetSet().GetItem()) == 0
}
return false
} | go | {
"resource": ""
} |
// IsUnreserved reports whether the resource carries no reservation at all:
// neither a static reservation (a Role other than the default "*") nor a
// dynamic one (the legacy Reservation field or the refined Reservations list).
func (left *Resource) IsUnreserved() bool {
	// Role unset or "*"           -> no static reservation
	// GetReservation() == nil      -> no legacy dynamic reservation
	// len(GetReservations()) == 0  -> no refined reservations
	return (left.Role == nil || left.GetRole() == "*") && left.GetReservation() == nil && len(left.GetReservations()) == 0
}
"resource": ""
} |
q12914 | ReservationRole | train | func (r *Resource) ReservationRole() string {
// if using reservation refinement, return the role of the last refinement
rs := r.GetReservations()
if x := len(rs); x > 0 {
return rs[x-1].GetRole()
}
// if using the old reservation API, role is a first class field of Resource
// (and it's never stored in Resource.Reservation).
return r.GetRole()
} | go | {
"resource": ""
} |
q12915 | IsAllocatableTo | train | func (left *Resource) IsAllocatableTo(role string) bool {
if left.IsUnreserved() {
return true
}
r := left.ReservationRole()
return role == r || roles.IsStrictSubroleOf(role, r)
} | go | {
"resource": ""
} |
q12916 | IsDynamicallyReserved | train | func (left *Resource) IsDynamicallyReserved() bool {
if left.IsReserved("") {
if left.GetReservation() != nil {
return true
}
rs := left.GetReservations()
return rs[len(rs)-1].GetType() == Resource_ReservationInfo_DYNAMIC
}
return false
} | go | {
"resource": ""
} |
q12917 | IsDisk | train | func (left *Resource) IsDisk(t Resource_DiskInfo_Source_Type) bool {
if s := left.GetDisk().GetSource(); s != nil {
return s.GetType() == t
}
return false
} | go | {
"resource": ""
} |
// PopReservation returns a new Resources collection in which the most recent
// (last) reservation refinement has been removed from every resource.
// Resources that become addable after popping are merged together in the
// result. Panics if any resource has an empty Reservations list; the
// receiver is never modified (each element is cloned before mutation).
func (rs Resources) PopReservation() (result Resources) {
pop_next:
	for i := range rs {
		r := &rs[i]
		ls := len(r.Reservations)
		if ls == 0 {
			panic(fmt.Sprintf("no reservations exist for resource %q", r))
		}
		r = proto.Clone(r).(*Resource)                    // avoid modifying rs
		r.Reservations[ls-1] = Resource_ReservationInfo{} // don't leak nested pointers
		r.Reservations = r.Reservations[:ls-1]            // shrink the slice
		// unroll Add1 to avoid additional calls to Clone
		rr := *r
		for j := range result {
			r2 := &result[j]
			if r2.Addable(rr) {
				r2.Add(rr)
				continue pop_next
			}
		}
		// cannot be combined with an existing resource
		result = append(result, rr)
	}
	return
}
"resource": ""
} |
q12919 | Allocate | train | func (r *Resource) Allocate(role string) {
if role == "" {
panic(fmt.Sprintf("cannot allocate resource to an empty-string role: %q", r))
}
r.AllocationInfo = &Resource_AllocationInfo{Role: &role}
} | go | {
"resource": ""
} |
q12920 | Allocate | train | func (rs Resources) Allocate(role string) Resources {
if role == "" {
panic(fmt.Sprintf("cannot allocate resources to an empty-string role: %q", rs))
}
for i := range rs {
rs[i].AllocationInfo = &Resource_AllocationInfo{Role: &role}
}
return rs
} | go | {
"resource": ""
} |
q12921 | Unallocate | train | func (rs Resources) Unallocate() Resources {
for i := range rs {
rs[i].AllocationInfo = nil
}
return rs
} | go | {
"resource": ""
} |
// Merged collapses the receiver into a single Opt that applies every non-nil
// member, in order, to the Client. The Opt it returns yields an "undo" Opt
// that reverts all of the applied changes. Returns nil for an empty Opts.
func (opts Opts) Merged() Opt {
	if len(opts) == 0 {
		return nil
	}
	return func(c *Client) Opt {
		var (
			size = len(opts)
			undo = make(Opts, size)
		)
		size-- // make this a zero-based offset
		for i, opt := range opts {
			if opt != nil {
				// record undo ops in reverse order so that undoing
				// replays the inverse operations last-applied-first
				undo[size-i] = opt(c)
			}
		}
		return undo.Merged()
	}
}
"resource": ""
} |
q12923 | And | train | func (o Opt) And(other Opt) Opt {
if o == nil {
if other == nil {
return nil
}
return other
}
if other == nil {
return o
}
return Opts{o, other}.Merged()
} | go | {
"resource": ""
} |
q12924 | NewHTTPTransporter | train | func NewHTTPTransporter(upid upid.UPID, address net.IP, opts ...httpOpt) *HTTPTransporter {
transport := httpTransport
client := HttpClient
client.Transport = &transport
mux := http.NewServeMux()
result := &HTTPTransporter{
upid: upid,
messageQueue: make(chan *Message, defaultQueueSize),
mux: mux,
client: &client,
tr: &transport,
address: address,
shouldQuit: make(chan struct{}),
server: &http.Server{
ReadTimeout: ReadTimeout,
WriteTimeout: WriteTimeout,
Handler: mux,
},
}
for _, f := range opts {
f(result)
}
result.state = ¬StartedState{result}
return result
} | go | {
"resource": ""
} |
q12925 | Send | train | func (t *HTTPTransporter) Send(ctx context.Context, msg *Message) (sendError error) {
return t.getState().Send(ctx, msg)
} | go | {
"resource": ""
} |
q12926 | send | train | func (t *HTTPTransporter) send(ctx context.Context, msg *Message) (sendError error) {
log.V(2).Infof("Sending message to %v via http\n", msg.UPID)
req, err := t.makeLibprocessRequest(msg)
if err != nil {
return err
}
return t.httpDo(ctx, req, func(resp *http.Response, err error) error {
if err != nil {
log.V(1).Infof("Failed to POST: %v\n", err)
return &networkError{err}
}
defer resp.Body.Close()
// ensure master acknowledgement.
if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) {
return &mesosError{
errorCode: resp.StatusCode,
upid: msg.UPID.String(),
uri: msg.RequestURI(),
status: resp.Status,
}
}
return nil
})
} | go | {
"resource": ""
} |
q12927 | Start | train | func (t *HTTPTransporter) Start() (upid.UPID, <-chan error) {
t.stateLock.Lock()
defer t.stateLock.Unlock()
return t.state.Start()
} | go | {
"resource": ""
} |
q12928 | start | train | func (t *HTTPTransporter) start() (upid.UPID, <-chan error) {
ch := make(chan error, 1)
if err := t.listen(); err != nil {
ch <- err
return upid.UPID{}, ch
}
// TODO(yifan): Set read/write deadline.
go func() {
err := t.server.Serve(t.listener)
select {
case <-t.shouldQuit:
log.V(1).Infof("HTTP server stopped because of shutdown")
ch <- nil
default:
if err != nil && log.V(1) {
log.Errorln("HTTP server stopped with error", err.Error())
} else {
log.V(1).Infof("HTTP server stopped")
}
ch <- err
t.Stop(false)
}
}()
return t.upid, ch
} | go | {
"resource": ""
} |
q12929 | Stop | train | func (t *HTTPTransporter) Stop(graceful bool) error {
t.stateLock.Lock()
defer t.stateLock.Unlock()
return t.state.Stop(graceful)
} | go | {
"resource": ""
} |
q12930 | stop | train | func (t *HTTPTransporter) stop(graceful bool) error {
close(t.shouldQuit)
log.Info("stopping HTTP transport")
//TODO(jdef) if graceful, wait for pending requests to terminate
err := t.listener.Close()
return err
} | go | {
"resource": ""
} |
q12931 | UPID | train | func (t *HTTPTransporter) UPID() upid.UPID {
t.stateLock.Lock()
defer t.stateLock.Unlock()
return t.upid
} | go | {
"resource": ""
} |
q12932 | NewDecoder | train | func NewDecoder(s encoding.Source) encoding.Decoder {
r := s()
var (
uf = func(b []byte, m interface{}) error { return proto.Unmarshal(b, m.(proto.Message)) }
dec = framing.NewDecoder(r, uf)
)
return encoding.DecoderFunc(func(u encoding.Unmarshaler) error { return dec.Decode(u) })
} | go | {
"resource": ""
} |
q12933 | Call | train | func (f CallerFunc) Call(ctx context.Context, c *scheduler.Call) (mesos.Response, error) {
return f(ctx, c)
} | go | {
"resource": ""
} |
q12934 | CallNoData | train | func CallNoData(ctx context.Context, caller Caller, call *scheduler.Call) error {
resp, err := caller.Call(ctx, call)
if resp != nil {
resp.Close()
}
return err
} | go | {
"resource": ""
} |
q12935 | Metrics | train | func Metrics(harness metrics.Harness, labeler Labeler) Rule {
if harness == nil {
panic("harness is a required parameter")
}
if labeler == nil {
labeler = defaultLabeler
}
return func(ctx context.Context, e *scheduler.Event, err error, ch Chain) (context.Context, *scheduler.Event, error) {
labels := labeler(ctx, e)
harness(func() error {
ctx, e, err = ch(ctx, e, err)
return err
}, labels...)
return ctx, e, err
}
} | go | {
"resource": ""
} |
q12936 | Handle | train | func Handle(h events.Handler) Rule {
if h == nil {
return nil
}
return func(ctx context.Context, e *scheduler.Event, err error, chain Chain) (context.Context, *scheduler.Event, error) {
newErr := h.HandleEvent(ctx, e)
return chain(ctx, e, Error2(err, newErr))
}
} | go | {
"resource": ""
} |
q12937 | putOffer | train | func (cache *schedCache) putOffer(offer *mesos.Offer, pid *upid.UPID) {
if offer == nil || pid == nil {
log.V(3).Infoln("WARN: Offer not cached. The offer or pid cannot be nil")
return
}
log.V(3).Infoln("Caching offer ", offer.Id.GetValue(), " with slavePID ", pid.String())
cache.lock.Lock()
cache.savedOffers[offer.Id.GetValue()] = &cachedOffer{offer: offer, slavePid: pid}
cache.lock.Unlock()
} | go | {
"resource": ""
} |
q12938 | getOffer | train | func (cache *schedCache) getOffer(offerId *mesos.OfferID) *cachedOffer {
if offerId == nil {
log.V(3).Infoln("WARN: OfferId == nil, returning nil")
return nil
}
cache.lock.RLock()
defer cache.lock.RUnlock()
return cache.savedOffers[offerId.GetValue()]
} | go | {
"resource": ""
} |
q12939 | SourceReader | train | func SourceReader(r io.Reader) Source {
ch := make(chan framing.ReaderFunc, 1)
ch <- framing.ReadAll(r)
return func() framing.Reader {
select {
case f := <-ch:
return f
default:
return framing.ReaderFunc(framing.EOFReaderFunc)
}
}
} | go | {
"resource": ""
} |
q12940 | ReadAll | train | func ReadAll(r io.Reader) ReaderFunc {
return func() (b []byte, err error) {
b, err = ioutil.ReadAll(r)
if len(b) == 0 && err == nil {
err = io.EOF
}
return
}
} | go | {
"resource": ""
} |
q12941 | WriterFor | train | func WriterFor(w io.Writer) WriterFunc {
return func(b []byte) error {
n, err := w.Write(b)
if err == nil && n != len(b) {
return io.ErrShortWrite
}
return err
}
} | go | {
"resource": ""
} |
q12942 | Int | train | func (a Adder) Int(x int, s ...string) {
a(float64(x), s...)
} | go | {
"resource": ""
} |
q12943 | NewHarness | train | func NewHarness(counts, errors Counter, timed Watcher, clock func() time.Time) Harness {
var harness Harness
if timed != nil && clock != nil {
harness = func(f func() error, labels ...string) error {
counts(labels...)
var (
t = clock()
err = f()
)
timed.Since(t, labels...)
if err != nil {
errors(labels...)
}
return err
}
} else {
harness = func(f func() error, labels ...string) error {
counts(labels...)
err := f()
if err != nil {
errors(labels...)
}
return err
}
}
return harness
} | go | {
"resource": ""
} |
q12944 | BasicAuth | train | func BasicAuth(username, passwd string) ConfigOpt {
// TODO(jdef) this could be more efficient. according to the stdlib we're not supposed to
// mutate the original Request, so we copy here (including headers). another approach would
// be to generate a functional RequestOpt that adds the right header.
return WrapRoundTripper(func(rt http.RoundTripper) http.RoundTripper {
return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
var h http.Header
if req.Header != nil {
h = make(http.Header, len(req.Header))
for k, v := range req.Header {
h[k] = append(make([]string, 0, len(v)), v...)
}
}
clonedReq := *req
clonedReq.Header = h
clonedReq.SetBasicAuth(username, passwd)
return rt.RoundTrip(&clonedReq)
})
})
} | go | {
"resource": ""
} |
q12945 | Call | train | func Call(caller calls.Caller) Rule {
if caller == nil {
return nil
}
return func(ctx context.Context, c *scheduler.Call, _ mesos.Response, _ error, ch Chain) (context.Context, *scheduler.Call, mesos.Response, error) {
resp, err := caller.Call(ctx, c)
return ch(ctx, c, resp, err)
}
} | go | {
"resource": ""
} |
q12946 | Caller | train | func (r Rule) Caller(caller calls.Caller) Rule {
return Rules{r, Call(caller)}.Eval
} | go | {
"resource": ""
} |
q12947 | CallerF | train | func (r Rule) CallerF(cf calls.CallerFunc) Rule {
return r.Caller(calls.Caller(cf))
} | go | {
"resource": ""
} |
q12948 | Close | train | func (l *L) Close() {
if atomic.AddInt32(&l.value, 1) == 1 {
close(l.line)
}
<-l.line // concurrent calls to Close block until the latch is actually closed
} | go | {
"resource": ""
} |
q12949 | Reset | train | func (l *L) Reset() *L {
l.line, l.value = make(chan struct{}), 0
return l
} | go | {
"resource": ""
} |
q12950 | If | train | func (rf Reducer) If(f func(*mesos.Resource) bool) Reducer {
if f == nil {
return rf
}
return func(acc, x *mesos.Resource) *mesos.Resource {
if f(x) {
return rf(acc, x)
}
return acc
}
} | go | {
"resource": ""
} |
q12951 | IfNot | train | func (rf Reducer) IfNot(f func(*mesos.Resource) bool) Reducer {
if f == nil {
return rf
}
return rf.If(func(r *mesos.Resource) bool {
return !f(r)
})
} | go | {
"resource": ""
} |
q12952 | Reduce | train | func Reduce(rf Reducer, rs ...mesos.Resource) (r *mesos.Resource) {
if rf == nil {
panic("Reduce: reducer func may not be nil")
}
for i := range rs {
r = rf(r, &rs[i])
}
return
} | go | {
"resource": ""
} |
q12953 | With | train | func (r *Call_Reconcile) With(opts ...ReconcileOpt) *Call_Reconcile {
for _, opt := range opts {
if opt != nil {
opt(r)
}
}
return r
} | go | {
"resource": ""
} |
q12954 | Copy | train | func (co CallOptions) Copy() CallOptions {
if len(co) == 0 {
return nil
}
x := make(CallOptions, len(co))
copy(x, co)
return x
} | go | {
"resource": ""
} |
q12955 | IllegalState | train | func IllegalState(m Interface, data []byte) (StepFunc, []byte, error) {
return IllegalState, nil, IllegalStateErr
} | go | {
"resource": ""
} |
q12956 | NewReader | train | func NewReader(read io.Reader, opt ...Opt) framing.Reader {
debug.Log("new frame reader")
r := &reader{Scanner: bufio.NewScanner(read)}
r.Split(func(data []byte, atEOF bool) (int, []byte, error) {
// Scanner panics if we invoke Split after scanning has started,
// use this proxy func as a work-around.
return r.splitf(data, atEOF)
})
buf := make([]byte, 16*1024)
r.Buffer(buf, 1<<22) // 1<<22 == max protobuf size
r.splitf = r.splitSize
// apply options
for _, f := range opt {
if f != nil {
f(r)
}
}
return r
} | go | {
"resource": ""
} |
q12957 | ReadFrame | train | func (r *reader) ReadFrame() (tok []byte, err error) {
for r.Scan() {
b := r.Bytes()
if len(b) == 0 {
continue
}
tok = b
debug.Log("len(tok)", len(tok))
break
}
// either scan failed, or it succeeded and we have a token...
err = r.Err()
if err == nil && len(tok) == 0 {
err = io.EOF
}
return
} | go | {
"resource": ""
} |
q12958 | NewMesosExecutorDriver | train | func NewMesosExecutorDriver(config DriverConfig) (*MesosExecutorDriver, error) {
if config.Executor == nil {
msg := "Executor callback interface cannot be nil."
log.Errorln(msg)
return nil, fmt.Errorf(msg)
}
hostname := mesosutil.GetHostname(config.HostnameOverride)
newMessenger := config.NewMessenger
if newMessenger == nil {
newMessenger = func() (messenger.Messenger, error) {
process := process.New("executor")
return messenger.ForHostname(process, hostname, config.BindingAddress, config.BindingPort, config.PublishedAddress)
}
}
driver := &MesosExecutorDriver{
status: mesosproto.Status_DRIVER_NOT_STARTED,
stopCh: make(chan struct{}),
updates: make(map[string]*mesosproto.StatusUpdate),
tasks: make(map[string]*mesosproto.TaskInfo),
workDir: ".",
started: make(chan struct{}),
recoveryTimeout: defaultRecoveryTimeout,
}
driver.cond = sync.NewCond(&driver.lock)
// decouple serialized executor callback execution from goroutines of this driver
var execLock sync.Mutex
driver.withExecutor = func(f func(e Executor)) {
go func() {
execLock.Lock()
defer execLock.Unlock()
f(config.Executor)
}()
}
var err error
if driver.messenger, err = newMessenger(); err != nil {
return nil, err
}
if err = driver.init(); err != nil {
log.Errorf("failed to initialize the driver: %v", err)
return nil, err
}
return driver, nil
} | go | {
"resource": ""
} |
q12959 | Stop | train | func (driver *MesosExecutorDriver) Stop() (mesosproto.Status, error) {
driver.lock.Lock()
defer driver.lock.Unlock()
return driver.stop()
} | go | {
"resource": ""
} |
q12960 | _stop | train | func (driver *MesosExecutorDriver) _stop(stopStatus mesosproto.Status) error {
err := driver.messenger.Stop()
defer func() {
select {
case <-driver.stopCh:
// already closed
default:
close(driver.stopCh)
}
driver.cond.Broadcast()
}()
driver.status = stopStatus
if err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
q12961 | Abort | train | func (driver *MesosExecutorDriver) Abort() (mesosproto.Status, error) {
driver.lock.Lock()
defer driver.lock.Unlock()
return driver.abort()
} | go | {
"resource": ""
} |
q12962 | Join | train | func (driver *MesosExecutorDriver) Join() (mesosproto.Status, error) {
driver.lock.Lock()
defer driver.lock.Unlock()
return driver.join()
} | go | {
"resource": ""
} |
q12963 | SendStatusUpdate | train | func (driver *MesosExecutorDriver) SendStatusUpdate(taskStatus *mesosproto.TaskStatus) (mesosproto.Status, error) {
driver.lock.Lock()
defer driver.lock.Unlock()
return driver.sendStatusUpdate(taskStatus)
} | go | {
"resource": ""
} |
q12964 | SendFrameworkMessage | train | func (driver *MesosExecutorDriver) SendFrameworkMessage(data string) (mesosproto.Status, error) {
driver.lock.Lock()
defer driver.lock.Unlock()
return driver.sendFrameworkMessage(data)
} | go | {
"resource": ""
} |
q12965 | unacknowledgedTasks | train | func unacknowledgedTasks(state *internalState) (result []mesos.TaskInfo) {
if n := len(state.unackedTasks); n > 0 {
result = make([]mesos.TaskInfo, 0, n)
for k := range state.unackedTasks {
result = append(result, state.unackedTasks[k])
}
}
return
} | go | {
"resource": ""
} |
q12966 | unacknowledgedUpdates | train | func unacknowledgedUpdates(state *internalState) (result []executor.Call_Update) {
if n := len(state.unackedUpdates); n > 0 {
result = make([]executor.Call_Update, 0, n)
for k := range state.unackedUpdates {
result = append(result, state.unackedUpdates[k])
}
}
return
} | go | {
"resource": ""
} |
q12967 | NewStandalone | train | func NewStandalone(mi *mesos.MasterInfo) *Standalone {
log.V(2).Infof("creating new standalone detector for %+v", mi)
stand := &Standalone{
ch: make(chan *mesos.MasterInfo),
tr: &http.Transport{},
initial: mi,
done: make(chan struct{}),
leaderSyncInterval: defaultMesosLeaderSyncInterval,
httpClientTimeout: defaultMesosHttpClientTimeout,
assumedMasterPort: defaultMesosMasterPort,
}
stand.poller = stand._poller
stand.fetchPid = stand._fetchPid
return stand
} | go | {
"resource": ""
} |
q12968 | Detect | train | func (s *Standalone) Detect(o MasterChanged) error {
log.V(2).Info("Detect()")
s.pollOnce.Do(func() {
log.V(1).Info("spinning up asyc master detector poller")
// delayed initialization allows unit tests to modify timeouts before detection starts
s.client = &http.Client{
Transport: s.tr,
Timeout: s.httpClientTimeout,
}
go s.poller(s.fetchPid)
})
if o != nil {
log.V(1).Info("spawning asyc master detector listener")
go func() {
log.V(2).Infof("waiting for polled to send updates")
pollWaiter:
for {
select {
case mi, ok := <-s.ch:
if !ok {
break pollWaiter
}
log.V(1).Infof("detected master change: %+v", mi)
o.OnMasterChanged(mi)
case <-s.done:
return
}
}
o.OnMasterChanged(nil)
}()
} else {
log.Warningf("detect called with a nil master change listener")
}
return nil
} | go | {
"resource": ""
} |
q12969 | HandleEvent | train | func (hs Handlers) HandleEvent(ctx context.Context, e *executor.Event) (err error) {
if h := hs[e.GetType()]; h != nil {
return h.HandleEvent(ctx, e)
}
return nil
} | go | {
"resource": ""
} |
q12970 | Parse | train | func Parse(input string) (*UPID, error) {
upid := new(UPID)
splits := strings.Split(input, "@")
if len(splits) != 2 {
return nil, fmt.Errorf("Expect one `@' in the input")
}
upid.ID = splits[0]
if _, err := net.ResolveTCPAddr("tcp4", splits[1]); err != nil {
return nil, err
}
upid.Host, upid.Port, _ = net.SplitHostPort(splits[1])
return upid, nil
} | go | {
"resource": ""
} |
q12971 | Equal | train | func (u *UPID) Equal(upid *UPID) bool {
if u == nil {
return upid == nil
} else {
return upid != nil && u.ID == upid.ID && u.Host == upid.Host && u.Port == upid.Port
}
} | go | {
"resource": ""
} |
q12972 | GetMetrics | train | func GetMetrics(d *time.Duration) (call *master.Call) {
call = &master.Call{
Type: master.Call_GET_METRICS,
GetMetrics: &master.Call_GetMetrics{},
}
if d != nil {
call.GetMetrics.Timeout = &mesos.DurationInfo{
Nanoseconds: d.Nanoseconds(),
}
}
return
} | go | {
"resource": ""
} |
q12973 | ListFiles | train | func ListFiles(path string) *master.Call {
return &master.Call{
Type: master.Call_LIST_FILES,
ListFiles: &master.Call_ListFiles{
Path: path,
},
}
} | go | {
"resource": ""
} |
q12974 | ReadFile | train | func ReadFile(path string, offset uint64) *master.Call {
return &master.Call{
Type: master.Call_READ_FILE,
ReadFile: &master.Call_ReadFile{
Path: path,
Offset: offset,
},
}
} | go | {
"resource": ""
} |
q12975 | UpdateWeights | train | func UpdateWeights(weights ...mesos.WeightInfo) *master.Call {
return &master.Call{
Type: master.Call_UPDATE_WEIGHTS,
UpdateWeights: &master.Call_UpdateWeights{
WeightInfos: weights,
},
}
} | go | {
"resource": ""
} |
q12976 | ReserveResources | train | func ReserveResources(a mesos.AgentID, r ...mesos.Resource) *master.Call {
return &master.Call{
Type: master.Call_RESERVE_RESOURCES,
ReserveResources: &master.Call_ReserveResources{
AgentID: a,
Resources: r,
},
}
} | go | {
"resource": ""
} |
q12977 | UnreserveResources | train | func UnreserveResources(a mesos.AgentID, r ...mesos.Resource) *master.Call {
return &master.Call{
Type: master.Call_UNRESERVE_RESOURCES,
UnreserveResources: &master.Call_UnreserveResources{
AgentID: a,
Resources: r,
},
}
} | go | {
"resource": ""
} |
q12978 | CreateVolumes | train | func CreateVolumes(a mesos.AgentID, v ...mesos.Resource) *master.Call {
return &master.Call{
Type: master.Call_CREATE_VOLUMES,
CreateVolumes: &master.Call_CreateVolumes{
AgentID: a,
Volumes: v,
},
}
} | go | {
"resource": ""
} |
q12979 | DestroyVolumes | train | func DestroyVolumes(a mesos.AgentID, v ...mesos.Resource) *master.Call {
return &master.Call{
Type: master.Call_DESTROY_VOLUMES,
DestroyVolumes: &master.Call_DestroyVolumes{
AgentID: a,
Volumes: v,
},
}
} | go | {
"resource": ""
} |
q12980 | GrowVolume | train | func GrowVolume(a *mesos.AgentID, volume, addition mesos.Resource) *master.Call {
return &master.Call{
Type: master.Call_GROW_VOLUME,
GrowVolume: &master.Call_GrowVolume{
AgentID: a,
Volume: volume,
Addition: addition,
},
}
} | go | {
"resource": ""
} |
q12981 | ShrinkVolume | train | func ShrinkVolume(a *mesos.AgentID, volume mesos.Resource, subtract mesos.Value_Scalar) *master.Call {
return &master.Call{
Type: master.Call_SHRINK_VOLUME,
ShrinkVolume: &master.Call_ShrinkVolume{
AgentID: a,
Volume: volume,
Subtract: subtract,
},
}
} | go | {
"resource": ""
} |
q12982 | UpdateMaintenanceSchedule | train | func UpdateMaintenanceSchedule(s maintenance.Schedule) *master.Call {
return &master.Call{
Type: master.Call_UPDATE_MAINTENANCE_SCHEDULE,
UpdateMaintenanceSchedule: &master.Call_UpdateMaintenanceSchedule{
Schedule: s,
},
}
} | go | {
"resource": ""
} |
q12983 | StartMaintenance | train | func StartMaintenance(m ...mesos.MachineID) *master.Call {
return &master.Call{
Type: master.Call_START_MAINTENANCE,
StartMaintenance: &master.Call_StartMaintenance{
Machines: m,
},
}
} | go | {
"resource": ""
} |
q12984 | StopMaintenance | train | func StopMaintenance(m ...mesos.MachineID) *master.Call {
return &master.Call{
Type: master.Call_STOP_MAINTENANCE,
StopMaintenance: &master.Call_StopMaintenance{
Machines: m,
},
}
} | go | {
"resource": ""
} |
q12985 | SetQuota | train | func SetQuota(qr quota.QuotaRequest) *master.Call {
return &master.Call{
Type: master.Call_SET_QUOTA,
SetQuota: &master.Call_SetQuota{
QuotaRequest: qr,
},
}
} | go | {
"resource": ""
} |
q12986 | RemoveQuota | train | func RemoveQuota(role string) *master.Call {
return &master.Call{
Type: master.Call_REMOVE_QUOTA,
RemoveQuota: &master.Call_RemoveQuota{
Role: role,
},
}
} | go | {
"resource": ""
} |
q12987 | MarkAgentGone | train | func MarkAgentGone(id mesos.AgentID) *master.Call {
return &master.Call{
Type: master.Call_MARK_AGENT_GONE,
MarkAgentGone: &master.Call_MarkAgentGone{
AgentID: id,
},
}
} | go | {
"resource": ""
} |
q12988 | Teardown | train | func Teardown(id mesos.FrameworkID) *master.Call {
return &master.Call{
Type: master.Call_TEARDOWN,
Teardown: &master.Call_Teardown{
FrameworkID: id,
},
}
} | go | {
"resource": ""
} |
q12989 | Span | train | func (rb *RangeBuilder) Span(bp, ep uint64) *RangeBuilder {
rb.Ranges = append(rb.Ranges, mesos.Value_Range{Begin: bp, End: ep})
return rb
} | go | {
"resource": ""
} |
q12990 | Otherwise | train | func (hs Handlers) Otherwise(f HandlerFunc) HandlerFunc {
if f == nil {
return hs.HandleEvent
}
return func(ctx context.Context, e *scheduler.Event) error {
if h := hs[e.GetType()]; h != nil {
return h.HandleEvent(ctx, e)
}
return f(ctx, e)
}
} | go | {
"resource": ""
} |
q12991 | Cancel | train | func (d *httpDecoder) Cancel(graceful bool) {
log.V(2).Infof("%scancel:%t", d.idtag, graceful)
d.cancelGuard.Lock()
defer d.cancelGuard.Unlock()
select {
case <-d.shouldQuit:
// already quitting, but perhaps gracefully?
default:
close(d.shouldQuit)
}
// allow caller to "upgrade" from a graceful cancel to a forced one
if !graceful {
select {
case <-d.forceQuit:
// already forcefully quitting
default:
close(d.forceQuit) // push it!
}
}
} | go | {
"resource": ""
} |
q12992 | updateForRequest | train | func (d *httpDecoder) updateForRequest(bootstrapping bool) {
// check "Transfer-Encoding" for "chunked"
d.chunked = false
for _, v := range d.req.Header["Transfer-Encoding"] {
if v == "chunked" {
d.chunked = true
break
}
}
if !d.chunked && d.req.ContentLength < 0 {
if bootstrapping {
// strongly suspect that Go's internal net/http lib is stripping
// the Transfer-Encoding header from the initial request, so this
// workaround makes a very mesos-specific assumption: an unknown
// Content-Length indicates a chunked stream.
d.chunked = true
} else {
// via https://tools.ietf.org/html/rfc7230#section-3.3.2
d.req.ContentLength = 0
}
}
// check "Connection" for "Keep-Alive"
d.kalive = d.req.Header.Get("Connection") == "Keep-Alive"
log.V(2).Infof(d.idtag+"update-for-request: chunked %v keep-alive %v", d.chunked, d.kalive)
} | go | {
"resource": ""
} |
q12993 | terminateState | train | func terminateState(d *httpDecoder) httpState {
log.V(2).Infoln(d.idtag + "terminate-state")
// closing these chans tells Decoder users that it's wrapping up
close(d.msg)
close(d.errCh)
// attempt to forcefully close the connection and signal response handlers that
// no further responses should be written
d.Cancel(false)
if d.con != nil {
d.con.Close()
}
// there is no spoon
return nil
} | go | {
"resource": ""
} |
q12994 | checkTimeoutOrFail | train | func (d *httpDecoder) checkTimeoutOrFail(err error, stateContinue httpState) (httpState, bool) {
if err != nil {
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
select {
case <-d.forceQuit:
return terminateState, true
case <-d.shouldQuit:
return gracefulTerminateState, true
default:
return stateContinue, true
}
}
d.sendError(err)
return terminateState, true
}
return nil, false
} | go | {
"resource": ""
} |
q12995 | Eval | train | func (rs Rules) Eval(ctx context.Context, e *executor.Event, err error, ch Chain) (context.Context, *executor.Event, error) {
return ch(rs.Chain()(ctx, e, err))
} | go | {
"resource": ""
} |
q12996 | MaxRedirects | train | func MaxRedirects(mr int) Option {
return func(c *client) Option {
old := c.redirect.MaxAttempts
c.redirect.MaxAttempts = mr
return MaxRedirects(old)
}
} | go | {
"resource": ""
} |
q12997 | AllowReconnection | train | func AllowReconnection(v bool) Option {
return func(c *client) Option {
old := c.allowReconnect
c.allowReconnect = v
return AllowReconnection(old)
}
} | go | {
"resource": ""
} |
q12998 | NewCaller | train | func NewCaller(cl *httpcli.Client, opts ...Option) calls.Caller {
result := &client{Client: cl, redirect: DefaultRedirectSettings}
cl.With(result.redirectHandler())
for _, o := range opts {
if o != nil {
o(result)
}
}
return &state{
client: result,
fn: disconnectedFn,
}
} | go | {
"resource": ""
} |
q12999 | Call | train | func (cli *client) Call(ctx context.Context, call *scheduler.Call) (mesos.Response, error) {
return cli.httpDo(ctx, call)
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.