c174900
// It could probably be improved
// with some focused tuning.
const lenMultiple = 2
if len(u.entries) == 0 {
    u.entries = nil
} else if len(u.entries)*lenMultiple < cap(u.entries) {
    newEntries := make([]pb.Entry, len(u.entries))
    copy(newEntries, u.entries)
    u.entries = newEntries
}
}
c174901
err = st.Snapshotter.SaveSnap(snap)
if err != nil {
    return err
}
return st.WAL.ReleaseLockTo(snap.Metadata.Index)
}
c174902
if len(cfg.Endpoints) == 0 {
    return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
c174903
return &Client{ctx: cctx, cancel: cancel}
}
c174904
return New(Config{Endpoints: []string{url}})
}
c174905
if c.conn != nil {
    return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
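For orientation, a minimal usage sketch (not part of this corpus) of the clientv3 lifecycle these fragments come from; the import path assumes the etcd 3.4 layout and the endpoint is a placeholder:

package main

import (
    "log"
    "time"

    "go.etcd.io/etcd/clientv3"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }
    // Close releases the underlying gRPC connection, as in the fragment above.
    defer cli.Close()
}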
c174906
eps := make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return eps
}
c174907
c.cfg.Endpoints = eps
c.resolverGroup.SetEndpoints(eps)
}
c174908
for _, m := range mresp.Members {
    eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
c174909
// once it is available.
rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
opts = append(opts,
    // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
    // Streams that are safe to retry are enabled individually.
    grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
    grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
)
return opts, nil
}
c174910
// directly dial endpoints and
// using the same dial functions that we use for load balancer dialing.
return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
c174911
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
return c.dial(target, creds, dopts...)
}
c174912
        opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
    }
    cancel()
}
opts = append(opts, c.cfg.DialOptions...)

dctx := c.ctx
if c.cfg.DialTimeout > 0 {
    var cancel context.CancelFunc
    dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
    defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}

conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
    return nil, err
}
return conn, nil
}
c174913
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
c174914
zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) return jitterUp(waitBetween, jitterFraction) } c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) return 0 } }
c174915
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
c174916
return &leasePrefix{l, []byte(prefix)}
}
c174917
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}
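A hedged sketch (not from this corpus) of how IsCreate is typically consumed in a watch loop; cli is assumed to be a *clientv3.Client, the context and fmt imports are assumed, and IsModify is the companion check for updates:

// Sketch only: classify watch events by create/update/delete.
for wresp := range cli.Watch(context.Background(), "greeting") {
    for _, ev := range wresp.Events {
        switch {
        case ev.IsCreate():
            fmt.Println("created:", string(ev.Kv.Key))
        case ev.IsModify():
            fmt.Println("updated:", string(ev.Kv.Key))
        default: // a delete event (Type == EventTypeDelete)
            fmt.Println("deleted:", string(ev.Kv.Key))
        }
    }
}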
c174918
        return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
    }
    return v3rpc.ErrFutureRev
}
return nil
}
c174919
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
}
c174920
case <-ctx.Done():
    if err == nil {
        return ctx.Err()
    }
    return err
case <-donec:
    if wgs.closeErr != nil {
        return wgs.closeErr
    }
    // retry; may have dropped stream from no ctxs
    return w.RequestProgress(ctx)
}
}
c174921
    if w.resuming[0] != nil {
        return w.resuming[0]
    }
    w.resuming = w.resuming[1:len(w.resuming)]
}
return nil
}
c174922
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
// indicate they should be broadcast.
if wr.IsProgressNotify() && pbresp.WatchId == -1 {
    return w.broadcastResponse(wr)
}
return w.unicastResponse(wr, pbresp.WatchId)
}
c174923
    select {
    case ws.recvc <- wr:
    case <-ws.donec:
    }
}
return true
}
c174924
ws, ok := w.substreams[watchId]
if !ok {
    return false
}
select {
case ws.recvc <- wr:
case <-ws.donec:
    return false
}
return true
}
c174925
for _, ws := range w.resuming {
    if ws != nil {
        <-ws.donec
    }
}
}
c174926
    ProgressNotify: wr.progressNotify,
    Filters:        wr.filters,
    PrevKv:         wr.prevKV,
    Fragment:       wr.fragment,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
c174927
cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
c174928
_, exists = us.d[value]
return exists
}
c174929
    if !us.Contains(s) {
        return false
    }
}
return true
}
c174930
v1.Sort()
v2.Sort()
return reflect.DeepEqual(v1, v2)
}
c174931
    values = append(values, val)
}
return values
}
c174932
for val := range us.d {
    cp.Add(val)
}
return cp
}
c174933
for _, val := range oValues {
    if _, ok := result.d[val]; !ok {
        continue
    }
    delete(result.d, val)
}
return result
}
c174934
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
return &ep
}
c174935
"Do not convert TTL keys") mc.Flags().StringVar(&migrateDatadir, "data-dir", "", "Path to the data directory") mc.Flags().StringVar(&migrateWALdir, "wal-dir", "", "Path to the WAL directory") mc.Flags().StringVar(&migrateTransformer, "transformer", "", "Path to the user-provided transformer program") return mc }
c174936
        switch cc.Type {
        case raftpb.ConfChangeAddNode:
            if len(cc.Context) > 0 {
                rc.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)})
            }
        case raftpb.ConfChangeRemoveNode:
            if cc.NodeID == uint64(rc.id) {
                log.Println("I've been removed from the cluster! Shutting down.")
                return false
            }
            rc.transport.RemovePeer(types.ID(cc.NodeID))
        }
    }

    // after commit, update appliedIndex
    rc.appliedIndex = ents[i].Index

    // special nil commit to signal replay has finished
    if ents[i].Index == rc.lastIndex {
        select {
        case rc.commitC <- nil:
        case <-rc.stopc:
            return false
        }
    }
}
return true
}
c174937
log.Printf("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index) w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap) if err != nil { log.Fatalf("raftexample: error loading wal (%v)", err) } return w }
c174938
if snapshot != nil {
    rc.raftStorage.ApplySnapshot(*snapshot)
}
rc.raftStorage.SetHardState(st)

// append to storage so raft starts at the right place in log
rc.raftStorage.Append(ents)
// send nil once lastIndex is published so client knows commit channel is current
if len(ents) > 0 {
    rc.lastIndex = ents[len(ents)-1].Index
} else {
    rc.commitC <- nil
}
return w
}
c174939
close(rc.commitC)
close(rc.errorC)
rc.node.Stop()
}
c174940
cmd.Flags().Int64Var(&watchRev, "rev", 0, "Revision to start watching")
cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "get the previous key-value pair before the event happens")
return cmd
}
c174941
return ms.hardState, ms.snapshot.Metadata.ConfState, nil
}
c174942
defer ms.Unlock()
ms.hardState = st
return nil
}
c174943
// only contains dummy entries.
if len(ms.ents) == 1 {
    return nil, ErrUnavailable
}

ents := ms.ents[lo-offset : hi-offset]
return limitSize(ents, maxSize), nil
}
c174944
if i < offset {
    return 0, ErrCompacted
}
if int(i-offset) >= len(ms.ents) {
    return 0, ErrUnavailable
}
return ms.ents[i-offset].Term, nil
}
c174945
defer ms.Unlock()
return ms.lastIndex(), nil
}
c174946
defer ms.Unlock()
return ms.firstIndex(), nil
}
c174947
defer ms.Unlock()
return ms.snapshot, nil
}
c174948
snapIndex := snap.Metadata.Index
if msIndex >= snapIndex {
    return ErrSnapOutOfDate
}

ms.snapshot = snap
ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
return nil
}
c174949
i := compactIndex - offset
ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
ents[0].Index = ms.ents[i].Index
ents[0].Term = ms.ents[i].Term
ents = append(ents, ms.ents[i+1:]...)
ms.ents = ents
return nil
}
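The MemoryStorage fragments above (InitialState, Entries, Term, ApplySnapshot, Compact) are easiest to read alongside a driving example. A minimal sketch, assuming the etcd 3.4 raft import paths:

package main

import (
    "log"

    "go.etcd.io/etcd/raft"
    pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
    storage := raft.NewMemoryStorage()
    // Append persists log entries; Term/Entries above serve them back.
    if err := storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}); err != nil {
        log.Fatal(err)
    }
    storage.SetHardState(pb.HardState{Term: 1, Commit: 2})
    // Compact drops entries up to and including the given index,
    // mirroring the Compact fragment above.
    if err := storage.Compact(1); err != nil {
        log.Fatal(err)
    }
}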
c174950
defer p.mu.Unlock()
if u == p.urls[p.picked] {
    p.picked = (p.picked + 1) % len(p.urls)
}
}
c174951
list") ec.AddCommand(newEpHealthCommand()) ec.AddCommand(newEpStatusCommand()) ec.AddCommand(newEpHashKVCommand()) return ec }
c174952
// the endpoint is healthy.
ctx, cancel := commandCtx(cmd)
_, err = cli.Get(ctx, "health")
cancel()
eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()}
// permission denied is OK since proposal goes through consensus to get it
if err == nil || err == rpctypes.ErrPermissionDenied {
    eh.Health = true
} else {
    eh.Error = err.Error()
}
hch <- eh
}(cfg)
}
wg.Wait()
close(hch)

errs := false
healthList := []epHealth{}
for h := range hch {
    healthList = append(healthList, h)
    if h.Error != "" {
        errs = true
    }
}
display.EndpointHealth(healthList)
if errs {
    ExitWithError(ExitError, fmt.Errorf("unhealthy cluster"))
}
}
c174953
    and participates in leader election",
    Run: electCommandFunc,
}
cmd.Flags().BoolVarP(&electListen, "listen", "l", false, "observation mode")
return cmd
}
c174954
}
cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Optional. If present, defragments a data directory not in use by etcd.")
return cmd
}
c174955
"registered balancer", zap.String("policy", bb.cfg.Policy.String()), zap.String("name", bb.cfg.Name), ) }
c174956
if bb.lg == nil {
    bb.lg = zap.NewNop()
}

// TODO: support multiple connections
bb.mu.Lock()
bb.currentConn = cc
bb.mu.Unlock()

bb.lg.Info(
    "built balancer",
    zap.String("balancer-id", bb.id),
    zap.String("policy", bb.policy.String()),
    zap.String("resolver-target", cc.Target()),
)
return bb
}
c174957
    switch state {
    case connectivity.Ready:
        cse.numReady += updateVal
    case connectivity.Connecting:
        cse.numConnecting += updateVal
    case connectivity.TransientFailure:
        cse.numTransientFailure += updateVal
    }
}

// Evaluate.
if cse.numReady > 0 {
    return connectivity.Ready
}
if cse.numConnecting > 0 {
    return connectivity.Connecting
}
return connectivity.TransientFailure
}
c174958
// token revision in case the auth store was updated while
// the request has been handled.
if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
    return auth.ErrAuthOldRevision
}
return nil
}
c174959
    if filtered {
        continue
    }
    if !w.prevKV {
        evCopy := *ev
        evCopy.PrevKv = nil
        ev = &evCopy
    }
    events = append(events, ev)
}
if lastRev >= w.nextrev {
    w.nextrev = lastRev + 1
}

// all events are filtered out?
if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 {
    return
}

w.lastHeader = wr.Header
w.post(&pb.WatchResponse{
    Header:          &wr.Header,
    Created:         wr.Created,
    CompactRevision: wr.CompactRevision,
    Canceled:        wr.Canceled,
    WatchId:         w.id,
    Events:          events,
})
}
c174960
case <-time.After(50 * time.Millisecond):
    w.wps.cancel()
    return false
}
return true
}
c174961
:= ac.CORS["*"] if ok { return true } _, ok = ac.CORS[origin] return ok }
c174962
:= ac.HostWhitelist["*"] if ok { return true } _, ok = ac.HostWhitelist[host] return ok }
c174963
s := make([]string, 0, len(ss.valids))
for k := range ss.valids {
    s = append(s, k)
}
sort.Strings(s)
return s
}
c174964
    vm[v] = struct{}{}
}
return &SelectiveStringsValue{valids: vm, vs: []string{}}
}
c174965
func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
    return &kvPrefix{kv, prefix}
}
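A short usage sketch of this wrapper (assumptions: the clientv3/namespace package and an existing *clientv3.Client named cli):

// Sketch only: scope every KV call under a key prefix.
cli.KV = namespace.NewKV(cli.KV, "my-app/")
// A subsequent cli.Put(ctx, "config", "on") then writes the key "my-app/config".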
c174966
if err != nil {
    plog.Panicf("new URLsValue should never fail: %v", err)
}
return v
}
c174967
return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
}
c174968
        )
    }
    stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
    if e.cfg.logger != nil {
        e.cfg.logger.Info(
            "stopped serving peer traffic",
            zap.String("address", u),
        )
    }
    return nil
}
}

// start peer servers in a goroutine
for _, pl := range e.Peers {
    go func(l *peerListener) {
        u := l.Addr().String()
        if e.cfg.logger != nil {
            e.cfg.logger.Info(
                "serving peer traffic",
                zap.String("address", u),
            )
        } else {
            plog.Info("listening for peers on ", u)
        }
        e.errHandler(l.serve())
    }(pl)
}
return nil
}
c174969
tx.Lock()
tx.UnsafeCreateBucket(keyBucketName)
tx.UnsafeCreateBucket(metaBucketName)
tx.Unlock()
s.b.ForceCommit()

s.mu.Lock()
defer s.mu.Unlock()
if err := s.restore(); err != nil {
    // TODO: return the error instead of panic here?
    panic("failed to recover store from backend")
}
return s
}
c174970
zap.Int("given-revision-bytes-size", len(b)), ) } else { plog.Panicf("cannot append mark to non normal revision bytes") } } return append(b, markTombstone) }
c174971
if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
    return err
}
return os.Remove(f)
}
c174972
    // a directory, this will return syscall.ENOTDIR
    return err
}
return IsDirWriteable(dir)
}
c174973
    if len(ns) != 0 {
        err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
    }
}
return err
}
c174974
if lerr != nil {
    return lerr
}
if err = f.Truncate(off); err != nil {
    return err
}
// make sure blocks remain allocated
if err = Preallocate(f, lenf, true); err != nil {
    return err
}
_, err = f.Seek(off, io.SeekStart)
return err
}
c174975
select {
case f = <-fp.filec:
case err = <-fp.errc:
}
return f, err
}
c174976
    zap.AddCallerSkip(1),
    zap.ErrorOutput(syncer),
)
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
}
c174977
!= "" { cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile}) if err != nil { return nil, err } } tlscfg := &tls.Config{ MinVersion: tls.VersionTLS12, InsecureSkipVerify: yc.InsecureSkipTLSVerify, RootCAs: cp, } if cert != nil { tlscfg.Certificates = []tls.Certificate{*cert} } yc.Config.TLS = tlscfg return &yc.Config, nil }
c174978
return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
}
c174979
    return
}
curVersion = v
enabledMap = capabilityMaps[curVersion.String()]
enableMapMu.Unlock()

if lg != nil {
    lg.Info(
        "enabled capabilities for version",
        zap.String("cluster-version", version.Cluster(v.String())),
    )
} else {
    plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
}
}
c174980
c.Flags().IntVarP(&lockTTL, "ttl", "", lockTTL, "timeout for session")
return c
}
c174981
r.Tick()
r.tickMu.Unlock()
}
c174982
for i := 0; i < ticks; i++ {
    r.tick()
}
}
c174983
ac.AddCommand(newAuthEnableCommand())
ac.AddCommand(newAuthDisableCommand())
return ac
}
c174984
        if _, err = cli.RoleAdd(ctx, "root"); err != nil {
            break
        }
        if _, err = cli.UserGrantRole(ctx, "root", "root"); err != nil {
            break
        }
    }
}
cancel()
if err != nil {
    ExitWithError(ExitError, err)
}
fmt.Println("Authentication Enabled")
}
c174985
accept any arguments")) } ctx, cancel := commandCtx(cmd) _, err := mustClientFromCmd(cmd).Auth.AuthDisable(ctx) cancel() if err != nil { ExitWithError(ExitError, err) } fmt.Println("Authentication Disabled") }
c174986
    kc: pb.NewKVClient(c.conn),
}
}
c174987
    lc: pb.NewLeaseClient(c.conn),
}
}
c174988
    cc: pb.NewClusterClient(c.conn),
}
}
c174989
{
    return &retryMaintenanceClient{
        mc: pb.NewMaintenanceClient(conn),
    }
}
c174990
    ac: pb.NewAuthClient(c.conn),
}
}
c174991
    },
    Action: func(c *cli.Context) error {
        mkdirCommandFunc(c, mustNewKeyAPI(c), client.PrevIgnore)
        return nil
    },
}
}
c174992
    // waiters
    _, err = client.Put(b.ctx, b.key+"/ready", "")
    return err
}

_, err = WaitEvents(
    client,
    b.key+"/ready",
    ek.Revision(),
    []mvccpb.Event_EventType{mvccpb.PUT})
return err
}
c174993
    _, err = WaitEvents(
        client,
        string(highest.Key),
        highest.ModRevision,
        []mvccpb.Event_EventType{mvccpb.DELETE})
    if err != nil {
        return err
    }
    return b.Leave()
}

// delete self and wait on lowest process
if err = b.myKey.Delete(); err != nil {
    return err
}
key := string(lowest.Key)
_, err = WaitEvents(
    client,
    key,
    lowest.ModRevision,
    []mvccpb.Event_EventType{mvccpb.DELETE})
if err != nil {
    return err
}
return b.Leave()
}
c174994
// in v3.5
mux.HandleFunc(configPath+"/local/log", logHandleFunc)
HandleMetricsHealth(mux, server)
mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
}
c174995
        lg.Warn(
            "unexpected v2 response error",
            zap.String("remote-addr", r.RemoteAddr),
            zap.String("internal-server-error", err.Error()),
        )
    } else {
        mlog.MergeErrorf("got unexpected response error (%v)", err)
    }
}

herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
if et := herr.WriteTo(w); et != nil {
    if lg != nil {
        lg.Debug(
            "failed to write v2 HTTP error",
            zap.String("remote-addr", r.RemoteAddr),
            zap.String("internal-server-error", err.Error()),
            zap.Error(et),
        )
    } else {
        plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
    }
}
}
}
c174996
        if memb != nil {
            if c.lg != nil {
                c.lg.Panic("two member with same name found", zap.String("name", name))
            } else {
                plog.Panicf("two members with the given name %q exist", name)
            }
        }
        memb = m
    }
}
return memb.Clone()
}
c174997
    urls = append(urls, p.PeerURLs...)
}
sort.Strings(urls)
return urls
}
c174998
    for _, u := range m.PeerURLs {
        if urls[u] {
            return ErrPeerURLexists
        }
    }
case raftpb.ConfChangeRemoveNode:
    if members[id] == nil {
        return ErrIDNotFound
    }
case raftpb.ConfChangeUpdateNode:
    if members[id] == nil {
        return ErrIDNotFound
    }
    urls := make(map[string]bool)
    for _, m := range members {
        if m.ID == id {
            continue
        }
        for _, u := range m.PeerURLs {
            urls[u] = true
        }
    }
    m := new(Member)
    if err := json.Unmarshal(cc.Context, m); err != nil {
        if c.lg != nil {
            c.lg.Panic("failed to unmarshal member", zap.Error(err))
        } else {
            plog.Panicf("unmarshal member should never fail: %v", err)
        }
    }
    for _, u := range m.PeerURLs {
        if urls[u] {
            return ErrPeerURLexists
        }
    }
default:
    if c.lg != nil {
        c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String()))
    } else {
        plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
    }
}
return nil
}
c174999
zap.String("local-member-id", c.localID.String()), zap.String("added-peer-id", m.ID.String()), zap.Strings("added-peer-peer-urls", m.PeerURLs), ) } else { plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.cid) } }