| id | text | title |
|---|---|---|
c175000 | "skipped removing already removed member",
zap.String("cluster-id", c.cid.String()),
zap.String("local-member-id", c.localID.String()),
zap.String("removed-remote-peer-id", id.String()),
)
}
} else {
plog.Infof("removed member %s from cluster %s", id, c.cid)
}
} | |
c175001 | ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
for i := range ems {
if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
}
lms[i].ID = ems[i].ID
}
local.members = make(map[types.ID]*Member)
for _, m := range lms {
local.members[m.ID] = m
}
return nil
} | |
c175002 | keyi := i.(*keyIndex)
keyi.keep(rev, available)
return true
})
return available
} | |
c175003 | {
continue
}
// remove all channels that required a leader from keepalive
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
newCtxs := make([]context.Context, len(newChs))
newIdx := 0
for i := range ka.chs {
if ka.chs[i] == nil {
continue
}
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
newIdx++
}
ka.chs, ka.ctxs = newChs, newCtxs
}
} | |
c175004 |
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
go l.sendKeepAliveLoop(stream)
return stream, nil
} | |
c175005 | range ka.chs {
select {
case ch <- karesp:
default:
if l.lg != nil {
l.lg.Warn("lease keepalive response queue is full; dropping response send",
zap.Int("queue-size", len(ch)),
zap.Int("queue-capacity", cap(ch)),
)
}
}
// still advance in order to rate-limit keep-alive sends
ka.nextKeepAlive = nextKeepAlive
}
} | |
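The `select`/`default` in c175005 above is Go's standard non-blocking send: when a receiver lags and its buffer is full, the response is dropped instead of stalling the keepalive loop. A minimal, self-contained sketch of the idiom (names are illustrative, not etcd's):

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 1) // small buffer, like a keepalive response queue
	for i := 0; i < 3; i++ {
		select {
		case ch <- i:
			// delivered without blocking
		default:
			// receiver is slow and the buffer is full; drop rather than hang
			fmt.Println("dropped", i)
		}
	}
}
```

Dropping is acceptable here because a later keepalive response supersedes any lost one.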
c175006 | // waited too long for response; lease may be expired
ka.close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
} | |
c175007 | // TODO do something with this error?
return
}
}
select {
case <-time.After(retryConnWait):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
}
} | |
c175008 | struct{}),
}
lkv.wg.Add(2)
go func() {
defer lkv.wg.Done()
lkv.monitorSession()
}()
go func() {
defer lkv.wg.Done()
lkv.leases.clearOldRevokes(cctx)
}()
return lkv, lkv.Close, lkv.waitSession(cctx)
} | |
c175009 | for ctx.Err() == nil {
if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
return
}
}
} | |
c175010 | Target: pb.Compare_LEASE}
} | |
c175011 | cmp.TargetUnion.(*pb.Compare_Value); ok {
return tu.Value
}
return nil
} | |
c175012 | = []byte(end)
return cmp
} | |
c175013 |
cmp.RangeEnd = getPrefix(cmp.Key)
return cmp
} | |
c175014 | if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
} | |
c175015 | v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
} | |
c175016 | err error
switch e.Type {
case etcd.EventTypePut:
err = json.Unmarshal(e.Kv.Value, &jupdate)
jupdate.Op = naming.Add
case etcd.EventTypeDelete:
err = json.Unmarshal(e.PrevKv.Value, &jupdate)
jupdate.Op = naming.Delete
default:
continue
}
if err == nil {
updates = append(updates, &jupdate)
}
}
return updates, nil
} | |
c175017 | nil, fmt.Errorf("can't find journal (%v)", err)
}
return zapcore.AddSync(jw), nil
} | |
c175018 |
ModifiedIndex: createdIndex,
Parent: parent,
store: store,
ExpireTime: expireTime,
Value: value,
}
} | |
c175019 | parent,
ExpireTime: expireTime,
Children: make(map[string]*node),
store: store,
}
} | |
c175020 | "", n.store.CurrentIndex)
}
return n.Value, nil
} | |
c175021 | n.store.CurrentIndex)
}
n.Value = value
n.ModifiedIndex = index
return nil
} | |
c175022 | len(n.Children))
i := 0
for _, node := range n.Children {
nodes[i] = node
i++
}
return nodes, nil
} | |
c175023 | n.Path, n.store.CurrentIndex)
}
child, ok := n.Children[name]
if ok {
return child, nil
}
return nil, nil
} | |
c175024 | return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex)
}
n.Children[name] = child
return nil
} | |
c175025 | the operation
// is not recursive
return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
}
for _, child := range n.Children { // delete all children
child.Remove(true, true, callback)
}
// delete self
_, name := path.Split(n.Path)
if n.Parent != nil && n.Parent.Children[name] == n {
delete(n.Parent.Children, name)
if callback != nil {
callback(n.Path)
}
if !n.IsPermanent() {
n.store.ttlKeyHeap.remove(n)
}
}
return nil
} | |
c175026 | CompareMatch
case indexMatch && !valueMatch:
which = CompareValueNotMatch
case valueMatch && !indexMatch:
which = CompareIndexNotMatch
default:
which = CompareNotMatch
}
return ok, which
} | |
c175027 | return newkv
}
clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
clone.ModifiedIndex = n.ModifiedIndex
for key, child := range n.Children {
clone.Children[key] = child.Clone()
}
return clone
} | |
c175028 |
return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
} | |
c175029 | remote types.ID) bool {
t := transport.ActiveSince(remote)
return !t.IsZero() && t.Before(since)
} | |
c175030 | == self || isConnectedSince(transport, since, m.ID) {
connectedNum++
}
}
return connectedNum
} | |
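The `>= (len(members)/2)+1` threshold in c175028 is the usual strict-majority (quorum) rule for cluster progress. A tiny illustration with a hypothetical `quorum` helper:

```go
package main

import "fmt"

// quorum is the minimum number of reachable members needed for progress:
// a strict majority of the cluster.
func quorum(clusterSize int) int { return clusterSize/2 + 1 }

func main() {
	for _, n := range []int{1, 3, 5} {
		fmt.Printf("cluster of %d needs %d connected members\n", n, quorum(n))
	}
	// cluster of 1 needs 1, of 3 needs 2, of 5 needs 3
}
```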
c175031 |
}
if tm.Before(oldest) {
oldest = tm
longest = id
}
}
if uint64(longest) == 0 {
return longest, false
}
return longest, true
} | |
c175032 |
// if any data for a sector chunk is all 0, it's a torn write
for _, sect := range chunks {
isZero := true
for _, v := range sect {
if v != 0 {
isZero = false
break
}
}
if isZero {
return true
}
}
return false
} | |
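c175032 scans fixed-size sector chunks and treats an all-zero sector as evidence of a torn (partially persisted) write. A standalone sketch of the same check, assuming a 512-byte sector size and a hypothetical `isTornWrite` helper (etcd's real logic lives in the WAL decoder):

```go
package main

import "fmt"

const sectorSize = 512 // assumed minimum disk sector size

// isTornWrite reports whether any sector-sized chunk of data is entirely
// zero, suggesting the write never fully reached the disk.
func isTornWrite(data []byte) bool {
	for off := 0; off < len(data); off += sectorSize {
		end := off + sectorSize
		if end > len(data) {
			end = len(data)
		}
		isZero := true
		for _, v := range data[off:end] {
			if v != 0 {
				isZero = false
				break
			}
		}
		if isZero {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isTornWrite([]byte{1, 2, 3}))          // false
	fmt.Println(isTornWrite(make([]byte, sectorSize))) // true: all-zero sector
}
```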
c175033 | return startMockServersUnix(count)
default:
return nil, fmt.Errorf("unsupported network type: %s", network)
}
} | |
c175034 | }
}
svr := grpc.NewServer()
pb.RegisterKVServer(svr, &mockKVServer{})
ms.Servers[idx].GrpcServer = svr
ms.wg.Add(1)
go func(svr *grpc.Server, l net.Listener) {
svr.Serve(l)
}(ms.Servers[idx].GrpcServer, ms.Servers[idx].ln)
return nil
} | |
c175035 |
ms.Servers[idx].GrpcServer.Stop()
ms.Servers[idx].GrpcServer = nil
ms.Servers[idx].ln = nil
ms.wg.Done()
} | |
c175036 |
ms.StopAt(idx)
}
ms.wg.Wait()
} | |
c175037 | }
cc.AddCommand(NewCheckPerfCommand())
cc.AddCommand(NewCheckDatascaleCommand())
return cc
} | |
c175038 | check's keys.")
cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
return cmd
} | |
c175039 |
cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "The prefix for writing the datascale check's keys.")
cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
return cmd
} | |
c175040 | or equal to the given key using byte compare")
cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision")
cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
return cmd
} | |
c175041 | Usage: "returns result in sorted order"},
cli.BoolFlag{Name: "quorum, q", Usage: "require quorum for get request"},
},
Action: func(c *cli.Context) error {
getCommandFunc(c, mustNewKeyAPI(c))
return nil
},
}
} | |
c175042 | }
return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
} | |
c175043 | promhttp.Handler())
mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
} | |
c175044 | a directory)"},
cli.StringFlag{Name: "with-value", Value: "", Usage: "previous value"},
cli.IntFlag{Name: "with-index", Value: 0, Usage: "previous index"},
},
Action: func(c *cli.Context) error {
rmCommandFunc(c, mustNewKeyAPI(c))
return nil
},
}
} | |
c175045 | Dir: dir, Recursive: recursive})
cancel()
if err != nil {
handleError(c, ExitServerError, err)
}
if !resp.Node.Dir || c.GlobalString("output") != "simple" {
printResponseKey(resp, c.GlobalString("output"))
}
} | |
c175046 | rpctypes.ErrGRPCDuplicateKey
}
}
if dels.Intersects(adt.NewStringAffinePoint(k)) {
return nil, dels, rpctypes.ErrGRPCDuplicateKey
}
puts[k] = struct{}{}
}
dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
}
// collect and check this level's puts
for _, req := range reqs {
tv, ok := req.Request.(*pb.RequestOp_RequestPut)
if !ok || tv.RequestPut == nil {
continue
}
k := string(tv.RequestPut.Key)
if _, ok := puts[k]; ok {
return nil, dels, rpctypes.ErrGRPCDuplicateKey
}
if dels.Intersects(adt.NewStringAffinePoint(k)) {
return nil, dels, rpctypes.ErrGRPCDuplicateKey
}
puts[k] = struct{}{}
}
return puts, dels, nil
} | |
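c175046 rejects a transaction when a put key falls inside any previously recorded delete range, using etcd's `pkg/adt` interval tree. A minimal usage sketch, assuming the etcd v3.4-era `adt` API (`NewIntervalTree` plus string affine intervals); the import path varies by release:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/adt"
)

func main() {
	dels := adt.NewIntervalTree()
	// record a delete over the half-open key range ["a", "c")
	dels.Insert(adt.NewStringAffineInterval("a", "c"), struct{}{})

	// a put on "b" collides with the recorded delete range
	fmt.Println(dels.Intersects(adt.NewStringAffinePoint("b"))) // true
	fmt.Println(dels.Intersects(adt.NewStringAffinePoint("x"))) // false
}
```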
c175047 |
totalEventsCounter.Add(float64(n))
} | |
c175048 | return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
} | |
c175049 | return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
} | |
c175050 | return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
} | |
c175051 | return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
} | |
c175052 | return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
} | |
c175053 | return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
} | |
c175054 | e.Server to join the cluster
case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped'
}
return e.Server.StopNotify(), e.Err(), nil
} | |
c175055 | invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
}
}
}
if m && p {
if lg != nil {
lg.Fatal("invalid datadir; both member and proxy directories exist")
} else {
plog.Fatal("invalid datadir. Both member and proxy directories exist.")
}
}
if m {
return dirMember
}
if p {
return dirProxy
}
return dirEmpty
} | |
c175056 | if err != nil {
return nil, err
}
last := filepath.Join(dirpath, names[len(names)-1])
return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
} | |
c175057 |
select {
case <-l.leaderc:
l.leaderc = make(chan struct{})
default:
}
} | |
c175058 | defer l.mu.RUnlock()
return l.leaderc
} | |
c175059 | <subcommand>",
Short: "grpc-proxy related command",
}
lpc.AddCommand(newGRPCProxyStartCommand())
return lpc
} | |
c175060 | commands",
}
mc.AddCommand(NewMemberAddCommand())
mc.AddCommand(NewMemberRemoveCommand())
mc.AddCommand(NewMemberUpdateCommand())
mc.AddCommand(NewMemberListCommand())
return mc
} | |
c175061 | memberAddCommandFunc,
}
cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "comma separated peer URLs for the new member.")
return cc
} | |
c175062 | the cluster",
Run: memberRemoveCommandFunc,
}
return cc
} | |
c175063 | memberUpdateCommandFunc,
}
cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "comma separated peer URLs for the updated member.")
return cc
} | |
c175064 | set to simple, this command prints out comma-separated member lists for each endpoint.
The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs.
`,
Run: memberListCommandFunc,
}
return cc
} | |
c175065 | break
}
// quorum get to sync cluster list
gresp, gerr := cli.Get(ctx, "_")
if gerr != nil {
ExitWithError(ExitError, gerr)
}
resp.Header.MemberId = gresp.Header.MemberId
listResp, err = cli.MemberList(ctx)
}
cancel()
conf := []string{}
for _, memb := range listResp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name
if memb.ID == newID {
n = newMemberName
}
conf = append(conf, fmt.Sprintf("%s=%s", n, u))
}
}
fmt.Print("\n")
fmt.Printf("ETCD_NAME=%q\n", newMemberName)
fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
fmt.Printf("ETCD_INITIAL_ADVERTISE_PEER_URLS=%q\n", memberPeerURLs)
fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
}
} | |
c175066 | Hex", err))
}
ctx, cancel := commandCtx(cmd)
resp, err := mustClientFromCmd(cmd).MemberRemove(ctx, id)
cancel()
if err != nil {
ExitWithError(ExitError, err)
}
display.MemberRemove(id, *resp)
} | |
c175067 | arg (%v), expecting ID in Hex", err))
}
if len(memberPeerURLs) == 0 {
ExitWithError(ExitBadArgs, fmt.Errorf("member peer urls not provided"))
}
urls := strings.Split(memberPeerURLs, ",")
ctx, cancel := commandCtx(cmd)
resp, err := mustClientFromCmd(cmd).MemberUpdate(ctx, id, urls)
cancel()
if err != nil {
ExitWithError(ExitError, err)
}
display.MemberUpdate(id, *resp)
} | |
c175068 | != nil {
ExitWithError(ExitError, err)
}
display.MemberList(*resp)
} | |
c175069 | w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
return nil, err
}
return w, nil
} | |
c175070 |
return openAtIndex(lg, dirpath, snap, false)
} | |
c175071 | {
return ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var loadedSnap walpb.Snapshot
pbutil.MustUnmarshal(&loadedSnap, rec.Data)
if loadedSnap.Index == snap.Index {
if loadedSnap.Term != snap.Term {
return ErrSnapshotMismatch
}
match = true
}
// We ignore all entry and state type records as these
// are not necessary for validating the WAL contents
case entryType:
case stateType:
default:
return fmt.Errorf("unexpected block type %d", rec.Type)
}
}
if closer != nil {
closer()
}
// We do not have to read out all the WAL entries
// as the decoder is opened in read mode.
if err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
if !match {
return ErrSnapshotNotFound
}
return nil
} | |
c175072 | w.lg != nil {
w.lg.Warn("failed to close WAL", zap.Error(err))
} else {
plog.Errorf("failed to unlock during closing wal: %s", err)
}
}
}
return w.dirFile.Close()
} | |
c175073 | it is watching.
if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
// We cannot block here if the eventChan capacity is full, otherwise
// etcd will hang. eventChan capacity is full when the rate of
// notifications are higher than our send rate.
// If this happens, we close the channel.
select {
case w.eventChan <- e:
default:
// We have missed a notification. Remove the watcher.
// Removing the watcher also closes the eventChan.
w.remove()
}
return true
}
return false
} | |
c175074 | w.hub.mutex.Unlock()
close(w.eventChan)
if w.remove != nil {
w.remove()
}
} | |
c175075 | strings.Count(normalForm, "/") + depth
return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
} | |
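The `%s/%03d/k/%s` format in c175075 prefixes each v2-style key with its zero-padded slash depth, so all keys at one directory level sort into a single contiguous range. A small illustration with a hypothetical `mkPath` helper:

```go
package main

import (
	"fmt"
	"strings"
)

// mkPath encodes a path together with its slash depth; zero-padding the
// depth keeps keys at the same directory level lexically contiguous.
func mkPath(pfx, normalForm string, depth int) string {
	n := strings.Count(normalForm, "/") + depth
	return fmt.Sprintf("%s/%03d/k/%s", pfx, n, normalForm)
}

func main() {
	fmt.Println(mkPath("/v2", "/dir/key", 0))     // /v2/002/k//dir/key
	fmt.Println(mkPath("/v2", "/dir/sub/key", 0)) // /v2/003/k//dir/sub/key
}
```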
c175076 | CreatedIndex: mkV2Rev(kv.CreateRevision),
ModifiedIndex: mkV2Rev(kv.ModRevision),
}
if !n.Dir {
v := string(kv.Value)
n.Value = &v
}
return n
} | |
c175077 | if pkv != nil && pkv.CreateRevision > 0 {
return pkv
}
}
return nil
} | |
c175078 | newReport(precision),
results: make(chan Result, 16),
}
} | |
c175079 | URLsMap{}
for k, v := range m {
um[k], err = NewURLs(strings.Split(v, sep))
if err != nil {
return nil, err
}
}
return um, nil
} | |
c175080 | append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
}
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
} | |
c175081 | {
for _, u := range us {
urls = append(urls, u.String())
}
}
sort.Strings(urls)
return urls
} | |
c175082 | ""
if i := strings.Index(key, "="); i >= 0 {
key, value = key[:i], key[i+1:]
}
m[key] = append(m[key], value)
}
return m
} | |
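c175082 folds repeated `key=value` flag entries into a multimap, with a bare `key` mapping to the empty string. A self-contained sketch (hypothetical `parsePairs`):

```go
package main

import (
	"fmt"
	"strings"
)

// parsePairs splits "key=value" entries into a map of value lists;
// an entry without "=" contributes an empty-string value.
func parsePairs(pairs []string) map[string][]string {
	m := map[string][]string{}
	for _, key := range pairs {
		value := ""
		if i := strings.Index(key, "="); i >= 0 {
			key, value = key[:i], key[i+1:]
		}
		m[key] = append(m[key], value)
	}
	return m
}

func main() {
	fmt.Println(parsePairs([]string{"a=1", "a=2", "b"}))
	// map[a:[1 2] b:[]] -- "b" holds a single empty string
}
```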
c175083 | {
mux := http.NewServeMux()
etcdhttp.HandleBasic(mux, server)
handleV2(lg, mux, server, timeout)
return requestLogger(lg, mux)
} | |
c175084 | trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
if noValueOnSuccess &&
(ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap ||
ev.Action == v2store.Create || ev.Action == v2store.Update) {
ev.Node = nil
ev.PrevNode = nil
}
return json.NewEncoder(w).Encode(ev)
} | |
c175085 | "v2 response error",
zap.String("internal-server-error", err.Error()),
)
} else {
mlog.MergeError(err)
}
default:
if lg != nil {
lg.Warn(
"unexpected v2 response error",
zap.String("internal-server-error", err.Error()),
)
} else {
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
}
ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0)
ee.WriteTo(w)
}
} | |
c175086 |
i, err = strconv.ParseUint(vals[0], 10, 64)
}
return
} | |
c175087 |
b, err = strconv.ParseBool(vals[0])
}
return
} | |
c175088 | resp.Header, nil
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
return nil, err
}
}
} | |
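c175088 is the inner loop of etcd's client-side lock: each waiter blocks until the key of its predecessor in the queue is deleted. From the outside this is exposed through `clientv3/concurrency`; a hedged usage sketch (endpoint and import paths depend on your etcd release):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli) // lease-backed session
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/")
	if err := m.Lock(context.TODO()); err != nil { // blocks until predecessors are gone
		log.Fatal(err)
	}
	fmt.Println("lock acquired")
	if err := m.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```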
c175089 | struct{}{}
}
for _, v := range errorOutputPaths {
errOutputs[v] = struct{}{}
}
errOutputSlice := make([]string, 0)
if _, ok := errOutputs["/dev/null"]; ok {
// "/dev/null" to discard all
errOutputSlice = []string{"/dev/null"}
} else {
for k := range errOutputs {
errOutputSlice = append(errOutputSlice, k)
}
}
cfg.ErrorOutputPaths = errOutputSlice
sort.Strings(cfg.ErrorOutputPaths)
return cfg
} | |
c175090 | 100,
ElectionMs: 1000,
InitialElectionTickAdvance: true,
LPUrls: []url.URL{*lpurl},
LCUrls: []url.URL{*lcurl},
APUrls: []url.URL{*apurl},
ACUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster",
StrictReconfigCheck: DefaultStrictReconfigCheck,
Metrics: "basic",
EnableV2: DefaultEnableV2,
CORS: map[string]struct{}{"*": {}},
HostWhitelist: map[string]struct{}{"*": {}},
AuthToken: "simple",
BcryptCost: uint(bcrypt.DefaultCost),
PreVote: false, // TODO: enable by default in v3.5
loggerMu: new(sync.RWMutex),
logger: nil,
Logger: "capnslog",
DeprecatedLogOutput: []string{DefaultLogOutput},
LogOutputs: []string{DefaultLogOutput},
Debug: false,
LogPkgLevels: "",
}
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
return cfg
} | |
c175091 | strings.Join(clusterStrs, ",")
if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
}
urlsmap, err = types.NewURLsMap(clusterStr)
// only etcd member must belong to the discovered cluster.
// proxy does not need to belong to the discovered cluster.
if which == "etcd" {
if _, ok := urlsmap[cfg.Name]; !ok {
return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
}
}
default:
// We're statically configured, and cluster has appropriately been set.
urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
}
return urlsmap, token, err
} | |
c175092 |
defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
if httpCerr == nil {
clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
}
if lg != nil {
lg.Info(
"get cluster for etcd-server SRV",
zap.String("service-scheme", "http"),
zap.String("service-name", "etcd-server"+serviceNameSuffix),
zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
zap.Strings("found-cluster", clusterStrs),
zap.Error(httpCerr),
)
}
return clusterStrs, cerr
} | |
c175093 | support /etc/hosts ?
continue
}
if net.ParseIP(host) == nil {
return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
}
}
return nil
} | |
c175094 |
if terr != nil {
err = terr
continue
}
n := ""
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName++
}
// SRV records have a trailing dot but URL shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
if ok && url.Scheme != scheme {
err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
} else {
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
}
}
if len(stringParts) == 0 {
return err
}
return nil
}
err := updateNodeMap(service, serviceScheme)
if err != nil {
return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err)
}
return stringParts, nil
} | |
c175095 |
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
} | |
c175096 |
if serviceName != "" {
return fmt.Sprintf("%s-%s", service, serviceName)
}
return service
} | |
c175097 | names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
if op.ext != "" {
tss := make([]string, 0)
for _, v := range names {
if filepath.Ext(v) == op.ext {
tss = append(tss, v)
}
}
names = tss
}
return names, nil
} | |
c175098 | cancel()
if err != nil {
ExitWithError(ExitError, err)
}
fmt.Printf("Compacted with revision %d\n", rev)
} | |
c175099 |
if err != nil {
ExitWithError(ExitError, err)
}
fmt.Printf("Defragmented %q\n", ep)
} |