_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q11700 | CommitsAfterDate | train | func (repo *Repository) CommitsAfterDate(date string) (*list.List, error) {
	// CommitsAfterDate returns the commits made since the given date, parsed
	// from `git log` pretty-format output.
	cmd := NewCommand("log", _PRETTY_LOG_FORMAT, "--since="+date)
	stdout, err := cmd.RunInDirBytes(repo.Path)
	if err != nil {
		return nil, err
	}
	return repo.parsePrettyFormatLogToList(stdout)
} | go | {
"resource": ""
} |
q11701 | GetTag | train | func (repo *Repository) GetTag(name string) (*Tag, error) {
// GetTag resolves the named tag via `git show-ref --tags`, parses the
// referenced object ID, and returns the populated Tag.
stdout, err := NewCommand("show-ref", "--tags", name).RunInDir(repo.Path)
if err != nil {
return nil, err
}
// show-ref prints "<sha> <ref>"; the first space-separated field is the ID.
id, err := NewIDFromString(strings.Split(stdout, " ")[0])
if err != nil {
return nil, err
}
tag, err := repo.getTag(id)
if err != nil {
return nil, err
}
// getTag does not know the ref name, so fill it in here.
tag.Name = name
return tag, nil
} | go | {
"resource": ""
} |
q11702 | GetTags | train | func (repo *Repository) GetTags() ([]string, error) {
	// GetTags returns all tag names in the repository, newest first. Git
	// >= 2.4.9 sorts for us via --sort=-creatordate; older versions are
	// sorted and reversed manually.
	cmd := NewCommand("tag", "-l")
	gitSorts := version.Compare(gitVersion, "2.4.9", ">=")
	if gitSorts {
		cmd.AddArguments("--sort=-creatordate")
	}
	stdout, err := cmd.RunInDir(repo.Path)
	if err != nil {
		return nil, err
	}
	// Drop the trailing empty element produced by the final newline.
	tags := strings.Split(stdout, "\n")
	tags = tags[:len(tags)-1]
	if !gitSorts {
		version.Sort(tags)
		// Reverse in place so newest tags come first.
		for left, right := 0, len(tags)-1; left < right; left, right = left+1, right-1 {
			tags[left], tags[right] = tags[right], tags[left]
		}
	}
	return tags, nil
} | go | {
"resource": ""
} |
q11703 | DeleteTag | train | func (repo *Repository) DeleteTag(name string) error {
	// DeleteTag removes the named tag from the repository via `git tag -d`.
	_, err := NewCommand("tag", "-d", name).RunInDir(repo.Path)
	return err
} | go | {
"resource": ""
} |
q11704 | UnescapeChars | train | func UnescapeChars(in []byte) []byte {
// UnescapeChars replaces the escaped backslash and tab sequences with their
// literal forms, returning the input slice untouched when it contains
// neither a backslash nor a tab byte.
// LEGACY [Go 1.7]: use more expressive bytes.ContainsAny
if bytes.IndexAny(in, "\\\t") == -1 {
return in
}
out := bytes.Replace(in, escapedSlash, regularSlash, -1)
out = bytes.Replace(out, escapedTab, regularTab, -1)
return out
} | go | {
"resource": ""
} |
q11705 | ListEntries | train | func (t *Tree) ListEntries() (Entries, error) {
	// ListEntries returns the direct entries of this tree, parsing them from
	// `git ls-tree` on first use and caching the result for later calls.
	if t.entriesParsed {
		return t.entries, nil
	}
	stdout, err := NewCommand("ls-tree", t.ID.String()).RunInDirBytes(t.repo.Path)
	if err != nil {
		// BUG FIX: entriesParsed used to be set before running the command,
		// so a transient failure was cached and every later call returned
		// (nil, nil). Only mark the cache valid after a full success.
		return nil, err
	}
	entries, err := parseTreeData(t, stdout)
	if err != nil {
		return nil, err
	}
	t.entries = entries
	t.entriesParsed = true
	return t.entries, nil
} | go | {
"resource": ""
} |
q11706 | IsRepoURLAccessible | train | func IsRepoURLAccessible(opts NetworkOptions) bool {
	// IsRepoURLAccessible reports whether `git ls-remote` succeeds against
	// the given URL within the configured timeout (-1 means no timeout).
	cmd := NewCommand("ls-remote", "-q", "-h", opts.URL, "HEAD")
	if opts.Timeout <= 0 {
		opts.Timeout = -1
	}
	_, err := cmd.RunTimeout(opts.Timeout)
	return err == nil
} | go | {
"resource": ""
} |
q11707 | InitRepository | train | func InitRepository(repoPath string, bare bool) error {
	// InitRepository creates the target directory (if needed) and runs
	// `git init` in it, optionally as a bare repository.
	// BUG FIX: the MkdirAll error was previously discarded, which turned a
	// permissions problem into a confusing `git init` failure later on.
	if err := os.MkdirAll(repoPath, os.ModePerm); err != nil {
		return err
	}
	cmd := NewCommand("init")
	if bare {
		cmd.AddArguments("--bare")
	}
	_, err := cmd.RunInDir(repoPath)
	return err
} | go | {
"resource": ""
} |
q11708 | OpenRepository | train | func OpenRepository(repoPath string) (*Repository, error) {
	// OpenRepository resolves repoPath to an absolute directory and returns
	// a Repository handle with fresh commit and tag caches.
	absPath, err := filepath.Abs(repoPath)
	if err != nil {
		return nil, err
	}
	if !isDir(absPath) {
		return nil, errors.New("no such file or directory")
	}
	repo := &Repository{
		Path:        absPath,
		commitCache: newObjectCache(),
		tagCache:    newObjectCache(),
	}
	return repo, nil
} | go | {
"resource": ""
} |
q11709 | Clone | train | func Clone(from, to string, opts CloneRepoOptions) (err error) {
toDir := path.Dir(to)
if err = os.MkdirAll(toDir, os.ModePerm); err != nil {
return err
}
cmd := NewCommand("clone")
if opts.Mirror {
cmd.AddArguments("--mirror")
}
if opts.Bare {
cmd.AddArguments("--bare")
}
if opts.Quiet {
cmd.AddArguments("--quiet")
}
if len(opts.Branch) > 0 {
cmd.AddArguments("-b", opts.Branch)
}
cmd.AddArguments(from, to)
if opts.Timeout <= 0 {
opts.Timeout = -1
}
_, err = cmd.RunTimeout(opts.Timeout)
return err
} | go | {
"resource": ""
} |
q11710 | Fetch | train | func Fetch(repoPath string, opts FetchRemoteOptions) error {
cmd := NewCommand("fetch")
if opts.Prune {
cmd.AddArguments("--prune")
}
if opts.Timeout <= 0 {
opts.Timeout = -1
}
_, err := cmd.RunInDirTimeout(opts.Timeout, repoPath)
return err
} | go | {
"resource": ""
} |
q11711 | Pull | train | func Pull(repoPath string, opts PullRemoteOptions) error {
cmd := NewCommand("pull")
if opts.Rebase {
cmd.AddArguments("--rebase")
}
if opts.All {
cmd.AddArguments("--all")
} else {
cmd.AddArguments(opts.Remote)
cmd.AddArguments(opts.Branch)
}
if opts.Timeout <= 0 {
opts.Timeout = -1
}
_, err := cmd.RunInDirTimeout(opts.Timeout, repoPath)
return err
} | go | {
"resource": ""
} |
q11712 | PushWithEnvs | train | func PushWithEnvs(repoPath, remote, branch string, envs []string) error {
_, err := NewCommand("push", remote, branch).AddEnvs(envs...).RunInDir(repoPath)
return err
} | go | {
"resource": ""
} |
q11713 | Push | train | func Push(repoPath, remote, branch string) error {
return PushWithEnvs(repoPath, remote, branch, nil)
} | go | {
"resource": ""
} |
q11714 | Checkout | train | func Checkout(repoPath string, opts CheckoutOptions) error {
	// Checkout switches the working tree to opts.Branch. When OldBranch is
	// set, a new branch is created from it first (`git checkout -b <new> <old>`).
	cmd := NewCommand("checkout")
	if len(opts.OldBranch) > 0 {
		cmd.AddArguments("-b", opts.Branch, opts.OldBranch)
	} else {
		cmd.AddArguments(opts.Branch)
	}
	if opts.Timeout <= 0 {
		opts.Timeout = -1
	}
	_, err := cmd.RunInDirTimeout(opts.Timeout, repoPath)
	return err
} | go | {
"resource": ""
} |
q11715 | ResetHEAD | train | func ResetHEAD(repoPath string, hard bool, revision string) error {
cmd := NewCommand("reset")
if hard {
cmd.AddArguments("--hard")
}
_, err := cmd.AddArguments(revision).RunInDir(repoPath)
return err
} | go | {
"resource": ""
} |
q11716 | MoveFile | train | func MoveFile(repoPath, oldTreeName, newTreeName string) error {
_, err := NewCommand("mv").AddArguments(oldTreeName, newTreeName).RunInDir(repoPath)
return err
} | go | {
"resource": ""
} |
q11717 | GetRepoSize | train | func GetRepoSize(repoPath string) (*CountObject, error) {
// GetRepoSize runs `git count-objects -v` and parses its "label: value"
// output into a CountObject. The size fields are multiplied by 1024,
// presumably converting the KiB figures git reports into bytes.
cmd := NewCommand("count-objects", "-v")
stdout, err := cmd.RunInDir(repoPath)
if err != nil {
return nil, err
}
countObject := new(CountObject)
// NOTE(review): the hard-coded slice offsets below look like
// len(label) + 2 to skip "<label>: " -- verify against the _STAT_*
// constant definitions; they silently break if a label ever changes.
for _, line := range strings.Split(stdout, "\n") {
switch {
case strings.HasPrefix(line, _STAT_COUNT):
countObject.Count = com.StrTo(line[7:]).MustInt64()
case strings.HasPrefix(line, _STAT_SIZE):
countObject.Size = com.StrTo(line[6:]).MustInt64() * 1024
case strings.HasPrefix(line, _STAT_IN_PACK):
countObject.InPack = com.StrTo(line[9:]).MustInt64()
case strings.HasPrefix(line, _STAT_PACKS):
countObject.Packs = com.StrTo(line[7:]).MustInt64()
case strings.HasPrefix(line, _STAT_SIZE_PACK):
countObject.SizePack = com.StrTo(line[11:]).MustInt64() * 1024
case strings.HasPrefix(line, _STAT_PRUNE_PACKABLE):
countObject.PrunePackable = com.StrTo(line[16:]).MustInt64()
case strings.HasPrefix(line, _STAT_GARBAGE):
countObject.Garbage = com.StrTo(line[9:]).MustInt64()
case strings.HasPrefix(line, _STAT_SIZE_GARBAGE):
countObject.SizeGarbage = com.StrTo(line[14:]).MustInt64() * 1024
}
}
return countObject, nil
} | go | {
"resource": ""
} |
q11718 | UsingClient | train | func UsingClient(httpClient *http.Client) Option {
return func(c *Client) {
if httpClient == nil {
return
}
transport := getTransportLayer(httpClient, 0)
httpClient.Transport = transport
c.client = httpClient
}
} | go | {
"resource": ""
} |
q11719 | NewClient | train | func NewClient(baseURL string, options ...Option) (*Client, error) {
baseURL = formatBaseURL(baseURL)
if _, err := url.Parse(baseURL); err != nil {
return nil, err
}
c := &Client{baseURL: baseURL}
for _, opt := range options {
opt(c)
}
if c.client == nil {
httpClient := &http.Client{}
UsingClient(httpClient)(c)
}
return c, nil
} | go | {
"resource": ""
} |
q11720 | IsSubjectNotFound | train | func IsSubjectNotFound(err error) bool {
	// IsSubjectNotFound reports whether err is a registry ResourceError
	// carrying the "subject not found" error code.
	if err == nil {
		return false
	}
	resErr, ok := err.(ResourceError)
	return ok && resErr.ErrorCode == subjectNotFoundCode
} | go | {
"resource": ""
} |
q11721 | IsSchemaNotFound | train | func IsSchemaNotFound(err error) bool {
	// IsSchemaNotFound reports whether err is a registry ResourceError
	// carrying the "schema not found" error code.
	if err == nil {
		return false
	}
	resErr, ok := err.(ResourceError)
	return ok && resErr.ErrorCode == schemaNotFoundCode
} | go | {
"resource": ""
} |
q11722 | Versions | train | func (c *Client) Versions(subject string) (versions []int, err error) {
if subject == "" {
err = errRequired("subject")
return
}
// # List all versions of a particular subject
// GET /subjects/(string: subject)/versions
path := fmt.Sprintf(subjectPath, subject+"/versions")
resp, respErr := c.do(http.MethodGet, path, "", nil)
if respErr != nil {
err = respErr
return
}
err = c.readJSON(resp, &versions)
return
} | go | {
"resource": ""
} |
q11723 | IsRegistered | train | func (c *Client) IsRegistered(subject, schema string) (bool, Schema, error) {
var fs Schema
sc := schemaOnlyJSON{schema}
send, err := json.Marshal(sc)
if err != nil {
return false, fs, err
}
path := fmt.Sprintf(subjectPath, subject)
resp, err := c.do(http.MethodPost, path, "", send)
if err != nil {
// schema not found?
if IsSchemaNotFound(err) {
return false, fs, nil
}
// error?
return false, fs, err
}
if err = c.readJSON(resp, &fs); err != nil {
return true, fs, err // found but error when unmarshal.
}
// so we have a schema.
return true, fs, nil
} | go | {
"resource": ""
} |
q11724 | GetSchemaBySubject | train | func (c *Client) GetSchemaBySubject(subject string, versionID int) (Schema, error) {
return c.getSubjectSchemaAtVersion(subject, versionID)
} | go | {
"resource": ""
} |
q11725 | getConfigSubject | train | func (c *Client) getConfigSubject(subject string) (Config, error) {
	// getConfigSubject fetches the per-subject compatibility config. A 404
	// (no subject-level config) is tolerated and yields the zero Config.
	config := Config{}
	path := fmt.Sprintf("/config/%s", subject)
	resp, respErr := c.do(http.MethodGet, path, "", nil)
	if respErr != nil {
		// BUG FIX: the bare type assertion respErr.(ResourceError) panicked
		// whenever c.do returned a non-ResourceError error (e.g. a network
		// failure). Use the comma-ok form and propagate anything that is
		// not a ResourceError 404.
		resErr, ok := respErr.(ResourceError)
		if !ok || resErr.ErrorCode != 404 {
			return config, respErr
		}
	}
	var err error
	if resp != nil {
		err = c.readJSON(resp, &config)
	}
	return config, err
} | go | {
"resource": ""
} |
q11726 | IsSchemaCompatible | train | func (c *Client) IsSchemaCompatible(subject string, avroSchema string, versionID int) (bool, error) {
return c.isSchemaCompatibleAtVersion(subject, avroSchema, versionID)
} | go | {
"resource": ""
} |
q11727 | Set | train | func (i *Int64Value) Set(s string) error {
	// Set parses s as a signed 64-bit integer (base auto-detected from the
	// prefix) and stores it.
	// BUG FIX: the previous version assigned before checking the error, so a
	// bad input clobbered the existing value with zero.
	v, err := strconv.ParseInt(s, 0, 64)
	if err != nil {
		return err
	}
	*i = Int64Value(v)
	return nil
} | go | {
"resource": ""
} |
q11728 | Set | train | func (i *UintValue) Set(s string) error {
	// Set parses s as an unsigned 64-bit integer (base auto-detected from
	// the prefix) and stores it.
	// BUG FIX: the previous version assigned before checking the error, so a
	// bad input clobbered the existing value with zero.
	v, err := strconv.ParseUint(s, 0, 64)
	if err != nil {
		return err
	}
	*i = UintValue(v)
	return nil
} | go | {
"resource": ""
} |
q11729 | Set | train | func (s *StringValue) Set(val string) error {
*s = StringValue(val)
return nil
} | go | {
"resource": ""
} |
q11730 | Set | train | func (d *Duration) Set(s string) error {
	// Set parses s either as a bare integer number of seconds or as a
	// time.ParseDuration string such as "1h30m".
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		*d = Duration(time.Duration(v) * time.Second)
		return nil
	}
	v, err := time.ParseDuration(s)
	if err != nil {
		// BUG FIX: do not overwrite the current value when parsing fails.
		return err
	}
	*d = Duration(v)
	return nil
} | go | {
"resource": ""
} |
q11731 | UnmarshalText | train | func (d *Duration) UnmarshalText(text []byte) error {
return d.Set(string(text))
} | go | {
"resource": ""
} |
q11732 | MarshalJSON | train | func (d *Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Duration(*d))
} | go | {
"resource": ""
} |
q11733 | UnmarshalJSON | train | func (d *Duration) UnmarshalJSON(text []byte) error {
	// UnmarshalJSON accepts either a bare JSON integer (nanoseconds) or a
	// quoted duration string such as "1h30m".
	if v, err := strconv.ParseInt(string(text), 10, 64); err == nil {
		*d = Duration(time.Duration(v))
		return nil
	}
	// We use json unmarshal on value because we have the quoted version
	var value string
	if err := json.Unmarshal(text, &value); err != nil {
		return err
	}
	v, err := time.ParseDuration(value)
	if err != nil {
		// BUG FIX: leave the current value untouched when parsing fails
		// instead of clobbering it with zero.
		return err
	}
	*d = Duration(v)
	return nil
} | go | {
"resource": ""
} |
q11734 | Set | train | func (t *TimeValue) Set(s string) error {
v, err := time.Parse(time.RFC3339, s)
*t = TimeValue(v)
return err
} | go | {
"resource": ""
} |
q11735 | Set | train | func (s *SliceStrings) Set(str string) error {
	// Set splits str on commas and semicolons and appends the resulting
	// pieces to the slice. It never fails.
	isSep := func(r rune) bool {
		return r == ',' || r == ';'
	}
	*s = append(*s, strings.FieldsFunc(str, isSep)...)
	return nil
} | go | {
"resource": ""
} |
q11736 | getTypesRecursive | train | func getTypesRecursive(objValue reflect.Value, flagMap map[string]reflect.StructField, key string) error {
name := key
switch objValue.Kind() {
case reflect.Struct:
for i := 0; i < objValue.NumField(); i++ {
if objValue.Type().Field(i).Anonymous {
if err := getTypesRecursive(objValue.Field(i), flagMap, name); err != nil {
return err
}
} else if len(objValue.Type().Field(i).Tag.Get("description")) > 0 {
fieldName := objValue.Type().Field(i).Name
if !isExported(fieldName) {
return fmt.Errorf("field %s is an unexported field", fieldName)
}
if tag := objValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
fieldName = tag
}
if len(key) == 0 {
name = strings.ToLower(fieldName)
} else {
name = key + "." + strings.ToLower(fieldName)
}
if _, ok := flagMap[name]; ok {
return fmt.Errorf("tag already exists: %s", name)
}
flagMap[name] = objValue.Type().Field(i)
if err := getTypesRecursive(objValue.Field(i), flagMap, name); err != nil {
return err
}
}
}
case reflect.Ptr:
if len(key) > 0 {
field := flagMap[name]
field.Type = reflect.TypeOf(false)
flagMap[name] = field
}
typ := objValue.Type().Elem()
inst := reflect.New(typ).Elem()
if err := getTypesRecursive(inst, flagMap, name); err != nil {
return err
}
default:
return nil
}
return nil
} | go | {
"resource": ""
} |
q11737 | GetBoolFlags | train | func GetBoolFlags(config interface{}) ([]string, error) {
flagMap := make(map[string]reflect.StructField)
if err := getTypesRecursive(reflect.ValueOf(config), flagMap, ""); err != nil {
return []string{}, err
}
flags := make([]string, 0, len(flagMap))
for f, structField := range flagMap {
if structField.Type.Kind() == reflect.Bool {
flags = append(flags, f)
}
}
return flags, nil
} | go | {
"resource": ""
} |
q11738 | GetFlags | train | func GetFlags(config interface{}) ([]string, error) {
	// GetFlags returns the name of every flag discovered by walking the
	// config struct recursively.
	flagMap := make(map[string]reflect.StructField)
	err := getTypesRecursive(reflect.ValueOf(config), flagMap, "")
	if err != nil {
		return []string{}, err
	}
	flags := make([]string, 0, len(flagMap))
	for name := range flagMap {
		flags = append(flags, name)
	}
	return flags, nil
} | go | {
"resource": ""
} |
q11739 | setPointersNil | train | func setPointersNil(objValue reflect.Value) (reflect.Value, error) {
switch {
case objValue.Kind() != reflect.Ptr:
return objValue, fmt.Errorf("parameters objValue must be a not-nil pointer on a struct, not a %s", objValue.Kind())
case objValue.IsNil():
return objValue, errors.New("parameters objValue must be a not-nil pointer")
case objValue.Elem().Kind() != reflect.Struct:
// fmt.Printf("Parameters objValue must be a not-nil pointer on a struct, not a pointer on a %s\n", objValue.Elem().Kind().String())
return objValue, nil
}
// Clone
starObjValue := objValue.Elem()
nilPointersObjVal := reflect.New(starObjValue.Type())
starNilPointersObjVal := nilPointersObjVal.Elem()
starNilPointersObjVal.Set(starObjValue)
for i := 0; i < nilPointersObjVal.Elem().NumField(); i++ {
if field := nilPointersObjVal.Elem().Field(i); field.Kind() == reflect.Ptr && field.CanSet() {
field.Set(reflect.Zero(field.Type()))
}
}
return nilPointersObjVal, nil
} | go | {
"resource": ""
} |
q11740 | setFields | train | func setFields(fieldValue reflect.Value, val parse.Parser) error {
if fieldValue.CanSet() {
fieldValue.Set(reflect.ValueOf(val).Elem().Convert(fieldValue.Type()))
} else {
return fmt.Errorf("%s is not settable", fieldValue.Type().String())
}
return nil
} | go | {
"resource": ""
} |
q11741 | PrintHelp | train | func PrintHelp(flagMap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser) error {
return PrintHelpWithCommand(flagMap, defaultValmap, parsers, nil, nil)
} | go | {
"resource": ""
} |
q11742 | PrintError | train | func PrintError(err error, flagMap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser) error {
if err != flag.ErrHelp {
fmt.Printf("Error: %s\n", err)
}
if !strings.Contains(err.Error(), ":No parser for type") {
_ = PrintHelp(flagMap, defaultValmap, parsers)
}
return err
} | go | {
"resource": ""
} |
q11743 | PrintHelpWithCommand | train | func PrintHelpWithCommand(flagMap map[string]reflect.StructField, defaultValMap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser, cmd *Command, subCmd []*Command) error {
// Hide command from help
if cmd != nil && cmd.HideHelp {
return fmt.Errorf("command %s not found", cmd.Name)
}
// Define a templates
// Using POSXE STD : http://pubs.opengroup.org/onlinepubs/9699919799/
const helper = `{{if .ProgDescription}}{{.ProgDescription}}
{{end}}Usage: {{.ProgName}} [flags] <command> [<arguments>]
Use "{{.ProgName}} <command> --help" for help on any command.
{{if .SubCommands}}
Commands:{{range $subCmdName, $subCmdDesc := .SubCommands}}
{{printf "\t%-50s %s" $subCmdName $subCmdDesc}}{{end}}
{{end}}
Flag's usage: {{.ProgName}} [--flag=flag_argument] [-f[flag_argument]] ... set flag_argument to flag(s)
or: {{.ProgName}} [--flag[=true|false| ]] [-f[true|false| ]] ... set true/false to boolean flag(s)
Flags:
`
// Use a struct to give data to template
type TempStruct struct {
ProgName string
ProgDescription string
SubCommands map[string]string
}
tempStruct := TempStruct{}
if cmd != nil {
tempStruct.ProgName = cmd.Name
tempStruct.ProgDescription = cmd.Description
tempStruct.SubCommands = map[string]string{}
if len(subCmd) > 1 && cmd == subCmd[0] {
for _, c := range subCmd[1:] {
if !c.HideHelp {
tempStruct.SubCommands[c.Name] = c.Description
}
}
}
} else {
_, tempStruct.ProgName = path.Split(os.Args[0])
}
// Run Template
tmplHelper, err := template.New("helper").Parse(helper)
if err != nil {
return err
}
err = tmplHelper.Execute(os.Stdout, tempStruct)
if err != nil {
return err
}
return printFlagsDescriptionsDefaultValues(flagMap, defaultValMap, parsers, os.Stdout)
} | go | {
"resource": ""
} |
q11744 | PrintErrorWithCommand | train | func PrintErrorWithCommand(err error, flagMap map[string]reflect.StructField, defaultValMap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser, cmd *Command, subCmd []*Command) error {
if err != flag.ErrHelp {
fmt.Printf("Error here : %s\n", err)
}
if errHelp := PrintHelpWithCommand(flagMap, defaultValMap, parsers, cmd, subCmd); errHelp != nil {
return errHelp
}
return err
} | go | {
"resource": ""
} |
q11745 | New | train | func New(rootCommand *Command, args []string) *Flaeg {
var f Flaeg
f.commands = []*Command{rootCommand}
f.args = args
f.customParsers = map[reflect.Type]parse.Parser{}
return &f
} | go | {
"resource": ""
} |
q11746 | AddCommand | train | func (f *Flaeg) AddCommand(command *Command) {
f.commands = append(f.commands, command)
} | go | {
"resource": ""
} |
q11747 | AddParser | train | func (f *Flaeg) AddParser(typ reflect.Type, parser parse.Parser) {
f.customParsers[typ] = parser
} | go | {
"resource": ""
} |
q11748 | Run | train | func (f *Flaeg) Run() error {
if f.calledCommand == nil {
if _, _, err := f.findCommandWithCommandArgs(); err != nil {
return err
}
}
if _, err := f.Parse(f.calledCommand); err != nil {
return err
}
return f.calledCommand.Run()
} | go | {
"resource": ""
} |
q11749 | HealthCheck | train | func (d *DockerDiscovery) HealthCheck(svc *service.Service) (string, string) {
container, err := d.inspectContainer(svc)
if err != nil {
return "", ""
}
return container.Config.Labels["HealthCheck"], container.Config.Labels["HealthCheckArgs"]
} | go | {
"resource": ""
} |
q11750 | Run | train | func (d *DockerDiscovery) Run(looper director.Looper) {
// Run starts the discovery loop: a background goroutine manages the Docker
// event connection while the looper processes events, periodically refreshes
// the full container list, and drains the inspect cache.
connQuitChan := make(chan bool)
go d.manageConnection(connQuitChan)
go func() {
// Loop around, process any events which came in, and
// periodically fetch the whole container list
looper.Loop(func() error {
// NOTE(review): both time.After timers are re-created on every pass
// through this select, so if CacheDrainInterval > sleepInterval the
// drain case may never fire -- confirm whether that is intentional.
select {
case event := <-d.events:
if event == nil {
// This usually happens because of a Docker restart.
// Sleep, let us reconnect in the background, then loop.
return nil
}
log.Debugf("Event: %#v\n", event)
d.handleEvent(*event)
case <-time.After(d.sleepInterval):
d.getContainers()
case <-time.After(CacheDrainInterval):
d.containerCache.Drain(len(d.services))
}
return nil
})
// Propagate quit channel message
close(connQuitChan)
}()
} | go | {
"resource": ""
} |
q11751 | Services | train | func (d *DockerDiscovery) Services() []service.Service {
	// Services returns a snapshot copy of the currently known services,
	// taken under the read lock so callers never see concurrent mutation.
	d.RLock()
	defer d.RUnlock()
	snapshot := make([]service.Service, 0, len(d.services))
	for _, svc := range d.services {
		snapshot = append(snapshot, *svc)
	}
	return snapshot
} | go | {
"resource": ""
} |
q11752 | Listeners | train | func (d *DockerDiscovery) Listeners() []ChangeListener {
var listeners []ChangeListener
for _, cntnr := range d.services {
container, err := d.inspectContainer(cntnr)
if err != nil {
continue
}
listener := d.listenerForContainer(container)
if listener != nil {
listeners = append(listeners, *listener)
}
}
return listeners
} | go | {
"resource": ""
} |
q11753 | listenerForContainer | train | func (d *DockerDiscovery) listenerForContainer(cntnr *docker.Container) *ChangeListener {
// listenerForContainer builds a ChangeListener pointing at the container's
// /sidecar/update endpoint when it carries the SidecarListener label.
// Returns nil when the label is absent or cannot be resolved to a port.
// See if the container has the SidecarListener label, which
// will tell us the ServicePort of the port that should be
// subscribed to Sidecar events.
svcPortStr, ok := cntnr.Config.Labels["SidecarListener"]
if !ok {
return nil
}
// Be careful about ID matching
// Truncate to 12 chars -- presumably matching the short-ID form used when
// services were registered; verify against the registration path.
id := cntnr.ID
if len(id) > 12 {
id = id[:12]
}
svc := d.findServiceByID(id)
if svc == nil {
return nil
}
listenPort := portForServicePort(svc, svcPortStr, "tcp") // We only do HTTP (TCP)
// nil is returned when there is no match (an older comment said -1)
if listenPort == nil {
log.Warnf(
"SidecarListener label found on %s, but no matching ServicePort! '%s'",
svc.ID, svcPortStr,
)
return nil
}
return &ChangeListener{
Name: svc.ListenerName(),
Url: fmt.Sprintf("http://%s:%d/sidecar/update", listenPort.IP, listenPort.Port),
}
} | go | {
"resource": ""
} |
q11754 | portForServicePort | train | func portForServicePort(svc *service.Service, portStr string, pType string) *service.Port {
// Look up the ServicePort and translate to Docker port
svcPort, err := strconv.ParseInt(portStr, 10, 64)
if err != nil {
log.Warnf(
"SidecarListener label found on %s, can't decode port '%s'",
svc.ID, portStr,
)
return nil
}
for _, port := range svc.Ports {
if port.ServicePort == svcPort && port.Type == pType {
return &port
}
}
return nil
} | go | {
"resource": ""
} |
q11755 | Drain | train | func (c *ContainerCache) Drain(newSize int) {
c.Lock()
defer c.Unlock()
// Make a new one, leave the old one for GC
c.cache = make(map[string]*docker.Container, newSize)
} | go | {
"resource": ""
} |
q11756 | Prune | train | func (c *ContainerCache) Prune(liveContainers map[string]interface{}) {
c.Lock()
defer c.Unlock()
for id := range c.cache {
if _, ok := liveContainers[id]; !ok {
delete(c.cache, id)
}
}
} | go | {
"resource": ""
} |
q11757 | Get | train | func (c *ContainerCache) Get(svcID string) *docker.Container {
c.RLock()
defer c.RUnlock()
if container, ok := c.cache[svcID]; ok {
return container
}
return nil
} | go | {
"resource": ""
} |
q11758 | HealthCheck | train | func (d *MultiDiscovery) HealthCheck(svc *service.Service) (string, string) {
for _, disco := range d.Discoverers {
if healthCheck, healthCheckArgs := disco.HealthCheck(svc); healthCheck != "" {
return healthCheck, healthCheckArgs
}
}
return "", ""
} | go | {
"resource": ""
} |
q11759 | Services | train | func (d *MultiDiscovery) Services() []service.Service {
	// Services merges the service lists from every underlying discoverer.
	// Appending an empty list is a no-op, so no guard is needed.
	var combined []service.Service
	for _, disco := range d.Discoverers {
		combined = append(combined, disco.Services()...)
	}
	return combined
} | go | {
"resource": ""
} |
q11760 | ShouldNotify | train | func ShouldNotify(oldStatus int, newStatus int) bool {
// ShouldNotify is the state-machine filter deciding whether a status
// transition is significant enough to push to downstream listeners.
// ALIVE and TOMBSTONE transitions always notify; UNKNOWN/UNHEALTHY only
// notify when the service was previously ALIVE.
log.Debugf("Checking event. OldStatus: %s NewStatus: %s",
service.StatusString(oldStatus), service.StatusString(newStatus),
)
// Compare old and new states to find significant changes only
switch newStatus {
case service.ALIVE:
return true
case service.TOMBSTONE:
return true
case service.UNKNOWN:
if oldStatus == service.ALIVE {
return true
}
case service.UNHEALTHY:
if oldStatus == service.ALIVE {
return true
}
default:
log.Errorf("Got unknown service change status: %d", newStatus)
return false
}
// Transitions not matched above (e.g. UNKNOWN -> UNHEALTHY) are dropped.
log.Debugf("Skipped HAproxy update due to state machine check")
return false
} | go | {
"resource": ""
} |
q11761 | FetchState | train | func FetchState(url string) (*catalog.ServicesState, error) {
	// FetchState downloads and decodes the complete Sidecar services state
	// from the given URL, using a 5 second request timeout.
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the response body was never closed, leaking the underlying
	// connection on every call.
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return nil, fmt.Errorf("Bad status code on state fetch: %d", resp.StatusCode)
	}
	// Renamed from `bytes`, which shadowed the bytes package.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return catalog.Decode(body)
} | go | {
"resource": ""
} |
q11762 | IsSubscribed | train | func (rcvr *Receiver) IsSubscribed(svcName string) bool {
	// IsSubscribed reports whether we care about events for svcName.
	// An empty subscription list means "subscribe to everything".
	subs := rcvr.Subscriptions
	if len(subs) == 0 {
		return true
	}
	for i := range subs {
		if subs[i] == svcName {
			return true
		}
	}
	return false
} | go | {
"resource": ""
} |
q11763 | Subscribe | train | func (rcvr *Receiver) Subscribe(svcName string) {
for _, subName := range rcvr.Subscriptions {
if subName == svcName {
return
}
}
rcvr.Subscriptions = append(rcvr.Subscriptions, svcName)
} | go | {
"resource": ""
} |
q11764 | ProcessUpdates | train | func (rcvr *Receiver) ProcessUpdates() {
// ProcessUpdates consumes reload requests from ReloadChan, collapsing any
// backlog into a single OnUpdate callback per loop pass so a burst of
// changes triggers one downstream reload instead of many.
if rcvr.Looper == nil {
log.Error("Unable to ProcessUpdates(), Looper is nil in receiver!")
return
}
rcvr.Looper.Loop(func() error {
// Batch up to RELOAD_BUFFER number updates into a
// single update.
// (RELOAD_BUFFER is presumably the channel capacity -- confirm where
// ReloadChan is constructed.)
first := <-rcvr.ReloadChan
pending := len(rcvr.ReloadChan)
// Call the callback
if rcvr.OnUpdate == nil {
log.Error("OnUpdate() callback not defined!")
} else {
rcvr.StateLock.Lock()
// Copy the state while locked so we don't have it change
// under us while writing and we don't hold onto the lock the
// whole time we're writing to disk (e.g. in haproxy-api).
var tmpState *catalog.ServicesState
tmpState = deepcopy.Copy(rcvr.CurrentState).(*catalog.ServicesState)
rcvr.StateLock.Unlock()
rcvr.OnUpdate(tmpState)
}
// We just flushed the most recent state, dump all the
// pending items up to that point.
var reload time.Time
for i := 0; i < pending; i++ {
reload = <-rcvr.ReloadChan
}
if pending > 0 {
log.Infof("Skipped %d messages between %s and %s", pending, first, reload)
}
// Don't notify more frequently than every RELOAD_HOLD_DOWN period. When a
// deployment rolls across the cluster it can trigger a bunch of groupable
// updates. The Looper handles the sleep after the return nil.
log.Debug("Holding down...")
return nil
})
} | go | {
"resource": ""
} |
q11765 | FetchInitialState | train | func (rcvr *Receiver) FetchInitialState(stateUrl string) error {
rcvr.StateLock.Lock()
defer rcvr.StateLock.Unlock()
log.Info("Fetching initial state on startup...")
state, err := FetchState(stateUrl)
if err != nil {
return err
} else {
log.Info("Successfully retrieved state")
rcvr.CurrentState = state
if rcvr.OnUpdate == nil {
log.Error("OnUpdate() callback not defined!")
} else {
rcvr.OnUpdate(state)
}
}
return nil
} | go | {
"resource": ""
} |
q11766 | configureOverrides | train | func configureOverrides(config *Config, opts *CliOpts) {
if len(*opts.AdvertiseIP) > 0 {
config.Sidecar.AdvertiseIP = *opts.AdvertiseIP
}
if len(*opts.ClusterIPs) > 0 {
config.Sidecar.Seeds = *opts.ClusterIPs
}
if len(*opts.ClusterName) > 0 {
config.Sidecar.ClusterName = *opts.ClusterName
}
if len(*opts.Discover) > 0 {
config.Sidecar.Discovery = *opts.Discover
}
if len(*opts.LoggingLevel) > 0 {
config.Sidecar.LoggingLevel = *opts.LoggingLevel
}
} | go | {
"resource": ""
} |
q11767 | configureDelegate | train | func configureDelegate(state *catalog.ServicesState, config *Config) *servicesDelegate {
delegate := NewServicesDelegate(state)
delegate.Metadata = NodeMetadata{
ClusterName: config.Sidecar.ClusterName,
State: "Running",
}
delegate.Start()
return delegate
} | go | {
"resource": ""
} |
q11768 | configureCpuProfiler | train | func configureCpuProfiler(opts *CliOpts) {
if !*opts.CpuProfile {
return
}
var profilerFile os.File
// Capture CTRL-C and stop the CPU profiler
sigChannel := make(chan os.Signal, 1)
signal.Notify(sigChannel, os.Interrupt)
go func() {
for sig := range sigChannel {
log.Printf("Captured %v, stopping profiler and exiting..", sig)
pprof.StopCPUProfile()
profilerFile.Close()
os.Exit(0)
}
}()
// Enable CPU profiling support if requested
if *opts.CpuProfile {
profilerFile, err := os.Create("sidecar.cpu.prof")
exitWithError(err, "Can't write profiling file")
pprof.StartCPUProfile(profilerFile)
log.Debug("Profiling!")
}
} | go | {
"resource": ""
} |
q11769 | configureLoggingFormat | train | func configureLoggingFormat(config *Config) {
if config.Sidecar.LoggingFormat == "json" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// Default to verbose timestamping
log.SetFormatter(&log.TextFormatter{FullTimestamp: true})
}
} | go | {
"resource": ""
} |
q11770 | configureListeners | train | func configureListeners(config *Config, state *catalog.ServicesState) {
for _, url := range config.Listeners.Urls {
listener := catalog.NewUrlListener(url, false)
listener.Watch(state)
}
} | go | {
"resource": ""
} |
q11771 | NewServer | train | func NewServer(name string) *Server {
server := &Server{
Name: name,
// Pre-create for 5 services per host
Services: make(map[string]*service.Service, 5),
LastUpdated: time.Unix(0, 0),
LastChanged: time.Unix(0, 0),
}
return server
} | go | {
"resource": ""
} |
q11772 | NewServicesState | train | func NewServicesState() *ServicesState {
var err error
state := &ServicesState{
Servers: make(map[string]*Server, 5),
Broadcasts: make(chan [][]byte),
LastChanged: time.Unix(0, 0),
tombstoneRetransmit: TOMBSTONE_RETRANSMIT,
ServiceMsgs: make(chan service.Service, 25),
listeners: make(map[string]Listener),
}
state.Hostname, err = os.Hostname()
if err != nil {
log.Errorf("Error getting hostname! %s", err.Error())
}
return state
} | go | {
"resource": ""
} |
q11773 | ProcessServiceMsgs | train | func (state *ServicesState) ProcessServiceMsgs(looper director.Looper) {
	// ProcessServiceMsgs drains the ServiceMsgs channel, applying each
	// incoming service record to the state, until the looper is stopped.
	looper.Loop(func() error {
		// Renamed from `service`, which shadowed the imported `service`
		// package inside this closure.
		svc := <-state.ServiceMsgs
		state.AddServiceEntry(svc)
		return nil
	})
} | go | {
"resource": ""
} |
q11774 | HasServer | train | func (state *ServicesState) HasServer(hostname string) bool {
	// HasServer reports whether any record exists for the given hostname.
	_, present := state.Servers[hostname]
	return present
} | go | {
"resource": ""
} |
q11775 | ExpireServer | train | func (state *ServicesState) ExpireServer(hostname string) {
// ExpireServer tombstones every service on the given host and broadcasts
// the tombstones repeatedly so peers drop the records too. It is a no-op
// when the host is unknown, has no services, or has only tombstones left.
if !state.HasServer(hostname) || len(state.Servers[hostname].Services) == 0 {
log.Infof("No records to expire for %s", hostname)
return
}
// Skip hosts whose services are already all tombstoned.
hasLiveServices := false
for _, svc := range state.Servers[hostname].Services {
if !svc.IsTombstone() {
hasLiveServices = true
break
}
}
if !hasLiveServices {
log.Infof("No records to expire for %s (no live services)", hostname)
return
}
log.Infof("Expiring %s", hostname)
var tombstones []service.Service
for _, svc := range state.Servers[hostname].Services {
previousStatus := svc.Status
// NOTE(review): ServiceChanged is invoked before Tombstone() flips the
// status, so listeners see the pre-tombstone status -- confirm this
// ordering is intentional.
state.ServiceChanged(svc, previousStatus, svc.Updated)
svc.Tombstone()
tombstones = append(tombstones, *svc)
}
if len(tombstones) < 1 {
log.Warn("Tried to announce a zero length list of tombstones")
return
}
// Retransmit TOMBSTONE_COUNT times so lossy gossip still converges.
state.SendServices(
tombstones,
director.NewTimedLooper(TOMBSTONE_COUNT, state.tombstoneRetransmit, nil),
)
} | go | {
"resource": ""
} |
q11776 | ServiceChanged | train | func (state *ServicesState) ServiceChanged(svc *service.Service, previousStatus int, updated time.Time) {
state.serverChanged(svc.Hostname, updated)
state.NotifyListeners(svc, previousStatus, state.LastChanged)
} | go | {
"resource": ""
} |
q11777 | AddListener | train | func (state *ServicesState) AddListener(listener Listener) {
if listener.Chan() == nil {
log.Errorf("Refusing to add listener %s with nil channel!", listener.Name())
return
}
if cap(listener.Chan()) < 1 {
log.Errorf("Refusing to add blocking channel as listener: %s", listener.Name())
return
}
state.Lock()
defer state.Unlock()
state.listeners[listener.Name()] = listener
log.Debugf("AddListener(): added %s, new count %d", listener.Name(), len(state.listeners))
} | go | {
"resource": ""
} |
q11778 | RemoveListener | train | func (state *ServicesState) RemoveListener(name string) error {
	// RemoveListener drops the named listener, returning an error when no
	// listener with that name is registered.
	state.Lock()
	defer state.Unlock()
	if _, found := state.listeners[name]; !found {
		return fmt.Errorf("No listener found with the name: %s", name)
	}
	delete(state.listeners, name)
	log.Debugf("RemoveListener(): removed %s, new count %d", name, len(state.listeners))
	return nil
} | go | {
"resource": ""
} |
q11779 | GetListeners | train | func (state *ServicesState) GetListeners() []Listener {
state.RLock()
var listeners []Listener
for _, listener := range state.listeners {
listeners = append(listeners, listener)
}
state.RUnlock()
return listeners
} | go | {
"resource": ""
} |
q11780 | AddServiceEntry | train | func (state *ServicesState) AddServiceEntry(newSvc service.Service) {
defer metrics.MeasureSince([]string{"services_state", "AddServiceEntry"}, time.Now())
state.Lock()
defer state.Unlock()
if !state.HasServer(newSvc.Hostname) {
state.Servers[newSvc.Hostname] = NewServer(newSvc.Hostname)
}
server := state.Servers[newSvc.Hostname]
// Only apply changes that are newer or services are missing
if !server.HasService(newSvc.ID) {
server.Services[newSvc.ID] = &newSvc
state.ServiceChanged(&newSvc, service.UNKNOWN, newSvc.Updated)
state.retransmit(newSvc)
} else if newSvc.Invalidates(server.Services[newSvc.ID]) {
// We have to set these even if the status did not change
server.LastUpdated = newSvc.Updated
// Store the previous newSvc so we can compare it
oldEntry := server.Services[newSvc.ID]
// Update the new one
server.Services[newSvc.ID] = &newSvc
// When the status changes, the SeviceChanged() method will
// update all the accounting fields in the state and Server newSvc.
if oldEntry.Status != newSvc.Status {
state.ServiceChanged(&newSvc, oldEntry.Status, newSvc.Updated)
}
// We tell our gossip peers about the updated service
// by sending them the record. We're saved from an endless
// retransmit loop by the Invalidates() call above.
state.retransmit(newSvc)
}
} | go | {
"resource": ""
} |
q11781 | Merge | train | func (state *ServicesState) Merge(otherState *ServicesState) {
for _, server := range otherState.Servers {
for _, service := range server.Services {
state.ServiceMsgs <- *service
}
}
} | go | {
"resource": ""
} |
q11782 | retransmit | train | func (state *ServicesState) retransmit(svc service.Service) {
// We don't retransmit our own events! We're already
// transmitting them.
if svc.Hostname == state.Hostname {
return
}
go func() {
encoded, err := svc.Encode()
if err != nil {
log.Errorf("ERROR encoding message to forward: (%s)", err.Error())
return
}
state.Broadcasts <- [][]byte{encoded}
}()
} | go | {
"resource": ""
} |
q11783 | Print | train | func (state *ServicesState) Print(list *memberlist.Memberlist) {
log.Println(state.Format(list))
} | go | {
"resource": ""
} |
q11784 | TrackNewServices | train | func (state *ServicesState) TrackNewServices(fn func() []service.Service, looper director.Looper) {
looper.Loop(func() error {
for _, container := range fn() {
state.ServiceMsgs <- container
}
return nil
})
} | go | {
"resource": ""
} |
q11785 | TrackLocalListeners | train | func (state *ServicesState) TrackLocalListeners(fn func() []Listener, looper director.Looper) {
looper.Loop(func() error {
discovered := fn()
// Add new listeners
for _, listener := range discovered {
state.RLock()
_, ok := state.listeners[listener.Name()]
state.RUnlock()
if !ok {
log.Infof("Adding listener %s because it was just discovered", listener.Name())
urlListener, ok := listener.(*UrlListener)
if ok {
urlListener.Watch(state)
} else {
state.AddListener(listener)
}
}
}
// Remove old ones
listeners := state.listeners
for _, listener := range listeners {
if listener.Managed() && !containsListener(discovered, listener.Name()) {
log.Infof("Removing listener %s because the service appears to be gone", listener.Name())
urlListener, ok := listener.(*UrlListener)
if ok {
log.Infof("Stopping UrlListener %s", listener.Name())
urlListener.Stop()
}
state.RemoveListener(listener.Name())
}
}
return nil
})
} | go | {
"resource": ""
} |
q11786 | IsNewService | train | func (state *ServicesState) IsNewService(svc *service.Service) bool {
var found *service.Service
if state.HasServer(svc.Hostname) {
found = state.Servers[svc.Hostname].Services[svc.ID]
}
if found == nil || (!svc.IsTombstone() && svc.Status != found.Status) {
return true
}
return false
} | go | {
"resource": ""
} |
q11787 | BroadcastServices | train | func (state *ServicesState) BroadcastServices(fn func() []service.Service, looper director.Looper) {
lastTime := time.Unix(0, 0)
looper.Loop(func() error {
defer metrics.MeasureSince([]string{"services_state", "BroadcastServices"}, time.Now())
var services []service.Service
haveNewServices := false
servicesList := fn()
state.RLock()
defer state.RUnlock()
for _, svc := range servicesList {
isNew := state.IsNewService(&svc)
// We'll broadcast it now if it's new or we've hit refresh window
if isNew {
log.Debug("Found service changes in BroadcastServices()")
haveNewServices = true
services = append(services, svc)
// Check that refresh window... is it time?
} else if time.Now().UTC().Add(0 - ALIVE_BROADCAST_INTERVAL).After(lastTime) {
services = append(services, svc)
}
}
if len(services) > 0 {
log.Debug("Starting to broadcast")
// Figure out how many times to announce the service. New services get more announcements.
runCount := 1
if haveNewServices {
runCount = ALIVE_COUNT
}
lastTime = time.Now().UTC()
state.SendServices(
services,
director.NewTimedLooper(runCount, state.tombstoneRetransmit, nil),
)
log.Debug("Completing broadcast")
} else {
// We expect there to always be _something_ in the channel
// once we've run.
state.Broadcasts <- nil
}
return nil
})
} | go | {
"resource": ""
} |
q11788 | SendServices | train | func (state *ServicesState) SendServices(services []service.Service, looper director.Looper) {
// Announce these every second for awhile
go func() {
defer metrics.MeasureSince([]string{"services_state", "SendServices"}, time.Now())
additionalTime := 0 * time.Second
looper.Loop(func() error {
var prepared [][]byte
for _, svc := range services {
svc.Updated = svc.Updated.Add(additionalTime)
encoded, err := svc.Encode()
if err != nil {
log.Errorf("ERROR encoding container: (%s)", err.Error())
}
prepared = append(prepared, encoded)
}
// We add time to make sure that these get retransmitted by peers.
// Otherwise they aren't "new" messages and don't get retransmitted.
additionalTime = additionalTime + 50*time.Nanosecond
state.Broadcasts <- prepared // Put it on the wire
return nil
})
}()
} | go | {
"resource": ""
} |
q11789 | ByService | train | func (state *ServicesState) ByService() map[string][]*service.Service {
serviceMap := make(map[string][]*service.Service)
state.EachServiceSorted(
func(hostname *string, serviceId *string, svc *service.Service) {
serviceMap[svc.Name] = append(serviceMap[svc.Name], svc)
},
)
return serviceMap
} | go | {
"resource": ""
} |
q11790 | Decode | train | func Decode(data []byte) (*ServicesState, error) {
newState := NewServicesState()
err := newState.UnmarshalJSON(data)
if err != nil {
log.Errorf("Error decoding state! (%s)", err.Error())
}
return newState, err
} | go | {
"resource": ""
} |
q11791 | New | train | func New(configFile string, pidFile string) *HAproxy {
reloadCmd := "haproxy -f " + configFile + " -p " + pidFile + " `[[ -f " + pidFile + " ]] && echo \"-sf $(cat " + pidFile + ")\"]]`"
verifyCmd := "haproxy -c -f " + configFile
proxy := HAproxy{
ReloadCmd: reloadCmd,
VerifyCmd: verifyCmd,
Template: "views/haproxy.cfg",
ConfigFile: configFile,
PidFile: pidFile,
}
return &proxy
} | go | {
"resource": ""
} |
// invalidNameChars matches any character not allowed in a sanitized name.
// Compiled once at package scope instead of on every call (the original
// recompiled the pattern per invocation).
var invalidNameChars = regexp.MustCompile(`[^a-z0-9-]`)

// sanitizeName replaces every character outside [a-z0-9-] in image with
// a dash, producing a name safe for config identifiers.
func sanitizeName(image string) string {
	return invalidNameChars.ReplaceAllString(image, "-")
}
"resource": ""
} |
q11793 | findPortForService | train | func findPortForService(svcPort string, svc *service.Service) string {
matchPort, err := strconv.ParseInt(svcPort, 10, 64)
if err != nil {
log.Errorf("Invalid value from template ('%s') can't parse as int64: %s", svcPort, err.Error())
return "-1"
}
for _, port := range svc.Ports {
if port.ServicePort == matchPort {
internalPort := strconv.FormatInt(port.Port, 10)
return internalPort
}
}
return "-1"
} | go | {
"resource": ""
} |
q11794 | findIpForService | train | func (h *HAproxy) findIpForService(svcPort string, svc *service.Service) string {
// We can turn off using IP addresses in the config, which is sometimes
// necessary (e.g. w/Docker for Mac).
if h.UseHostnames {
return svc.Hostname
}
matchPort, err := strconv.ParseInt(svcPort, 10, 64)
if err != nil {
log.Errorf("Invalid value from template ('%s') can't parse as int64: %s", svcPort, err.Error())
return "-1"
}
for _, port := range svc.Ports {
if port.ServicePort == matchPort {
return port.IP
}
}
// This defaults to the previous behavior of templating the hostname
// instead of the IP address. This relies on haproxy being able to
// resolve the hostname (which means non-FQDN hostnames are a hazard).
// Ideally this never happens for clusters that have IP addresses defined.
return svc.Hostname
} | go | {
"resource": ""
} |
q11795 | swallowSignals | train | func (h *HAproxy) swallowSignals() {
// from HAproxy which propagate.
sigChan := make(chan os.Signal, 10)
// Used to stop the goroutine
h.sigStopChan = make(chan struct{})
go func() {
for {
select {
case <-sigChan:
// swallow signal
case <-h.sigStopChan:
break
}
}
}()
signal.Notify(sigChan, syscall.SIGSTOP, syscall.SIGTSTP, syscall.SIGTTIN, syscall.SIGTTOU)
} | go | {
"resource": ""
} |
q11796 | ResetSignals | train | func (h *HAproxy) ResetSignals() {
h.sigLock.Lock()
signal.Reset(syscall.SIGSTOP, syscall.SIGTSTP, syscall.SIGTTIN, syscall.SIGTTOU)
select {
case h.sigStopChan <- struct{}{}: // nothing
default:
}
h.sigLock.Unlock()
} | go | {
"resource": ""
} |
q11797 | run | train | func (h *HAproxy) run(command string) error {
cmd := exec.Command("/bin/bash", "-c", command)
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
// The end effect of this signal handling requirement is that we can only run _one_
// command at a time. This is totally fine for HAproxy.
h.sigLock.Lock()
defer h.sigLock.Unlock()
if !h.signalsHandled {
log.Info("Setting up signal handlers")
h.swallowSignals()
h.signalsHandled = true
}
err := cmd.Run()
if err != nil {
err = fmt.Errorf("Error running '%s': %s\n%s\n%s", command, err, stdout, stderr)
}
return err
} | go | {
"resource": ""
} |
q11798 | WriteAndReload | train | func (h *HAproxy) WriteAndReload(state *catalog.ServicesState) error {
if h.ConfigFile == "" {
return fmt.Errorf("Trying to write HAproxy config, but no filename specified!")
}
outfile, err := os.Create(h.ConfigFile)
if err != nil {
return fmt.Errorf("Unable to write to %s! (%s)", h.ConfigFile, err.Error())
}
if err := h.WriteConfig(state, outfile); err != nil {
return err
}
if err = h.Verify(); err != nil {
return fmt.Errorf("Failed to verify HAproxy config! (%s)", err.Error())
}
return h.Reload()
} | go | {
"resource": ""
} |
q11799 | UpdateHandler | train | func UpdateHandler(response http.ResponseWriter, req *http.Request, rcvr *Receiver) {
defer req.Body.Close()
response.Header().Set("Content-Type", "application/json")
data, err := ioutil.ReadAll(req.Body)
if err != nil {
message, _ := json.Marshal(ApiErrors{[]string{err.Error()}})
response.WriteHeader(http.StatusInternalServerError)
response.Write(message)
return
}
var evt catalog.StateChangedEvent
err = json.Unmarshal(data, &evt)
if err != nil {
message, _ := json.Marshal(ApiErrors{[]string{err.Error()}})
response.WriteHeader(http.StatusInternalServerError)
response.Write(message)
return
}
rcvr.StateLock.Lock()
defer rcvr.StateLock.Unlock()
if rcvr.CurrentState == nil || rcvr.CurrentState.LastChanged.Before(evt.State.LastChanged) {
rcvr.CurrentState = &evt.State
rcvr.LastSvcChanged = &evt.ChangeEvent.Service
if ShouldNotify(evt.ChangeEvent.PreviousStatus, evt.ChangeEvent.Service.Status) {
if !rcvr.IsSubscribed(evt.ChangeEvent.Service.Name) {
return
}
if rcvr.OnUpdate == nil {
log.Errorf("No OnUpdate() callback registered!")
return
}
rcvr.EnqueueUpdate()
}
}
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.