_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5000
|
updateAddresses
|
train
|
// updateAddresses refreshes the member addresses of the peer group.
// When an HA space is configured the addresses are selected from that
// space; otherwise internal address selection is used. Any error is
// annotated with context before being returned.
func (p *peerGroupChanges) updateAddresses() error {
	update := p.updateAddressesFromInternal
	if p.info.haSpace != "" {
		update = p.updateAddressesFromSpace
	}
	return errors.Annotate(update(), "updating member addresses")
}
|
go
|
{
"resource": ""
}
|
q5001
|
updateAddressesFromSpace
|
train
|
// updateAddressesFromSpace sets the desired Mongo address of each member
// from the configured HA space. Machines with no address in the space get
// their status updated and are collected; if any such machines exist, an
// error naming them all is returned after the full pass completes.
func (p *peerGroupChanges) updateAddressesFromSpace() error {
space := p.info.haSpace
var noAddresses []string
for _, id := range p.sortedMemberIds() {
m := p.info.machines[id]
addr, err := m.SelectMongoAddressFromSpace(p.info.mongoPort, space)
if err != nil {
if errors.IsNotFound(err) {
// No address in the space: record the machine, surface the
// problem in its status, and keep processing the others.
noAddresses = append(noAddresses, id)
msg := fmt.Sprintf("no addresses in configured juju-ha-space %q", space)
if err := m.stm.SetStatus(getStatusInfo(msg)); err != nil {
return errors.Trace(err)
}
continue
}
return errors.Trace(err)
}
// Only flag the peer group as changed when an address actually moves.
if addr != p.desired.members[id].Address {
p.desired.members[id].Address = addr
p.desired.isChanged = true
}
}
if len(noAddresses) > 0 {
ids := strings.Join(noAddresses, ", ")
return fmt.Errorf("no usable Mongo addresses found in configured juju-ha-space %q for machines: %s", space, ids)
}
return nil
}
|
go
|
{
"resource": ""
}
|
q5002
|
sortedMemberIds
|
train
|
// sortedMemberIds returns the ids of all desired members,
// ordered numerically.
func (p *peerGroupChanges) sortedMemberIds() []string {
	ids := make([]string, 0, len(p.desired.members))
	for memberId := range p.desired.members {
		ids = append(ids, memberId)
	}
	sortAsInts(ids)
	return ids
}
|
go
|
{
"resource": ""
}
|
q5003
|
NewFileStorageReader
|
train
|
// NewFileStorageReader returns a storage reader rooted at the given
// directory. The path is normalized and made absolute; an error is
// returned when it does not refer to an existing directory.
func NewFileStorageReader(path string) (reader storage.StorageReader, err error) {
	dir, err := utils.NormalizePath(path)
	if err != nil {
		return nil, err
	}
	if dir, err = filepath.Abs(dir); err != nil {
		return nil, err
	}
	info, err := os.Stat(dir)
	if err != nil {
		return nil, err
	}
	if !info.Mode().IsDir() {
		return nil, fmt.Errorf("specified source path is not a directory: %s", path)
	}
	return &fileStorageReader{dir}, nil
}
|
go
|
{
"resource": ""
}
|
q5004
|
Get
|
train
|
// Get opens the named entry for reading. Internal paths are reported as
// non-existent; a missing or directory entry yields a NotFound error.
func (f *fileStorageReader) Get(name string) (io.ReadCloser, error) {
	if isInternalPath(name) {
		return nil, &os.PathError{Op: "Get", Path: name, Err: os.ErrNotExist}
	}
	target := f.fullPath(name)
	info, err := os.Stat(target)
	switch {
	case os.IsNotExist(err):
		return nil, errors.NewNotFound(err, "")
	case err != nil:
		return nil, err
	case info.IsDir():
		// Directories are not retrievable objects.
		return nil, errors.NotFoundf("no such file with name %q", name)
	}
	return os.Open(target)
}
|
go
|
{
"resource": ""
}
|
q5005
|
List
|
train
|
// List returns the sorted names (relative to the reader's root) of all
// regular files whose path starts with prefix. Internal paths yield an
// empty list, and a missing directory is treated as "no matches" rather
// than an error.
func (f *fileStorageReader) List(prefix string) ([]string, error) {
var names []string
if isInternalPath(prefix) {
return names, nil
}
// Resolve the prefix against the root and walk from the deepest
// directory that could contain matching files.
prefix = filepath.Join(f.path, prefix)
dir := filepath.Dir(prefix)
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Collect only regular files matching the absolute prefix; the
// stored name strips the root path plus its trailing separator.
if !info.IsDir() && strings.HasPrefix(path, prefix) {
names = append(names, path[len(f.path)+1:])
}
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
sort.Strings(names)
return names, nil
}
|
go
|
{
"resource": ""
}
|
q5006
|
URL
|
train
|
// URL returns a file:// URL for the named entry under the reader's root.
func (f *fileStorageReader) URL(name string) (string, error) {
	fullPath := filepath.Join(f.path, name)
	return utils.MakeFileURL(fullPath), nil
}
|
go
|
{
"resource": ""
}
|
q5007
|
NewFacade
|
train
|
// NewFacade constructs a Facade from its dependencies. Only controller
// agents are authorized; anyone else receives ErrPerm.
func NewFacade(
	backend Backend,
	pool Pool,
	providers ProviderRegistry,
	entityWatcher EntityWatcher,
	statusSetter StatusSetter,
	auth facade.Authorizer,
) (*Facade, error) {
	if !auth.AuthController() {
		return nil, common.ErrPerm
	}
	f := &Facade{
		backend:       backend,
		pool:          pool,
		providers:     providers,
		entityWatcher: entityWatcher,
		statusSetter:  statusSetter,
	}
	return f, nil
}
|
go
|
{
"resource": ""
}
|
q5008
|
ModelTargetEnvironVersion
|
train
|
// ModelTargetEnvironVersion returns the target environ version for each
// requested model entity; per-entity failures are reported in-line.
func (f *Facade) ModelTargetEnvironVersion(args params.Entities) (params.IntResults, error) {
	results := make([]params.IntResult, len(args.Entities))
	for i, entity := range args.Entities {
		version, err := f.modelTargetEnvironVersion(entity)
		if err != nil {
			results[i].Error = common.ServerError(err)
		} else {
			results[i].Result = version
		}
	}
	return params.IntResults{Results: results}, nil
}
|
go
|
{
"resource": ""
}
|
q5009
|
SetModelEnvironVersion
|
train
|
// SetModelEnvironVersion sets the current environ version for each of
// the given models, reporting per-model failures in the results.
func (f *Facade) SetModelEnvironVersion(args params.SetModelEnvironVersions) (params.ErrorResults, error) {
	results := make([]params.ErrorResult, len(args.Models))
	for i, model := range args.Models {
		if err := f.setModelEnvironVersion(model); err != nil {
			results[i].Error = common.ServerError(err)
		}
	}
	return params.ErrorResults{Results: results}, nil
}
|
go
|
{
"resource": ""
}
|
q5010
|
SetModelStatus
|
train
|
// SetModelStatus sets the status of the specified models, delegating
// entirely to the facade's status setter.
func (f *Facade) SetModelStatus(args params.SetStatus) (params.ErrorResults, error) {
return f.statusSetter.SetStatus(args)
}
|
go
|
{
"resource": ""
}
|
q5011
|
Init
|
train
|
// Init validates the command-line arguments and converts each unit or
// application name into its tag string in c.Tags. Either --all or at
// least one entity must be supplied, but not both.
//
// Bug fix: the invalid-entity error previously reported args[0] rather
// than the argument that actually failed validation.
func (c *MetricsCommand) Init(args []string) error {
	if !c.All && len(args) == 0 {
		return errors.New("you need to specify at least one unit or application")
	} else if c.All && len(args) > 0 {
		return errors.New("cannot use --all with additional entities")
	}
	c.Tags = make([]string, len(args))
	for i, arg := range args {
		switch {
		case names.IsValidUnit(arg):
			c.Tags[i] = names.NewUnitTag(arg).String()
		case names.IsValidApplication(arg):
			c.Tags[i] = names.NewApplicationTag(arg).String()
		default:
			// Name the offending argument, not the first one.
			return errors.Errorf("%q is not a valid unit or application", arg)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5012
|
Less
|
train
|
// Less orders metrics by metric name, falling back to the rendered
// label string when the names are equal.
func (slice metricSlice) Less(i, j int) bool {
	left, right := slice[i], slice[j]
	if left.Metric != right.Metric {
		return left.Metric < right.Metric
	}
	return renderLabels(left.Labels) < renderLabels(right.Labels)
}
|
go
|
{
"resource": ""
}
|
q5013
|
Swap
|
train
|
// Swap exchanges the elements at i and j (sort.Interface).
func (slice metricSlice) Swap(i, j int) {
	tmp := slice[i]
	slice[i] = slice[j]
	slice[j] = tmp
}
|
go
|
{
"resource": ""
}
|
q5014
|
formatTabular
|
train
|
// formatTabular renders a []metric as a right-aligned table with a
// header row, writing the result to writer. A value of any other type
// is rejected with an error.
func formatTabular(writer io.Writer, value interface{}) error {
	metrics, ok := value.([]metric)
	if !ok {
		return errors.Errorf("expected value of type %T, got %T", metrics, value)
	}
	table := uitable.New()
	table.MaxColWidth = 50
	table.Wrap = true
	// Right-align every column except the first (unit name).
	for col := 1; col <= 4; col++ {
		table.RightAlign(col)
	}
	table.AddRow("UNIT", "TIMESTAMP", "METRIC", "VALUE", "LABELS")
	for _, m := range metrics {
		table.AddRow(m.Unit, m.Timestamp.Format(time.RFC3339), m.Metric, m.Value, renderLabels(m.Labels))
	}
	_, err := fmt.Fprint(writer, table.String())
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5015
|
Status
|
train
|
// Status fetches the machine's status from the facade, returning an
// error when the call fails, the result count is wrong, or the single
// result carries an error.
func (m *Machine) Status() (params.StatusResult, error) {
	arg := params.Entities{Entities: []params.Entity{{Tag: m.tag.String()}}}
	var results params.StatusResults
	if err := m.facade.FacadeCall("Status", arg, &results); err != nil {
		return params.StatusResult{}, errors.Trace(err)
	}
	if n := len(results.Results); n != 1 {
		return params.StatusResult{}, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return params.StatusResult{}, res.Error
	}
	return res, nil
}
|
go
|
{
"resource": ""
}
|
q5016
|
IsManual
|
train
|
// IsManual reports whether the machine was provisioned manually,
// querying the facade's AreManuallyProvisioned call.
func (m *Machine) IsManual() (bool, error) {
	arg := params.Entities{Entities: []params.Entity{{Tag: m.tag.String()}}}
	var results params.BoolResults
	if err := m.facade.FacadeCall("AreManuallyProvisioned", arg, &results); err != nil {
		return false, errors.Trace(err)
	}
	if n := len(results.Results); n != 1 {
		return false, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return false, res.Error
	}
	return res.Result, nil
}
|
go
|
{
"resource": ""
}
|
q5017
|
ProviderAddresses
|
train
|
// ProviderAddresses returns the provider-sourced network addresses
// recorded for the machine.
func (m *Machine) ProviderAddresses() ([]network.Address, error) {
	arg := params.Entities{Entities: []params.Entity{{Tag: m.tag.String()}}}
	var results params.MachineAddressesResults
	if err := m.facade.FacadeCall("ProviderAddresses", arg, &results); err != nil {
		return nil, errors.Trace(err)
	}
	if n := len(results.Results); n != 1 {
		return nil, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return nil, res.Error
	}
	return params.NetworkAddresses(res.Addresses...), nil
}
|
go
|
{
"resource": ""
}
|
q5018
|
SetProviderAddresses
|
train
|
// SetProviderAddresses records the given provider-sourced addresses
// against the machine.
func (m *Machine) SetProviderAddresses(addrs ...network.Address) error {
	arg := params.SetMachinesAddresses{
		MachineAddresses: []params.MachineAddresses{{
			Tag:       m.tag.String(),
			Addresses: params.FromNetworkAddresses(addrs...),
		}},
	}
	var result params.ErrorResults
	if err := m.facade.FacadeCall("SetProviderAddresses", arg, &result); err != nil {
		return err
	}
	return result.OneError()
}
|
go
|
{
"resource": ""
}
|
q5019
|
NewBakeryStorage
|
train
|
// NewBakeryStorage returns an expirable bakery storage backed by the
// bakery storage items collection of this state's database.
func (st *State) NewBakeryStorage() (bakerystorage.ExpirableStorage, error) {
return bakerystorage.New(bakerystorage.Config{
// GetCollection opens the backing collection together with a
// closer that must be invoked when the caller is done with it.
GetCollection: func() (mongo.Collection, func()) {
return st.db().GetCollection(bakeryStorageItemsC)
},
// GetStorage adapts mgo root-key storage to the bakery storage
// interface, expiring keys after the supplied duration.
GetStorage: func(rootKeys *mgostorage.RootKeys, coll mongo.Collection, expireAfter time.Duration) bakery.Storage {
return rootKeys.NewStorage(coll.Writeable().Underlying(), mgostorage.Policy{
ExpiryDuration: expireAfter,
})
},
})
}
|
go
|
{
"resource": ""
}
|
q5020
|
NewStateCAASOperatorProvisionerAPI
|
train
|
// NewStateCAASOperatorProvisionerAPI wires state-backed dependencies
// (CAAS broker, storage provider registry, pool manager) into
// NewCAASOperatorProvisionerAPI; it has the signature required for
// facade registration.
func NewStateCAASOperatorProvisionerAPI(ctx facade.Context) (*API, error) {
authorizer := ctx.Auth()
resources := ctx.Resources()
broker, err := stateenvirons.GetNewCAASBrokerFunc(caas.New)(ctx.State())
if err != nil {
return nil, errors.Annotate(err, "getting caas client")
}
registry := stateenvirons.NewStorageProviderRegistry(broker)
pm := poolmanager.New(state.NewStateSettings(ctx.State()), registry)
return NewCAASOperatorProvisionerAPI(resources, authorizer, stateShim{ctx.State()}, pm, registry)
}
|
go
|
{
"resource": ""
}
|
q5021
|
NewCAASOperatorProvisionerAPI
|
train
|
// NewCAASOperatorProvisionerAPI returns a new CAAS operator
// provisioner API facade. Access is restricted to the controller.
func NewCAASOperatorProvisionerAPI(
resources facade.Resources,
authorizer facade.Authorizer,
st CAASOperatorProvisionerState,
storagePoolManager poolmanager.PoolManager,
registry storage.ProviderRegistry,
) (*API, error) {
if !authorizer.AuthController() {
return nil, common.ErrPerm
}
return &API{
// Password and life operations are scoped to application tags.
PasswordChanger: common.NewPasswordChanger(st, common.AuthFuncForTagKind(names.ApplicationTagKind)),
LifeGetter: common.NewLifeGetter(st, common.AuthFuncForTagKind(names.ApplicationTagKind)),
APIAddresser: common.NewAPIAddresser(st, resources),
auth: authorizer,
resources: resources,
state: st,
storagePoolManager: storagePoolManager,
registry: registry,
}, nil
}
|
go
|
{
"resource": ""
}
|
q5022
|
CharmStorageParams
|
train
|
// CharmStorageParams returns Kubernetes filesystem parameters for charm
// (operator) storage, resolving the provider type and attributes from a
// storage pool when poolName (or, failing that, storageClassName)
// matches one. A NotFound pool is only an error when the pool was
// explicitly requested via poolName.
func CharmStorageParams(
controllerUUID string,
storageClassName string,
modelCfg *config.Config,
poolName string,
poolManager poolmanager.PoolManager,
registry storage.ProviderRegistry,
) (params.KubernetesFilesystemParams, error) {
// The defaults here are for operator storage.
// Workload storage will override these elsewhere.
var size uint64 = 1024
tags := tags.ResourceTags(
names.NewModelTag(modelCfg.UUID()),
names.NewControllerTag(controllerUUID),
modelCfg,
)
result := params.KubernetesFilesystemParams{
StorageName: "charm",
Size: size,
Provider: string(provider.K8s_ProviderType),
Tags: tags,
Attributes: make(map[string]interface{}),
}
// The storage key value from the model config might correspond
// to a storage pool, unless there's been a specific storage pool
// requested.
// First, blank out the fallback pool name used in previous
// versions of Juju.
if poolName == string(provider.K8s_ProviderType) {
poolName = ""
}
maybePoolName := poolName
if maybePoolName == "" {
maybePoolName = storageClassName
}
providerType, attrs, err := poolStorageProvider(poolManager, registry, maybePoolName)
if err != nil && (!errors.IsNotFound(err) || poolName != "") {
return params.KubernetesFilesystemParams{}, errors.Trace(err)
}
// Only adopt the pool's provider/attributes when lookup succeeded.
if err == nil {
result.Provider = string(providerType)
if len(attrs) > 0 {
result.Attributes = attrs
}
}
// Ensure k8s-provided storage always carries a storage class.
if _, ok := result.Attributes[provider.StorageClass]; !ok && result.Provider == string(provider.K8s_ProviderType) {
result.Attributes[provider.StorageClass] = storageClassName
}
return result, nil
}
|
go
|
{
"resource": ""
}
|
q5023
|
HandleCredentialError
|
train
|
// HandleCredentialError gives MaybeInvalidateCredential a chance to mark
// the cloud credential invalid for the given error, then returns the
// original error unchanged.
func HandleCredentialError(err error, ctx context.ProviderCallContext) error {
MaybeInvalidateCredential(err, ctx)
return err
}
|
go
|
{
"resource": ""
}
|
q5024
|
generateOSElement
|
train
|
// generateOSElement builds the domain OS element. ARM64 guests get an
// aarch64 "virt" machine with a read-only pflash NVRAM loader; all
// other architectures use the plain hvm default.
func generateOSElement(p domainParams) OS {
	if p.Arch() != arch.ARM64 {
		return OS{Type: OSType{Text: "hvm"}}
	}
	return OS{
		Type: OSType{
			Arch:    "aarch64",
			Machine: "virt",
			Text:    "hvm",
		},
		Loader: &NVRAMCode{
			Text:     p.Loader(),
			ReadOnly: "yes",
			Type:     "pflash",
		},
	}
}
|
go
|
{
"resource": ""
}
|
q5025
|
generateFeaturesElement
|
train
|
// generateFeaturesElement returns a Features element enabling the host
// GIC for ARM64 domains, and nil for every other architecture.
func generateFeaturesElement(p domainParams) *Features {
	if p.Arch() != arch.ARM64 {
		return nil
	}
	return &Features{GIC: &GIC{Version: "host"}}
}
|
go
|
{
"resource": ""
}
|
q5026
|
deviceID
|
train
|
// deviceID maps a zero-based index to a virtio block device name
// ("vda" through "vdz"). Indexes outside 0-25 are rejected.
func deviceID(i int) (string, error) {
	if i < 0 || i > 25 {
		return "", errors.Errorf("got %d but only support devices 0-25", i)
	}
	// Use the %c verb: the previous string('a'+i) int-to-string
	// conversion is flagged by go vet (stringintconv), even though it
	// produced the same rune here.
	return fmt.Sprintf("vd%c", 'a'+i), nil
}
|
go
|
{
"resource": ""
}
|
q5027
|
Close
|
train
|
// Close closes the last-sent tracker and then releases the strategy's
// resources. The releaser runs regardless of the tracker result; the
// (annotated) tracker error, if any, is returned.
func (s *migrationLoggingStrategy) Close() error {
err := errors.Annotate(
s.tracker.Close(),
"closing last-sent tracker",
)
s.releaser()
return err
}
|
go
|
{
"resource": ""
}
|
q5028
|
Validate
|
train
|
// Validate checks that every required dependency is present and that
// Tag identifies a machine, returning a NotValid error for the first
// missing or malformed field.
func (config Config) Validate() error {
	switch {
	case config.Logger == nil:
		return errors.NotValidf("nil Logger")
	case config.Facade == nil:
		return errors.NotValidf("nil Facade")
	case config.Broker == nil:
		return errors.NotValidf("nil Broker")
	case config.AgentConfig == nil:
		return errors.NotValidf("nil AgentConfig")
	case config.Tag == nil:
		return errors.NotValidf("nil Tag")
	}
	// Only machine agents are supported.
	if _, ok := config.Tag.(names.MachineTag); !ok {
		return errors.NotValidf("Tag")
	}
	switch {
	case config.GetMachineWatcher == nil:
		return errors.NotValidf("nil GetMachineWatcher")
	case config.GetRequiredLXDProfiles == nil:
		return errors.NotValidf("nil GetRequiredLXDProfiles")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5029
|
NewEnvironWorker
|
train
|
// NewEnvironWorker returns a worker that watches all machines in the
// model; machines require both the "default" profile and the
// model-scoped "juju-<model>" profile.
func NewEnvironWorker(config Config) (worker.Worker, error) {
config.GetMachineWatcher = config.Facade.WatchMachines
config.GetRequiredLXDProfiles = func(modelName string) []string {
return []string{"default", "juju-" + modelName}
}
return newWorker(config)
}
|
go
|
{
"resource": ""
}
|
q5030
|
NewContainerWorker
|
train
|
// NewContainerWorker returns a worker scoped to the containers of the
// machine identified by config.Tag; containers only require the
// "default" profile.
func NewContainerWorker(config Config) (worker.Worker, error) {
m, err := config.Facade.Machine(config.Tag.(names.MachineTag))
if err != nil {
return nil, errors.Trace(err)
}
config.GetRequiredLXDProfiles = func(_ string) []string { return []string{"default"} }
config.GetMachineWatcher = m.WatchContainers
return newWorker(config)
}
|
go
|
{
"resource": ""
}
|
q5031
|
getMachine
|
train
|
// getMachine fetches the mutater machine for the given tag from the
// worker's facade.
func (w *mutaterWorker) getMachine(tag names.MachineTag) (instancemutater.MutaterMachine, error) {
	return w.facade.Machine(tag)
}
|
go
|
{
"resource": ""
}
|
q5032
|
add
|
train
|
// add ties the given worker's lifetime to the mutaterWorker's catacomb.
// The parameter was renamed from "new", which shadowed the builtin.
func (w *mutaterWorker) add(wrk worker.Worker) error {
	return w.catacomb.Add(wrk)
}
|
go
|
{
"resource": ""
}
|
q5033
|
SetCanUninstall
|
train
|
// SetCanUninstall marks the agent as ready for uninstall by writing an
// empty marker file. Non-machine agents are skipped (with a debug log)
// rather than treated as an error.
func SetCanUninstall(a Agent) error {
	tag := a.CurrentConfig().Tag()
	if _, isMachine := tag.(names.MachineTag); !isMachine {
		logger.Debugf("cannot uninstall non-machine agent %q", tag)
		return nil
	}
	logger.Infof("marking agent ready for uninstall")
	return ioutil.WriteFile(uninstallFile(a), nil, 0644)
}
|
go
|
{
"resource": ""
}
|
q5034
|
CanUninstall
|
train
|
// CanUninstall reports whether the agent's uninstall marker file exists.
func CanUninstall(a Agent) bool {
	_, statErr := os.Stat(uninstallFile(a))
	if statErr != nil {
		logger.Debugf("agent not marked ready for uninstall")
		return false
	}
	logger.Infof("agent already marked ready for uninstall")
	return true
}
|
go
|
{
"resource": ""
}
|
q5035
|
newWaitWorker
|
train
|
// newWaitWorker returns a worker that watches the model's environ
// version until it reaches targetVersion. The watcher's lifetime is
// bound to the worker's catacomb.
func newWaitWorker(config Config, targetVersion int) (worker.Worker, error) {
watcher, err := config.Facade.WatchModelEnvironVersion(config.ModelTag)
if err != nil {
return nil, errors.Trace(err)
}
ww := waitWorker{
watcher: watcher,
facade: config.Facade,
modelTag: config.ModelTag,
gate: config.GateUnlocker,
targetVersion: targetVersion,
}
if err := catacomb.Invoke(catacomb.Plan{
Site: &ww.catacomb,
Init: []worker.Worker{watcher},
Work: ww.loop,
}); err != nil {
return nil, errors.Trace(err)
}
return &ww, nil
}
|
go
|
{
"resource": ""
}
|
q5036
|
newUpgradeWorker
|
train
|
// newUpgradeWorker returns a worker that runs the environ upgrade steps
// from the model's current environ version up to targetVersion, setting
// model status around the operation and unlocking the gate on success.
func newUpgradeWorker(config Config, targetVersion int) (worker.Worker, error) {
currentVersion, err := config.Facade.ModelEnvironVersion(config.ModelTag)
if err != nil {
return nil, errors.Trace(err)
}
return jujuworker.NewSimpleWorker(func(<-chan struct{}) error {
// NOTE(axw) the abort channel is ignored, because upgrade
// steps are not interruptible. If we find they need to be
// interruptible, we should consider passing through a
// context.Context for cancellation, and cancelling it if
// the abort channel is signalled.
setVersion := func(v int) error {
return config.Facade.SetModelEnvironVersion(config.ModelTag, v)
}
setStatus := func(s status.Status, info string) error {
return config.Facade.SetModelStatus(config.ModelTag, s, info, nil)
}
// Only advertise an upgrade when there is actually ground to cover.
if targetVersion > currentVersion {
if err := setStatus(status.Busy, fmt.Sprintf(
"upgrading environ from version %d to %d",
currentVersion, targetVersion,
)); err != nil {
return errors.Trace(err)
}
}
if err := runEnvironUpgradeSteps(
config.Environ,
config.ControllerTag,
config.ModelTag,
currentVersion,
targetVersion,
setVersion,
common.NewCloudCallContext(config.CredentialAPI, nil),
); err != nil {
// Surface the failure in model status; a status-update failure
// is only logged so the upgrade error itself is preserved.
info := fmt.Sprintf("failed to upgrade environ: %s", err)
if err := setStatus(status.Error, info); err != nil {
logger.Warningf("failed to update model status: %v", err)
}
return errors.Annotate(err, "upgrading environ")
}
if err := setStatus(status.Available, ""); err != nil {
return errors.Trace(err)
}
config.GateUnlocker.Unlock()
return nil
}), nil
}
|
go
|
{
"resource": ""
}
|
q5037
|
SetPasswords
|
train
|
// SetPasswords sets API passwords for the named applications, returning
// the per-application results. The result count is validated against
// the number of requested changes.
func (c *Client) SetPasswords(appPasswords []ApplicationPassword) (params.ErrorResults, error) {
	changes := make([]params.EntityPassword, len(appPasswords))
	for i, p := range appPasswords {
		changes[i] = params.EntityPassword{
			Tag: names.NewApplicationTag(p.Name).String(), Password: p.Password,
		}
	}
	var result params.ErrorResults
	if err := c.facade.FacadeCall("SetPasswords", params.EntityPasswords{Changes: changes}, &result); err != nil {
		return params.ErrorResults{}, err
	}
	if len(result.Results) != len(changes) {
		return params.ErrorResults{}, errors.Errorf("expected %d result(s), got %d", len(changes), len(result.Results))
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q5038
|
NewCommandsResolver
|
train
|
// NewCommandsResolver returns a resolver that turns queued run commands
// into operations. commandCompleted is invoked with a command's id when
// that command is finished (see NextOp's completion wrapper).
func NewCommandsResolver(commands Commands, commandCompleted func(string)) resolver.Resolver {
return &commandsResolver{commands, commandCompleted}
}
|
go
|
{
"resource": ""
}
|
q5039
|
NextOp
|
train
|
// NextOp returns an operation for the first queued remote command, or
// resolver.ErrNoOperation when none are pending.
func (s *commandsResolver) NextOp(
localState resolver.LocalState,
remoteState remotestate.Snapshot,
opFactory operation.Factory,
) (operation.Operation, error) {
if len(remoteState.Commands) == 0 {
return nil, resolver.ErrNoOperation
}
id := remoteState.Commands[0]
op, err := opFactory.NewCommands(s.commands.GetCommand(id))
if err != nil {
return nil, err
}
// Wrap the operation so completion removes the command from the
// queue and notifies the completion callback.
commandCompleted := func() {
s.commands.RemoveCommand(id)
s.commandCompleted(id)
}
return &commandCompleter{op, commandCompleted}, nil
}
|
go
|
{
"resource": ""
}
|
q5040
|
InstanceAvailabilityZoneNames
|
train
|
// InstanceAvailabilityZoneNames returns the availability zone of each
// requested instance. ErrPartialInstances is propagated when fewer
// zones than ids could be resolved.
func (e *Environ) InstanceAvailabilityZoneNames(ctx envcontext.ProviderCallContext, ids []instance.Id) ([]string, error) {
	found, err := e.Instances(ctx, ids)
	if err != nil && err != environs.ErrPartialInstances {
		providerCommon.HandleCredentialError(err, ctx)
		return nil, err
	}
	zones := make([]string, 0, len(found))
	for _, inst := range found {
		zones = append(zones, inst.(*ociInstance).availabilityZone())
	}
	if len(zones) < len(ids) {
		return zones, environs.ErrPartialInstances
	}
	return zones, nil
}
|
go
|
{
"resource": ""
}
|
q5041
|
DeriveAvailabilityZones
|
train
|
// DeriveAvailabilityZones returns no zones: this provider does not
// derive availability zones from start-instance parameters.
func (e *Environ) DeriveAvailabilityZones(ctx envcontext.ProviderCallContext, args environs.StartInstanceParams) ([]string, error) {
return nil, nil
}
|
go
|
{
"resource": ""
}
|
q5042
|
Instances
|
train
|
// Instances returns the instances matching the given ids. An empty id
// list yields nil; when only some instances are found the partial list
// is returned together with environs.ErrPartialInstances.
func (e *Environ) Instances(ctx envcontext.ProviderCallContext, ids []instance.Id) ([]instances.Instance, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	found, err := e.getOciInstances(ctx, ids...)
	if err != nil && err != environs.ErrPartialInstances {
		providerCommon.HandleCredentialError(err, ctx)
		return nil, errors.Trace(err)
	}
	result := make([]instances.Instance, 0, len(found))
	for _, inst := range found {
		result = append(result, inst)
	}
	if len(result) < len(ids) {
		return result, environs.ErrPartialInstances
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q5043
|
AdoptResources
|
train
|
// AdoptResources is not supported by this provider and always returns a
// NotImplemented error.
func (e *Environ) AdoptResources(ctx envcontext.ProviderCallContext, controllerUUID string, fromVersion version.Number) error {
return errors.NotImplementedf("AdoptResources")
}
|
go
|
{
"resource": ""
}
|
q5044
|
ConstraintsValidator
|
train
|
// ConstraintsValidator returns a validator that rejects constraints the
// OCI provider cannot honour and restricts architecture to amd64.
func (e *Environ) ConstraintsValidator(ctx envcontext.ProviderCallContext) (constraints.Validator, error) {
	validator := constraints.NewValidator()
	// list of unsupported OCI provider constraints
	validator.RegisterUnsupported([]string{
		constraints.Container,
		constraints.VirtType,
		constraints.Tags,
	})
	validator.RegisterVocabulary(constraints.Arch, []string{arch.AMD64})
	logger.Infof("Returning constraints validator: %v", validator)
	return validator, nil
}
|
go
|
{
"resource": ""
}
|
q5045
|
SetConfig
|
train
|
// SetConfig validates the new model configuration and, on success,
// installs it as the environ's current config under the config mutex.
func (e *Environ) SetConfig(cfg *config.Config) error {
	newEcfg, err := e.p.newConfig(cfg)
	if err != nil {
		return err
	}
	e.ecfgMutex.Lock()
	e.ecfgObj = newEcfg
	e.ecfgMutex.Unlock()
	return nil
}
|
go
|
{
"resource": ""
}
|
q5046
|
ControllerInstances
|
train
|
// ControllerInstances returns the ids of all instances tagged as
// controllers for the given controller UUID.
func (e *Environ) ControllerInstances(ctx envcontext.ProviderCallContext, controllerUUID string) ([]instance.Id, error) {
	filter := map[string]string{
		tags.JujuController:   controllerUUID,
		tags.JujuIsController: "true",
	}
	found, err := e.allInstances(ctx, filter)
	if err != nil {
		providerCommon.HandleCredentialError(err, ctx)
		return nil, errors.Trace(err)
	}
	ids := make([]instance.Id, 0, len(found))
	for _, inst := range found {
		ids = append(ids, inst.Id())
	}
	return ids, nil
}
|
go
|
{
"resource": ""
}
|
q5047
|
Destroy
|
train
|
// Destroy tears down the environ's resources via the shared provider
// Destroy implementation.
func (e *Environ) Destroy(ctx envcontext.ProviderCallContext) error {
return common.Destroy(e, ctx)
}
|
go
|
{
"resource": ""
}
|
q5048
|
DestroyController
|
train
|
// DestroyController destroys the controller model's environ, stops all
// instances managed by the controller, and cleans up their network
// resources. The initial Destroy failure is logged but does not abort
// the remaining cleanup.
func (e *Environ) DestroyController(ctx envcontext.ProviderCallContext, controllerUUID string) error {
err := e.Destroy(ctx)
if err != nil {
// Best effort: carry on so controller-managed instances still
// get stopped even if the model teardown failed.
providerCommon.HandleCredentialError(err, ctx)
logger.Errorf("Failed to destroy environment through controller: %s", errors.Trace(err))
}
instances, err := e.allControllerManagedInstances(ctx, controllerUUID)
if err != nil {
// Nothing managed by this controller means nothing left to do.
if err == environs.ErrNoInstances {
return nil
}
providerCommon.HandleCredentialError(err, ctx)
return errors.Trace(err)
}
ids := make([]instance.Id, len(instances))
for i, val := range instances {
ids[i] = val.Id()
}
err = e.StopInstances(ctx, ids...)
if err != nil {
providerCommon.HandleCredentialError(err, ctx)
return errors.Trace(err)
}
logger.Debugf("Cleaning up network resources")
err = e.cleanupNetworksAndSubnets(controllerUUID, "")
if err != nil {
providerCommon.HandleCredentialError(err, ctx)
return errors.Trace(err)
}
return nil
}
|
go
|
{
"resource": ""
}
|
q5049
|
getCloudInitConfig
|
train
|
// getCloudInitConfig returns a cloud-init template for the given series.
// When apiPort is non-zero, firewall rules opening that TCP port are
// added for the series' operating system (iptables on Ubuntu,
// firewalld on CentOS).
func (e *Environ) getCloudInitConfig(series string, apiPort int) (cloudinit.CloudConfig, error) {
// TODO (gsamfira): remove this function when the above mention bug is fixed
cloudcfg, err := cloudinit.New(series)
if err != nil {
return nil, errors.Annotate(err, "cannot create cloudinit template")
}
// No API port means no firewall adjustments are needed.
if apiPort == 0 {
return cloudcfg, nil
}
operatingSystem, err := jujuseries.GetOSFromSeries(series)
if err != nil {
return nil, errors.Trace(err)
}
switch operatingSystem {
case os.Ubuntu:
fwCmd := fmt.Sprintf(
"/sbin/iptables -I INPUT -p tcp --dport %d -j ACCEPT", apiPort)
cloudcfg.AddRunCmd(fwCmd)
cloudcfg.AddScripts("/etc/init.d/netfilter-persistent save")
case os.CentOS:
fwCmd := fmt.Sprintf("firewall-cmd --zone=public --add-port=%d/tcp --permanent", apiPort)
cloudcfg.AddRunCmd(fwCmd)
cloudcfg.AddRunCmd("firewall-cmd --reload")
}
return cloudcfg, nil
}
|
go
|
{
"resource": ""
}
|
q5050
|
MaintainInstance
|
train
|
// MaintainInstance is a no-op for this provider.
func (e *Environ) MaintainInstance(ctx envcontext.ProviderCallContext, args environs.StartInstanceParams) error {
return nil
}
|
go
|
{
"resource": ""
}
|
q5051
|
Config
|
train
|
// Config returns the environ's current model configuration, or nil when
// no configuration has been set yet. Access is guarded by the config
// mutex.
func (e *Environ) Config() *config.Config {
	e.ecfgMutex.Lock()
	defer e.ecfgMutex.Unlock()
	if e.ecfgObj != nil {
		return e.ecfgObj.Config
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5052
|
InstanceTypes
|
train
|
// InstanceTypes is not implemented by this provider.
func (e *Environ) InstanceTypes(envcontext.ProviderCallContext, constraints.Value) (instances.InstanceTypesWithCostMetadata, error) {
return instances.InstanceTypesWithCostMetadata{}, errors.NotImplementedf("InstanceTypes")
}
|
go
|
{
"resource": ""
}
|
q5053
|
MustParseHardware
|
train
|
// MustParseHardware parses the given hardware constraint strings,
// panicking on any parse error. Intended for tests and initialization.
func MustParseHardware(args ...string) HardwareCharacteristics {
	hc, err := ParseHardware(args...)
	if err == nil {
		return hc
	}
	panic(err)
}
|
go
|
{
"resource": ""
}
|
q5054
|
ParseHardware
|
train
|
// ParseHardware builds hardware characteristics from one or more
// strings of space-separated "key=value" fields; empty fields are
// ignored. The first invalid field aborts parsing with an error.
func ParseHardware(args ...string) (HardwareCharacteristics, error) {
	var hc HardwareCharacteristics
	for _, arg := range args {
		for _, field := range strings.Split(strings.TrimSpace(arg), " ") {
			if field == "" {
				continue
			}
			if err := hc.setRaw(field); err != nil {
				return HardwareCharacteristics{}, err
			}
		}
	}
	return hc, nil
}
|
go
|
{
"resource": ""
}
|
q5055
|
parseTags
|
train
|
// parseTags splits a comma-separated tag string into a slice pointer.
// An empty input yields a pointer to an empty (non-nil) slice.
func parseTags(s string) *[]string {
	tags := []string{}
	if s != "" {
		tags = strings.Split(s, ",")
	}
	return &tags
}
|
go
|
{
"resource": ""
}
|
q5056
|
Write
|
train
|
// Write implements io.Writer, forwarding the message to the wrapped
// logger at the configured level. A single trailing newline, when
// present, is stripped.
//
// Bug fix: the previous p[:len(p)-1] panicked on an empty write and
// dropped the final byte even when it was not a newline.
func (w *LoggoWriter) Write(p []byte) (int, error) {
	msg := p
	if n := len(msg); n > 0 && msg[n-1] == '\n' {
		msg = msg[:n-1] // omit trailing newline
	}
	w.Logger.Logf(w.Level, "%s", msg)
	return len(p), nil
}
|
go
|
{
"resource": ""
}
|
q5057
|
SampleCloudSpec
|
train
|
// SampleCloudSpec returns a fully-populated dummy cloud spec with a
// user/pass credential, for use in tests.
func SampleCloudSpec() environs.CloudSpec {
cred := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{"username": "dummy", "password": "secret"})
return environs.CloudSpec{
Type: "dummy",
Name: "dummy",
Endpoint: "dummy-endpoint",
IdentityEndpoint: "dummy-identity-endpoint",
Region: "dummy-region",
StorageEndpoint: "dummy-storage-endpoint",
Credential: &cred,
}
}
|
go
|
{
"resource": ""
}
|
q5058
|
mongoInfo
|
train
|
// mongoInfo returns connection info for the shared test Mongo server,
// panicking when the server has not been started via MgoTestPackage.
func mongoInfo() mongo.MongoInfo {
if gitjujutesting.MgoServer.Addr() == "" {
panic("dummy environ state tests must be run with MgoTestPackage")
}
mongoPort := strconv.Itoa(gitjujutesting.MgoServer.Port())
addrs := []string{net.JoinHostPort("localhost", mongoPort)}
return mongo.MongoInfo{
Info: mongo.Info{
Addrs: addrs,
CACert: testing.CACert,
// TLS is disabled only when the test server itself has SSL off.
DisableTLS: !gitjujutesting.MgoServer.SSLEnabled(),
},
}
}
|
go
|
{
"resource": ""
}
|
q5059
|
Reset
|
train
|
// Reset returns the dummy provider to a pristine state: operations are
// discarded, all recorded environ states are destroyed, and the shared
// test Mongo server is reset (with retries for a known intermittent
// failure).
func Reset(c *gc.C) {
logger.Infof("reset model")
dummy.mu.Lock()
dummy.ops = discardOperations
oldState := dummy.state
dummy.controllerState = nil
dummy.state = make(map[string]*environState)
dummy.newStatePolicy = stateenvirons.GetNewPolicyFunc()
dummy.supportsSpaces = true
dummy.supportsSpaceDiscovery = false
dummy.mu.Unlock()
// NOTE(axw) we must destroy the old states without holding
// the provider lock, or we risk deadlocking. Destroying
// state involves closing the embedded API server, which
// may require waiting on RPC calls that interact with the
// EnvironProvider (e.g. EnvironProvider.Open).
for _, s := range oldState {
if s.httpServer != nil {
logger.Debugf("closing httpServer")
s.httpServer.Close()
}
s.destroy()
}
// Only reset Mongo when the server is still running.
if mongoAlive() {
err := retry.Call(retry.CallArgs{
Func: gitjujutesting.MgoServer.Reset,
// Only interested in retrying the intermittent
// 'unexpected message'.
IsFatalError: func(err error) bool {
return !strings.HasSuffix(err.Error(), "unexpected message")
},
Delay: time.Millisecond,
Clock: clock.WallClock,
Attempts: 5,
})
c.Assert(err, jc.ErrorIsNil)
}
}
|
go
|
{
"resource": ""
}
|
q5060
|
GetStateInAPIServer
|
train
|
// GetStateInAPIServer returns the state used by the embedded API
// server, panicking when the environ state cannot be obtained.
func (e *environ) GetStateInAPIServer() *state.State {
	est, err := e.state()
	if err != nil {
		panic(err)
	}
	return est.apiState
}
|
go
|
{
"resource": ""
}
|
q5061
|
GetStatePoolInAPIServer
|
train
|
// GetStatePoolInAPIServer returns the state pool used by the embedded
// API server, panicking when the environ state cannot be obtained.
func (e *environ) GetStatePoolInAPIServer() *state.StatePool {
	est, err := e.state()
	if err != nil {
		panic(err)
	}
	return est.apiStatePool
}
|
go
|
{
"resource": ""
}
|
q5062
|
GetHubInAPIServer
|
train
|
// GetHubInAPIServer returns the pubsub hub used by the embedded API
// server, panicking when the environ state cannot be obtained.
func (e *environ) GetHubInAPIServer() *pubsub.StructuredHub {
	est, err := e.state()
	if err != nil {
		panic(err)
	}
	return est.hub
}
|
go
|
{
"resource": ""
}
|
q5063
|
GetLeaseManagerInAPIServer
|
train
|
// GetLeaseManagerInAPIServer returns the lease manager used by the
// embedded API server, panicking when the environ state cannot be
// obtained.
func (e *environ) GetLeaseManagerInAPIServer() corelease.Manager {
	est, err := e.state()
	if err != nil {
		panic(err)
	}
	return est.leaseManager
}
|
go
|
{
"resource": ""
}
|
q5064
|
GetController
|
train
|
// GetController returns the cached controller of the environ state,
// panicking when the state cannot be obtained.
func (e *environ) GetController() *cache.Controller {
	est, err := e.state()
	if err != nil {
		panic(err)
	}
	return est.controller
}
|
go
|
{
"resource": ""
}
|
q5065
|
newState
|
train
|
// newState creates a fresh state value for the named dummy environ.
// The creating goroutine's stack trace is recorded in the creator
// field, which helps debug duplicated or leaked states.
func newState(name string, ops chan<- Operation, newStatePolicy state.NewPolicyFunc) *environState {
buf := make([]byte, 8192)
buf = buf[:runtime.Stack(buf, false)]
s := &environState{
name: name,
ops: ops,
newStatePolicy: newStatePolicy,
insts: make(map[instance.Id]*dummyInstance),
creator: string(buf),
}
return s
}
|
go
|
{
"resource": ""
}
|
q5066
|
listenAPI
|
train
|
// listenAPI sets up (but does not start) a TLS test HTTP server for the
// dummy API and returns the port it is bound to.
func (s *environState) listenAPI() int {
certPool, err := api.CreateCertPool(testing.CACert)
if err != nil {
panic(err)
}
tlsConfig := api.NewTLSConfig(certPool)
// API clients verify this server name against the test certificate.
tlsConfig.ServerName = "juju-apiserver"
tlsConfig.Certificates = []tls.Certificate{*testing.ServerTLSCert}
s.mux = apiserverhttp.NewMux()
s.httpServer = httptest.NewUnstartedServer(s.mux)
s.httpServer.TLS = tlsConfig
return s.httpServer.Listener.Addr().(*net.TCPAddr).Port
}
|
go
|
{
"resource": ""
}
|
q5067
|
SetSupportsSpaces
|
train
|
// SetSupportsSpaces toggles whether the dummy provider claims space
// support, returning the previous setting.
func SetSupportsSpaces(supports bool) bool {
	dummy.mu.Lock()
	defer dummy.mu.Unlock()
	previous := dummy.supportsSpaces
	dummy.supportsSpaces = supports
	return previous
}
|
go
|
{
"resource": ""
}
|
q5068
|
SetSupportsSpaceDiscovery
|
train
|
// SetSupportsSpaceDiscovery toggles whether the dummy provider claims
// space-discovery support, returning the previous setting.
func SetSupportsSpaceDiscovery(supports bool) bool {
	dummy.mu.Lock()
	defer dummy.mu.Unlock()
	previous := dummy.supportsSpaceDiscovery
	dummy.supportsSpaceDiscovery = supports
	return previous
}
|
go
|
{
"resource": ""
}
|
q5069
|
PrecheckInstance
|
train
|
// PrecheckInstance accepts an empty placement or the literal "valid";
// anything else is rejected with an error.
func (*environ) PrecheckInstance(ctx context.ProviderCallContext, args environs.PrecheckInstanceParams) error {
	switch args.Placement {
	case "", "valid":
		return nil
	}
	return fmt.Errorf("%s placement is invalid", args.Placement)
}
|
go
|
{
"resource": ""
}
|
q5070
|
SupportsSpaceDiscovery
|
train
|
// SupportsSpaceDiscovery reports the provider-wide space-discovery
// flag, honouring the configured "broken" behaviour first.
func (env *environ) SupportsSpaceDiscovery(ctx context.ProviderCallContext) (bool, error) {
	if err := env.checkBroken("SupportsSpaceDiscovery"); err != nil {
		return false, err
	}
	dummy.mu.Lock()
	defer dummy.mu.Unlock()
	return dummy.supportsSpaceDiscovery, nil
}
|
go
|
{
"resource": ""
}
|
q5071
|
Spaces
|
train
|
// Spaces returns a fixed set of test spaces, honouring the configured
// "broken" behaviour first. The space names deliberately include
// invalid characters to exercise name-normalisation in callers.
func (env *environ) Spaces(ctx context.ProviderCallContext) ([]network.SpaceInfo, error) {
if err := env.checkBroken("Spaces"); err != nil {
return []network.SpaceInfo{}, err
}
return []network.SpaceInfo{{
Name: "foo",
ProviderId: network.Id("0"),
Subnets: []network.SubnetInfo{{
ProviderId: network.Id("1"),
AvailabilityZones: []string{"zone1"},
}, {
ProviderId: network.Id("2"),
AvailabilityZones: []string{"zone1"},
}}}, {
Name: "Another Foo 99!",
ProviderId: "1",
Subnets: []network.SubnetInfo{{
ProviderId: network.Id("3"),
AvailabilityZones: []string{"zone1"},
}}}, {
Name: "foo-",
ProviderId: "2",
Subnets: []network.SubnetInfo{{
ProviderId: network.Id("4"),
AvailabilityZones: []string{"zone1"},
}}}, {
Name: "---",
ProviderId: "3",
Subnets: []network.SubnetInfo{{
ProviderId: network.Id("5"),
AvailabilityZones: []string{"zone1"},
}}}}, nil
}
|
go
|
{
"resource": ""
}
|
q5072
|
AvailabilityZones
|
train
|
// AvailabilityZones returns the fixed set of dummy zones; zone2 is
// deliberately marked unavailable so callers can exercise fallback paths.
func (env *environ) AvailabilityZones(ctx context.ProviderCallContext) ([]common.AvailabilityZone, error) {
	// TODO(dimitern): Fix this properly.
	zones := []common.AvailabilityZone{
		azShim{"zone1", true},
		azShim{"zone2", false},
		azShim{"zone3", true},
		azShim{"zone4", true},
	}
	return zones, nil
}
|
go
|
{
"resource": ""
}
|
q5073
|
InstanceAvailabilityZoneNames
|
train
|
// InstanceAvailabilityZoneNames assigns an availability-zone name to each
// given instance ID, cycling through the zones from AvailabilityZones and
// stepping past any zone that reports itself unavailable.
func (env *environ) InstanceAvailabilityZoneNames(ctx context.ProviderCallContext, ids []instance.Id) ([]string, error) {
	if err := env.checkBroken("InstanceAvailabilityZoneNames"); err != nil {
		// The broken-method error is deliberately discarded and surfaced
		// as a generic not-supported error.
		return nil, errors.NotSupportedf("instance availability zones")
	}
	availabilityZones, err := env.AvailabilityZones(ctx)
	if err != nil {
		return nil, err
	}
	azMaxIndex := len(availabilityZones) - 1
	azIndex := 0
	returnValue := make([]string, len(ids))
	for i := range ids {
		if availabilityZones[azIndex].Available() {
			returnValue[i] = availabilityZones[azIndex].Name()
		} else {
			// Based on knowledge of how the AZs are setup above
			// in AvailabilityZones()
			azIndex += 1
			returnValue[i] = availabilityZones[azIndex].Name()
		}
		azIndex += 1
		// NOTE(review): wrapping when azIndex EQUALS azMaxIndex (rather
		// than when it passes it) means the last zone is only ever reached
		// via the unavailable-zone fallback above — confirm this is the
		// intended round-robin for the dummy fixture.
		if azIndex == azMaxIndex {
			azIndex = 0
		}
	}
	return returnValue, nil
}
|
go
|
{
"resource": ""
}
|
q5074
|
Subnets
|
train
|
// Subnets returns the dummy provider's subnets, optionally filtered by the
// given subnet IDs (an empty filter returns everything). When space
// discovery is enabled, a richer subnet set is returned instead. An
// OpSubnets record of the call is always sent to the ops channel.
func (env *environ) Subnets(ctx context.ProviderCallContext, instId instance.Id, subnetIds []network.Id) ([]network.SubnetInfo, error) {
	if err := env.checkBroken("Subnets"); err != nil {
		return nil, err
	}
	estate, err := env.state()
	if err != nil {
		return nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if ok, _ := env.SupportsSpaceDiscovery(ctx); ok {
		// Space discovery needs more subnets to work with.
		return env.subnetsForSpaceDiscovery(estate)
	}
	allSubnets := []network.SubnetInfo{{
		CIDR:              "0.10.0.0/24",
		ProviderId:        "dummy-private",
		AvailabilityZones: []string{"zone1", "zone2"},
	}, {
		CIDR:       "0.20.0.0/24",
		ProviderId: "dummy-public",
	}}
	// Filter result by ids, if given.
	var result []network.SubnetInfo
	for _, subId := range subnetIds {
		switch subId {
		case "dummy-private":
			result = append(result, allSubnets[0])
		case "dummy-public":
			result = append(result, allSubnets[1])
		}
	}
	if len(subnetIds) == 0 {
		result = append([]network.SubnetInfo{}, allSubnets...)
	}
	// The empty-result and non-empty-result paths previously duplicated
	// this identical send-and-return; a single exit path suffices.
	estate.ops <- OpSubnets{
		Env:        env.name,
		InstanceId: instId,
		SubnetIds:  subnetIds,
		Info:       result,
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q5075
|
SetInstanceAddresses
|
train
|
// SetInstanceAddresses replaces the addresses of the given dummy instance
// (the argument must be a *dummyInstance), reusing the existing backing
// slice, and logs the change.
func SetInstanceAddresses(inst instances.Instance, addrs []network.Address) {
	di := inst.(*dummyInstance)
	di.mu.Lock()
	defer di.mu.Unlock()
	di.addresses = append(di.addresses[:0], addrs...)
	logger.Debugf("setting instance %q addresses to %v", di.Id(), addrs)
}
|
go
|
{
"resource": ""
}
|
q5076
|
SetInstanceStatus
|
train
|
// SetInstanceStatus sets the status string of the given dummy instance
// (the argument must be a *dummyInstance).
func SetInstanceStatus(inst instances.Instance, status string) {
	di := inst.(*dummyInstance)
	di.mu.Lock()
	defer di.mu.Unlock()
	di.status = status
}
|
go
|
{
"resource": ""
}
|
q5077
|
SetInstanceBroken
|
train
|
// SetInstanceBroken marks the named methods of the given dummy instance
// (the argument must be a *dummyInstance) as broken for testing.
func SetInstanceBroken(inst instances.Instance, methods ...string) {
	di := inst.(*dummyInstance)
	di.mu.Lock()
	defer di.mu.Unlock()
	di.broken = methods
}
|
go
|
{
"resource": ""
}
|
q5078
|
SSHAddresses
|
train
|
// SSHAddresses returns the given addresses with the test sentinel
// 100.100.100.100 filtered out.
func (*environ) SSHAddresses(ctx context.ProviderCallContext, addresses []network.Address) ([]network.Address, error) {
	var filtered []network.Address
	for _, a := range addresses {
		if a.Value == "100.100.100.100" {
			continue
		}
		filtered = append(filtered, a)
	}
	return filtered, nil
}
|
go
|
{
"resource": ""
}
|
q5079
|
SetAgentStatus
|
train
|
// SetAgentStatus records a presence status for the named agent in the
// environ's in-memory state. Panics if the state cannot be obtained
// (test helper semantics).
func (e *environ) SetAgentStatus(agent string, status presence.Status) {
	st, err := e.state()
	if err != nil {
		panic(err)
	}
	st.presence.agent[agent] = status
}
|
go
|
{
"resource": ""
}
|
q5080
|
SeriesImage
|
train
|
// SeriesImage resolves the Azure image to use for the given series in the
// given location, returning an amd64 Hyper-V image whose ID is the URN
// "publisher:offer:sku:latest". Ubuntu SKUs are looked up dynamically via
// ubuntuSKU; Windows and CentOS use fixed mappings.
func SeriesImage(
	ctx context.ProviderCallContext,
	series, stream, location string,
	client compute.VirtualMachineImagesClient,
) (*instances.Image, error) {
	seriesOS, err := jujuseries.GetOSFromSeries(series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var publisher, offering, sku string
	switch seriesOS {
	case os.Ubuntu:
		publisher, offering = ubuntuPublisher, ubuntuOffering
		if sku, err = ubuntuSKU(ctx, series, stream, location, client); err != nil {
			return nil, errors.Annotatef(err, "selecting SKU for %s", series)
		}
	case os.Windows:
		// Each supported Windows series maps onto a fixed
		// publisher/offering/SKU triple.
		win, ok := map[string][3]string{
			"win81":     {windowsPublisher, windowsOffering, "8.1-Enterprise-N"},
			"win10":     {windowsPublisher, windowsOffering, "10-Enterprise"},
			"win2012":   {windowsServerPublisher, windowsServerOffering, "2012-Datacenter"},
			"win2012r2": {windowsServerPublisher, windowsServerOffering, "2012-R2-Datacenter"},
		}[series]
		if !ok {
			return nil, errors.NotSupportedf("deploying %s", series)
		}
		publisher, offering, sku = win[0], win[1], win[2]
	case os.CentOS:
		if series != "centos7" {
			return nil, errors.NotSupportedf("deploying %s", series)
		}
		publisher, offering, sku = centOSPublisher, centOSOffering, "7.3"
	default:
		return nil, errors.NotSupportedf("deploying %s", seriesOS)
	}
	return &instances.Image{
		Id:       fmt.Sprintf("%s:%s:%s:latest", publisher, offering, sku),
		Arch:     arch.AMD64,
		VirtType: "Hyper-V",
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5081
|
controllerAddresses
|
train
|
// controllerAddresses returns the bare address values (no ports) of all
// machines carrying JobManageModel in the controller model, one selected
// address per machine via network.SelectControllerAddress. It errors if no
// controller machines, or no machines with a selectable address, exist.
func (st *State) controllerAddresses() ([]string, error) {
	cinfo, err := st.ControllerInfo()
	if err != nil {
		return nil, errors.Trace(err)
	}
	var machines mongo.Collection
	var closer SessionCloser
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The machines collection is read from the controller model, which may
	// or may not be the model this State is attached to.
	if model.ModelTag() == cinfo.ModelTag {
		machines, closer = st.db().GetCollection(machinesC)
	} else {
		machines, closer = st.db().GetCollectionFor(cinfo.ModelTag.Id(), machinesC)
	}
	defer closer()
	// Only the Addresses field of each machine document is needed.
	type addressMachine struct {
		Addresses []address
	}
	var allAddresses []addressMachine
	// TODO(rog) 2013/10/14 index machines on jobs.
	err = machines.Find(bson.D{{"jobs", JobManageModel}}).All(&allAddresses)
	if err != nil {
		return nil, err
	}
	if len(allAddresses) == 0 {
		return nil, errors.New("no controller machines found")
	}
	apiAddrs := make([]string, 0, len(allAddresses))
	for _, addrs := range allAddresses {
		naddrs := networkAddresses(addrs.Addresses)
		// Machines without a selectable controller address are skipped;
		// the final check below catches the case where all were skipped.
		addr, ok := network.SelectControllerAddress(naddrs, false)
		if ok {
			apiAddrs = append(apiAddrs, addr.Value)
		}
	}
	if len(apiAddrs) == 0 {
		return nil, errors.New("no controller machines with addresses found")
	}
	return apiAddrs, nil
}
|
go
|
{
"resource": ""
}
|
q5082
|
Addresses
|
train
|
// Addresses returns the addresses of all controller machines, each with
// the configured state (MongoDB) port appended.
func (st *State) Addresses() ([]string, error) {
	addrs, err := st.controllerAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	cfg, err := st.ControllerConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return appendPort(addrs, cfg.StatePort()), nil
}
|
go
|
{
"resource": ""
}
|
q5083
|
filterHostPortsForManagementSpace
|
train
|
// filterHostPortsForManagementSpace restricts each address list to the
// configured juju-mgmt-space where possible. Lists with no address in that
// space are passed through unfiltered, and when no management space is
// configured the input is returned unchanged.
func (st *State) filterHostPortsForManagementSpace(apiHostPorts [][]network.HostPort) ([][]network.HostPort, error) {
	cfg, err := st.ControllerConfig()
	if err != nil {
		return nil, err
	}
	mgmtSpace := cfg.JujuManagementSpace()
	if mgmtSpace == "" {
		return apiHostPorts, nil
	}
	spaceName := network.SpaceName(mgmtSpace)
	hostPortsForAgents := make([][]network.HostPort, len(apiHostPorts))
	for i, hps := range apiHostPorts {
		filtered, ok := network.SelectHostPortsBySpaceNames(hps, spaceName)
		if ok {
			hostPortsForAgents[i] = filtered
		} else {
			// Fall back to the unfiltered list rather than dropping it.
			hostPortsForAgents[i] = hps
		}
	}
	return hostPortsForAgents, nil
}
|
go
|
{
"resource": ""
}
|
q5084
|
APIHostPortsForAgents
|
train
|
// APIHostPortsForAgents returns the API host/port lists agents should use.
// CAAS controllers use their dedicated lookup; otherwise the agent-specific
// document is read, falling back to the client addresses when it is absent.
func (st *State) APIHostPortsForAgents() ([][]network.HostPort, error) {
	caas, err := st.isCAASController()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if caas {
		// TODO(caas): add test for this once we have the replacement for Jujuconnsuite.
		return st.apiHostPortsForCAAS(false)
	}
	hps, err := st.apiHostPortsForKey(apiHostPortsForAgentsKey)
	switch {
	case err == nil:
		return hps, nil
	case err == mgo.ErrNotFound:
		logger.Debugf("No document for %s; using %s", apiHostPortsForAgentsKey, apiHostPortsKey)
		return st.APIHostPortsForClients()
	}
	return nil, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5085
|
apiHostPortsForKey
|
train
|
// apiHostPortsForKey loads the host/port document stored under the given
// key in the controllers collection. The raw mgo error (e.g.
// mgo.ErrNotFound) is returned unwrapped so callers can branch on it.
func (st *State) apiHostPortsForKey(key string) ([][]network.HostPort, error) {
	controllers, closer := st.db().GetCollection(controllersC)
	defer closer()
	var doc apiHostPortsDoc
	if err := controllers.Find(bson.D{{"_id", key}}).One(&doc); err != nil {
		return nil, err
	}
	return networkHostsPorts(doc.APIHostPorts), nil
}
|
go
|
{
"resource": ""
}
|
q5086
|
fromNetworkAddress
|
train
|
// fromNetworkAddress converts a network.Address into its persisted
// document form, stamping it with the given origin.
func fromNetworkAddress(netAddr network.Address, origin Origin) address {
	doc := address{
		Value:       netAddr.Value,
		AddressType: string(netAddr.Type),
		Scope:       string(netAddr.Scope),
		Origin:      string(origin),
		SpaceName:   string(netAddr.SpaceName),
	}
	return doc
}
|
go
|
{
"resource": ""
}
|
q5087
|
networkAddress
|
train
|
// networkAddress converts the persisted address document back into a
// network.Address (the origin field is not carried over).
func (addr *address) networkAddress() network.Address {
	na := network.Address{
		Value:     addr.Value,
		Type:      network.AddressType(addr.AddressType),
		Scope:     network.Scope(addr.Scope),
		SpaceName: network.SpaceName(addr.SpaceName),
	}
	return na
}
|
go
|
{
"resource": ""
}
|
q5088
|
fromNetworkAddresses
|
train
|
// fromNetworkAddresses converts a slice of network.Address values into
// persisted documents, all stamped with the same origin.
func fromNetworkAddresses(netAddrs []network.Address, origin Origin) []address {
	out := make([]address, 0, len(netAddrs))
	for _, na := range netAddrs {
		out = append(out, fromNetworkAddress(na, origin))
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q5089
|
networkAddresses
|
train
|
// networkAddresses converts a slice of persisted address documents back
// into network.Address values.
func networkAddresses(addrs []address) []network.Address {
	out := make([]network.Address, 0, len(addrs))
	for _, a := range addrs {
		out = append(out, a.networkAddress())
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q5090
|
fromNetworkHostPort
|
train
|
// fromNetworkHostPort converts a network.HostPort into its persisted
// document form.
func fromNetworkHostPort(netHostPort network.HostPort) hostPort {
	doc := hostPort{
		Value:       netHostPort.Value,
		AddressType: string(netHostPort.Type),
		Scope:       string(netHostPort.Scope),
		Port:        netHostPort.Port,
		SpaceName:   string(netHostPort.SpaceName),
	}
	return doc
}
|
go
|
{
"resource": ""
}
|
q5091
|
networkHostPort
|
train
|
// networkHostPort converts the persisted hostPort document back into a
// network.HostPort.
func (hp *hostPort) networkHostPort() network.HostPort {
	addr := network.Address{
		Value:     hp.Value,
		Type:      network.AddressType(hp.AddressType),
		Scope:     network.Scope(hp.Scope),
		SpaceName: network.SpaceName(hp.SpaceName),
	}
	return network.HostPort{Address: addr, Port: hp.Port}
}
|
go
|
{
"resource": ""
}
|
q5092
|
fromNetworkHostsPorts
|
train
|
// fromNetworkHostsPorts converts a nested slice of network.HostPort values
// into the persisted document form, preserving the nesting.
func fromNetworkHostsPorts(netHostsPorts [][]network.HostPort) [][]hostPort {
	out := make([][]hostPort, len(netHostsPorts))
	for i, hps := range netHostsPorts {
		row := make([]hostPort, 0, len(hps))
		for _, hp := range hps {
			row = append(row, fromNetworkHostPort(hp))
		}
		out[i] = row
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q5093
|
networkHostsPorts
|
train
|
// networkHostsPorts converts a nested slice of persisted hostPort
// documents back into network.HostPort values, preserving the nesting.
func networkHostsPorts(hsps [][]hostPort) [][]network.HostPort {
	out := make([][]network.HostPort, len(hsps))
	for i, hps := range hsps {
		row := make([]network.HostPort, 0, len(hps))
		for _, hp := range hps {
			row = append(row, hp.networkHostPort())
		}
		out[i] = row
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q5094
|
addressesEqual
|
train
|
// addressesEqual reports whether the two address slices are deeply equal,
// including element order.
func addressesEqual(a, b []network.Address) bool {
	return reflect.DeepEqual(a, b)
}
|
go
|
{
"resource": ""
}
|
q5095
|
hostsPortsEqual
|
train
|
// hostsPortsEqual reports whether a and b contain the same host/port
// lists, ignoring ordering. Each argument is duplicated and sorted before
// comparison so the callers' slices are never mutated.
func hostsPortsEqual(a, b [][]network.HostPort) bool {
	return reflect.DeepEqual(dupeAndSort(a), dupeAndSort(b))
}
|
go
|
{
"resource": ""
}
|
q5096
|
connRateMetric
|
train
|
// connRateMetric estimates the recent inbound connection rate, in
// connections per second, from the ring buffer of accept timestamps
// (connAcceptTimes, with nextSlot pointing at the next write position).
// It returns 0 when fewer than two timestamps are available.
func (l *throttlingListener) connRateMetric() int {
	l.Lock()
	defer l.Unlock()
	var (
		earliestConnTime *time.Time
		connCount        float64
	)
	// Figure out the most recent connection timestamp.
	startIndex := l.nextSlot - 1
	if startIndex < 0 {
		startIndex = len(l.connAcceptTimes) - 1
	}
	latestConnTime := l.connAcceptTimes[startIndex]
	if latestConnTime == nil {
		// No connections have been recorded yet.
		return 0
	}
	// Loop backwards to get the earlier known connection timestamp.
	for index := startIndex; index != l.nextSlot; {
		if connTime := l.connAcceptTimes[index]; connTime == nil {
			break
		} else {
			earliestConnTime = connTime
		}
		connCount++
		// Stop if we have reached the maximum window in terms how long
		// ago the earliest connection was, to avoid stale data skewing results.
		if latestConnTime.Sub(*earliestConnTime) > l.lookbackWindow {
			break
		}
		index--
		if index < 0 {
			// Wrap around to the end of the ring buffer.
			index = len(l.connAcceptTimes) - 1
		}
	}
	if connCount < 2 {
		// A rate needs at least two samples.
		return 0
	}
	// Connections per second; the +1.0 in the denominator guards against
	// a zero interval between the earliest and latest timestamps.
	connRate := connCount * float64(time.Second) / (1.0 + float64(latestConnTime.Sub(*earliestConnTime)))
	logger.Tracef("server listener has received %d connections per second", int(connRate))
	return int(connRate)
}
|
go
|
{
"resource": ""
}
|
q5097
|
pauseTime
|
train
|
// pauseTime returns how long to pause before accepting another connection:
// minPause at or below the lower threshold, maxPause at or above the upper
// threshold, and a linear interpolation in between.
func (l *throttlingListener) pauseTime() time.Duration {
	rate := l.connRateMetric()
	switch {
	case rate <= l.lowerThreshold:
		return l.minPause
	case rate >= l.upperThreshold:
		return l.maxPause
	}
	// Interpolate between the two thresholds.
	fraction := float64(rate-l.lowerThreshold) / float64(l.upperThreshold-l.lowerThreshold)
	return l.minPause + time.Duration(float64(l.maxPause-l.minPause)*fraction)
}
|
go
|
{
"resource": ""
}
|
q5098
|
Run
|
train
|
// Run connects to the API and unexposes the application, translating any
// change-block error into a user-facing message.
func (c *unexposeCommand) Run(_ *cmd.Context) error {
	client, err := c.getAPI()
	if err != nil {
		return err
	}
	defer client.Close()
	err = client.Unexpose(c.ApplicationName)
	return block.ProcessBlockedError(err, block.BlockChange)
}
|
go
|
{
"resource": ""
}
|
q5099
|
Set
|
train
|
// Set parses a single "name=value" pair and stores it in the underlying
// map, lazily allocating the map on first use. Pairs without an "=" and
// duplicate names are rejected.
func (m stringMap) Set(s string) error {
	if *m.mapping == nil {
		*m.mapping = map[string]string{}
	}
	// Dereference once so the following code is less ugly.
	target := *m.mapping
	eq := strings.Index(s, "=")
	if eq < 0 {
		return errors.NewNotValid(nil, "badly formatted name value pair: "+s)
	}
	name, value := s[:eq], s[eq+1:]
	if _, exists := target[name]; exists {
		return errors.Errorf("duplicate name specified: %q", name)
	}
	target[name] = value
	return nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.