_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q4900
NewSocketListener
train
func NewSocketListener(socketPath string, handler ConnectionHandler) (*socketListener, error) { listener, err := sockets.Listen(socketPath) if err != nil { return nil, errors.Trace(err) } sListener := &socketListener{listener: listener, handler: handler} sListener.t.Go(sListener.loop) return sListener, nil }
go
{ "resource": "" }
q4901
Stop
train
func (l *socketListener) Stop() error { l.t.Kill(nil) err := l.listener.Close() if err != nil { logger.Errorf("failed to close the collect-metrics listener: %v", err) } return l.t.Wait() }
go
{ "resource": "" }
q4902
NewPeriodicWorker
train
func NewPeriodicWorker(do jworker.PeriodicWorkerCall, period time.Duration, newTimer func(time.Duration) jworker.PeriodicTimer, stop func()) worker.Worker { return &periodicWorker{ Worker: jworker.NewPeriodicWorker(do, period, newTimer, jworker.Jitter(0.2)), stop: stop, } }
go
{ "resource": "" }
q4903
NewWorker
train
func NewWorker(config Config) (worker.Worker, error) { if err := config.Validate(); err != nil { return nil, errors.Trace(err) } mc, err := modelCredential(config.Facade) if err != nil { return nil, errors.Trace(err) } // This worker needs to monitor both the changes to the credential content that // this model uses as well as what credential the model uses. // It needs to be restarted if there is a change in either. mcw, err := config.Facade.WatchModelCredential() if err != nil { return nil, errors.Trace(err) } v := &validator{ validatorFacade: config.Facade, credential: mc, modelCredentialWatcher: mcw, } // The watcher needs to be added to the worker's catacomb plan // here in order to be controlled by this worker's lifecycle events: // for example, to be destroyed when this worker is destroyed, etc. // We also add the watcher to the Plan.Init collection to ensure that // the worker's Plan.Work method is executed after the watcher // is initialised and watcher's changes collection obtains the changes. // Watchers that are added using catacomb.Add method // miss out on a first call of Worker's Plan.Work method and can, thus, // be missing out on an initial change. plan := catacomb.Plan{ Site: &v.catacomb, Work: v.loop, Init: []worker.Worker{v.modelCredentialWatcher}, } if mc.CloudCredential != "" { var err error v.credentialWatcher, err = config.Facade.WatchCredential(mc.CloudCredential) if err != nil { return nil, errors.Trace(err) } plan.Init = append(plan.Init, v.credentialWatcher) } if err := catacomb.Invoke(plan); err != nil { return nil, errors.Trace(err) } return v, nil }
go
{ "resource": "" }
q4904
FilesystemParams
train
func FilesystemParams( f state.Filesystem, storageInstance state.StorageInstance, modelUUID, controllerUUID string, environConfig *config.Config, poolManager poolmanager.PoolManager, registry storage.ProviderRegistry, ) (params.FilesystemParams, error) { var pool string var size uint64 if stateFilesystemParams, ok := f.Params(); ok { pool = stateFilesystemParams.Pool size = stateFilesystemParams.Size } else { filesystemInfo, err := f.Info() if err != nil { return params.FilesystemParams{}, errors.Trace(err) } pool = filesystemInfo.Pool size = filesystemInfo.Size } filesystemTags, err := StorageTags(storageInstance, modelUUID, controllerUUID, environConfig) if err != nil { return params.FilesystemParams{}, errors.Annotate(err, "computing storage tags") } providerType, cfg, err := StoragePoolConfig(pool, poolManager, registry) if err != nil { return params.FilesystemParams{}, errors.Trace(err) } result := params.FilesystemParams{ f.Tag().String(), "", // volume tag size, string(providerType), cfg.Attrs(), filesystemTags, nil, // attachment params set by the caller } volumeTag, err := f.Volume() if err == nil { result.VolumeTag = volumeTag.String() } else if err != state.ErrNoBackingVolume { return params.FilesystemParams{}, errors.Trace(err) } return result, nil }
go
{ "resource": "" }
q4905
FilesystemToState
train
func FilesystemToState(v params.Filesystem) (names.FilesystemTag, state.FilesystemInfo, error) { filesystemTag, err := names.ParseFilesystemTag(v.FilesystemTag) if err != nil { return names.FilesystemTag{}, state.FilesystemInfo{}, errors.Trace(err) } return filesystemTag, state.FilesystemInfo{ v.Info.Size, "", // pool is set by state v.Info.FilesystemId, }, nil }
go
{ "resource": "" }
q4906
FilesystemFromState
train
func FilesystemFromState(f state.Filesystem) (params.Filesystem, error) { info, err := f.Info() if err != nil { return params.Filesystem{}, errors.Trace(err) } result := params.Filesystem{ f.FilesystemTag().String(), "", FilesystemInfoFromState(info), } volumeTag, err := f.Volume() if err == nil { result.VolumeTag = volumeTag.String() } else if err != state.ErrNoBackingVolume { return params.Filesystem{}, errors.Trace(err) } return result, nil }
go
{ "resource": "" }
q4907
FilesystemInfoFromState
train
func FilesystemInfoFromState(info state.FilesystemInfo) params.FilesystemInfo { return params.FilesystemInfo{ info.FilesystemId, info.Pool, info.Size, } }
go
{ "resource": "" }
q4908
FilesystemAttachmentToState
train
func FilesystemAttachmentToState(in params.FilesystemAttachment) (names.MachineTag, names.FilesystemTag, state.FilesystemAttachmentInfo, error) { machineTag, err := names.ParseMachineTag(in.MachineTag) if err != nil { return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err } filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) if err != nil { return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err } info := state.FilesystemAttachmentInfo{ in.Info.MountPoint, in.Info.ReadOnly, } return machineTag, filesystemTag, info, nil }
go
{ "resource": "" }
q4909
FilesystemAttachmentFromState
train
func FilesystemAttachmentFromState(v state.FilesystemAttachment) (params.FilesystemAttachment, error) { info, err := v.Info() if err != nil { return params.FilesystemAttachment{}, errors.Trace(err) } return params.FilesystemAttachment{ v.Filesystem().String(), v.Host().String(), FilesystemAttachmentInfoFromState(info), }, nil }
go
{ "resource": "" }
q4910
FilesystemAttachmentInfoFromState
train
func FilesystemAttachmentInfoFromState(info state.FilesystemAttachmentInfo) params.FilesystemAttachmentInfo { return params.FilesystemAttachmentInfo{ info.MountPoint, info.ReadOnly, } }
go
{ "resource": "" }
q4911
ParseFilesystemAttachmentIds
train
func ParseFilesystemAttachmentIds(stringIds []string) ([]params.MachineStorageId, error) { ids := make([]params.MachineStorageId, len(stringIds)) for i, s := range stringIds { m, f, err := state.ParseFilesystemAttachmentId(s) if err != nil { return nil, err } ids[i] = params.MachineStorageId{ MachineTag: m.String(), AttachmentTag: f.String(), } } return ids, nil }
go
{ "resource": "" }
q4912
API2Result
train
func API2Result(r params.PayloadResult) (payload.Result, error) { result := payload.Result{ NotFound: r.NotFound, } id, err := api.API2ID(r.Tag) if err != nil { return result, errors.Trace(err) } result.ID = id if r.Payload != nil { pl, err := api.API2Payload(*r.Payload) if err != nil { return result, errors.Trace(err) } result.Payload = &pl } if r.Error != nil { result.Error = common.RestoreError(r.Error) } return result, nil }
go
{ "resource": "" }
q4913
Payloads2TrackArgs
train
func Payloads2TrackArgs(payloads []payload.Payload) params.TrackPayloadArgs { var args params.TrackPayloadArgs for _, pl := range payloads { fullPayload := payload.FullPayloadInfo{Payload: pl} arg := api.Payload2api(fullPayload) args.Payloads = append(args.Payloads, arg) } return args }
go
{ "resource": "" }
q4914
FullIDs2LookUpArgs
train
func FullIDs2LookUpArgs(fullIDs []string) params.LookUpPayloadArgs { var args params.LookUpPayloadArgs for _, fullID := range fullIDs { name, rawID := payload.ParseID(fullID) args.Args = append(args.Args, params.LookUpPayloadArg{ Name: name, ID: rawID, }) } return args }
go
{ "resource": "" }
q4915
IDs2SetStatusArgs
train
func IDs2SetStatusArgs(ids []string, status string) params.SetPayloadStatusArgs { var args params.SetPayloadStatusArgs for _, id := range ids { arg := params.SetPayloadStatusArg{ Status: status, } arg.Tag = names.NewPayloadTag(id).String() args.Args = append(args.Args, arg) } return args }
go
{ "resource": "" }
q4916
stateStepsFor221
train
func stateStepsFor221() []Step { return []Step{ &upgradeStep{ description: "add update-status hook config settings", targets: []Target{DatabaseMaster}, run: func(context Context) error { return context.State().AddUpdateStatusHookSettings() }, }, &upgradeStep{ description: "correct relation unit counts for subordinates", targets: []Target{DatabaseMaster}, run: func(context Context) error { return context.State().CorrectRelationUnitCounts() }, }, } }
go
{ "resource": "" }
q4917
Subnets
train
func (e *environ) Subnets(ctx context.ProviderCallContext, inst instance.Id, subnetIds []network.Id) ([]network.SubnetInfo, error) { // In GCE all the subnets are in all AZs. zones, err := e.zoneNames(ctx) if err != nil { return nil, errors.Trace(err) } ids := makeIncludeSet(subnetIds) var results []network.SubnetInfo if inst == instance.UnknownId { results, err = e.getMatchingSubnets(ctx, ids, zones) } else { results, err = e.getInstanceSubnets(ctx, inst, ids, zones) } if err != nil { return nil, errors.Trace(err) } if missing := ids.Missing(); len(missing) != 0 { return nil, errors.NotFoundf("subnets %v", formatMissing(missing)) } return results, nil }
go
{ "resource": "" }
q4918
NetworkInterfaces
train
func (e *environ) NetworkInterfaces(ctx context.ProviderCallContext, instId instance.Id) ([]network.InterfaceInfo, error) { insts, err := e.Instances(ctx, []instance.Id{instId}) if err != nil { return nil, errors.Trace(err) } envInst, ok := insts[0].(*environInstance) if !ok { // This shouldn't happen. return nil, errors.Errorf("couldn't extract google instance for %q", instId) } // In GCE all the subnets are in all AZs. zones, err := e.zoneNames(ctx) if err != nil { return nil, errors.Trace(err) } networks, err := e.networksByURL(ctx) if err != nil { return nil, errors.Trace(err) } googleInst := envInst.base ifaces := googleInst.NetworkInterfaces() var subnetURLs []string for _, iface := range ifaces { if iface.Subnetwork != "" { subnetURLs = append(subnetURLs, iface.Subnetwork) } } subnets, err := e.subnetsByURL(ctx, subnetURLs, networks, zones) if err != nil { return nil, errors.Trace(err) } // We know there'll be a subnet for each url requested, otherwise // there would have been an error. var results []network.InterfaceInfo for i, iface := range ifaces { details, err := findNetworkDetails(iface, subnets, networks) if err != nil { return nil, errors.Annotatef(err, "instance %q", instId) } results = append(results, network.InterfaceInfo{ DeviceIndex: i, CIDR: details.cidr, // The network interface has no id in GCE so it's // identified by the machine's id + its name. ProviderId: network.Id(fmt.Sprintf("%s/%s", instId, iface.Name)), ProviderSubnetId: details.subnet, ProviderNetworkId: details.network, AvailabilityZones: copyStrings(zones), InterfaceName: iface.Name, Address: network.NewScopedAddress(iface.NetworkIP, network.ScopeCloudLocal), InterfaceType: network.EthernetInterface, Disabled: false, NoAutoStart: false, ConfigType: network.ConfigDHCP, }) } return results, nil }
go
{ "resource": "" }
q4919
findNetworkDetails
train
func findNetworkDetails(iface compute.NetworkInterface, subnets subnetMap, networks networkMap) (networkDetails, error) { var result networkDetails if iface.Subnetwork == "" { // This interface is on a legacy network. netwk, ok := networks[iface.Network] if !ok { return result, errors.NotFoundf("network %q", iface.Network) } result.cidr = netwk.IPv4Range result.subnet = "" result.network = network.Id(netwk.Name) } else { subnet, ok := subnets[iface.Subnetwork] if !ok { return result, errors.NotFoundf("subnet %q", iface.Subnetwork) } result.cidr = subnet.CIDR result.subnet = subnet.ProviderId result.network = subnet.ProviderNetworkId } return result, nil }
go
{ "resource": "" }
q4920
AllocateContainerAddresses
train
func (e *environ) AllocateContainerAddresses(context.ProviderCallContext, instance.Id, names.MachineTag, []network.InterfaceInfo) ([]network.InterfaceInfo, error) { return nil, errors.NotSupportedf("container addresses") }
go
{ "resource": "" }
q4921
SSHAddresses
train
func (*environ) SSHAddresses(ctx context.ProviderCallContext, addresses []network.Address) ([]network.Address, error) { bestAddress, ok := network.SelectPublicAddress(addresses) if ok { return []network.Address{bestAddress}, nil } else { // fallback return addresses, nil } }
go
{ "resource": "" }
q4922
Include
train
func (s *includeSet) Include(item string) bool { if s.items.Contains(item) { s.items.Remove(item) return true } return false }
go
{ "resource": "" }
q4923
Validate
train
func (v KnownServiceValue) Validate() error { switch v { case SSHRule, JujuControllerRule, JujuApplicationOfferRule: return nil } return errors.NotValidf("known service %q", v) }
go
{ "resource": "" }
q4924
Get
train
func (d datastore) Get(path string, data interface{}) error { current := d[path] if current == nil { return errors.NotFoundf(path) } data = current return nil }
go
{ "resource": "" }
q4925
Put
train
func (d datastore) Put(path string, data interface{}) error { d[path] = data return nil }
go
{ "resource": "" }
q4926
PutReader
train
func (d *datastore) PutReader(path string, data io.Reader) error { buffer := []byte{} _, err := data.Read(buffer) if err != nil { return errors.Trace(err) } return d.Put(path, buffer) }
go
{ "resource": "" }
q4927
WithChannel
train
func (c FakeClient) WithChannel(channel params.Channel) *ChannelAwareFakeClient { return &ChannelAwareFakeClient{channel, c} }
go
{ "resource": "" }
q4928
Get
train
func (c ChannelAwareFakeClient) Get(path string, value interface{}) error { return c.charmstore.Get(path, value) }
go
{ "resource": "" }
q4929
AddCharm
train
func (r Repository) AddCharm(id *charm.URL, channel params.Channel, force bool) error { withRevision := r.addRevision(id) alreadyAdded := r.added[string(channel)] for _, charm := range alreadyAdded { if *withRevision == charm { return nil // TODO(tsm) check expected behaviour // // if force { // return nil // } else { // return errors.NewAlreadyExists(errors.NewErr("%v already added in channel %v", id, channel)) // } } } r.added[string(channel)] = append(alreadyAdded, *withRevision) return nil }
go
{ "resource": "" }
q4930
AddCharmWithAuthorization
train
func (r Repository) AddCharmWithAuthorization(id *charm.URL, channel params.Channel, macaroon *macaroon.Macaroon, force bool) error { return r.AddCharm(id, channel, force) }
go
{ "resource": "" }
q4931
AddLocalCharm
train
func (r Repository) AddLocalCharm(id *charm.URL, details charm.Charm, force bool) (*charm.URL, error) { return id, r.AddCharm(id, params.NoChannel, force) }
go
{ "resource": "" }
q4932
CharmInfo
train
func (r Repository) CharmInfo(charmURL string) (*charms.CharmInfo, error) { charmId, err := charm.ParseURL(charmURL) if err != nil { return nil, errors.Trace(err) } charmDetails, err := r.Get(charmId) if err != nil { return nil, errors.Trace(err) } info := charms.CharmInfo{ Revision: charmDetails.Revision(), URL: charmId.String(), Config: charmDetails.Config(), Meta: charmDetails.Meta(), Actions: charmDetails.Actions(), Metrics: charmDetails.Metrics(), } return &info, nil }
go
{ "resource": "" }
q4933
Resolve
train
func (r Repository) Resolve(ref *charm.URL) (canonRef *charm.URL, supportedSeries []string, err error) { return r.addRevision(ref), []string{"trusty", "wily", "quantal"}, nil }
go
{ "resource": "" }
q4934
Get
train
func (r Repository) Get(id *charm.URL) (charm.Charm, error) { withRevision := r.addRevision(id) charmData := r.charms[r.channel][*withRevision] if charmData == nil { return charmData, errors.NotFoundf("cannot retrieve \"%v\": charm", id.String()) } return charmData, nil }
go
{ "resource": "" }
q4935
GetBundle
train
func (r Repository) GetBundle(id *charm.URL) (charm.Bundle, error) { bundleData := r.bundles[r.channel][*id] if bundleData == nil { return nil, errors.NotFoundf(id.String()) } return bundleData, nil }
go
{ "resource": "" }
q4936
Publish
train
func (r Repository) Publish(id *charm.URL, channels []params.Channel, resources map[string]int) error { for _, channel := range channels { published := r.published[channel] published.Add(id.String()) r.published[channel] = published } return nil }
go
{ "resource": "" }
q4937
signature
train
func signature(r io.Reader) (hash []byte, err error) { h := sha512.New384() _, err = io.Copy(h, r) if err != nil { return nil, errors.Trace(err) } hash = []byte(fmt.Sprintf("%x", h.Sum(nil))) return hash, nil }
go
{ "resource": "" }
q4938
New
train
func New( modelUUID string, managedStorage blobstore.ManagedStorage, metadataCollection mongo.Collection, runner jujutxn.Runner, ) Storage { return &binaryStorage{ modelUUID: modelUUID, managedStorage: managedStorage, metadataCollection: metadataCollection, txnRunner: runner, } }
go
{ "resource": "" }
q4939
Add
train
func (s *binaryStorage) Add(r io.Reader, metadata Metadata) (resultErr error) { // Add the binary file to storage. path := fmt.Sprintf("tools/%s-%s", metadata.Version, metadata.SHA256) if err := s.managedStorage.PutForBucket(s.modelUUID, path, r, metadata.Size); err != nil { return errors.Annotate(err, "cannot store binary file") } defer func() { if resultErr == nil { return } err := s.managedStorage.RemoveForBucket(s.modelUUID, path) if err != nil { logger.Errorf("failed to remove binary blob: %v", err) } }() newDoc := metadataDoc{ Id: metadata.Version, Version: metadata.Version, Size: metadata.Size, SHA256: metadata.SHA256, Path: path, } // Add or replace metadata. If replacing, record the existing path so we // can remove it later. var oldPath string buildTxn := func(attempt int) ([]txn.Op, error) { op := txn.Op{ C: s.metadataCollection.Name(), Id: newDoc.Id, } // On the first attempt we assume we're adding new binary files. // Subsequent attempts to add files will fetch the existing // doc, record the old path, and attempt to update the // size, path and hash fields. if attempt == 0 { op.Assert = txn.DocMissing op.Insert = &newDoc } else { oldDoc, err := s.findMetadata(metadata.Version) if err != nil { return nil, err } oldPath = oldDoc.Path op.Assert = bson.D{{"path", oldPath}} if oldPath != path { op.Update = bson.D{{ "$set", bson.D{ {"size", metadata.Size}, {"sha256", metadata.SHA256}, {"path", path}, }, }} } } return []txn.Op{op}, nil } err := s.txnRunner.Run(buildTxn) if err != nil { return errors.Annotate(err, "cannot store binary metadata") } if oldPath != "" && oldPath != path { // Attempt to remove the old path. Failure is non-fatal. err := s.managedStorage.RemoveForBucket(s.modelUUID, oldPath) if err != nil { logger.Errorf("failed to remove old binary blob: %v", err) } else { logger.Debugf("removed old binary blob") } } return nil }
go
{ "resource": "" }
q4940
createFirewallRuleVm
train
func createFirewallRuleVm(envName string, machineId string, portRange network.IngressRule) string { ports := []string{} for p := portRange.FromPort; p <= portRange.ToPort; p++ { ports = append(ports, fmt.Sprintf("PORT %d", p)) } var portList string if len(ports) > 1 { portList = fmt.Sprintf("( %s )", strings.Join(ports, " AND ")) } else if len(ports) == 1 { portList = ports[0] } return fmt.Sprintf(firewallRuleVm, envName, machineId, strings.ToLower(portRange.Protocol), portList) }
go
{ "resource": "" }
q4941
PublicKeyForLocation
train
func (b BakeryServicePublicKeyLocator) PublicKeyForLocation(string) (*bakery.PublicKey, error) { return b.Service.PublicKey(), nil }
go
{ "resource": "" }
q4942
NewBakeryService
train
func NewBakeryService( st *state.State, store bakerystorage.ExpirableStorage, locator bakery.PublicKeyLocator, ) (*bakery.Service, *bakery.KeyPair, error) { key, err := bakery.GenerateKey() if err != nil { return nil, nil, errors.Annotate(err, "generating key for bakery service") } service, err := bakery.NewService(bakery.NewServiceParams{ Location: "juju model " + st.ModelUUID(), Store: store, Key: key, Locator: locator, }) if err != nil { return nil, nil, errors.Trace(err) } return service, key, nil }
go
{ "resource": "" }
q4943
ExpireStorageAfter
train
func (s *ExpirableStorageBakeryService) ExpireStorageAfter(t time.Duration) (authentication.ExpirableStorageBakeryService, error) { store := s.Store.ExpireAfter(t) service, err := bakery.NewService(bakery.NewServiceParams{ Location: s.Location(), Store: store, Key: s.Key, Locator: s.Locator, }) if err != nil { return nil, errors.Trace(err) } return &ExpirableStorageBakeryService{service, s.Key, store, s.Locator}, nil }
go
{ "resource": "" }
q4944
NewContainerSetupHandler
train
func NewContainerSetupHandler(params ContainerSetupParams) watcher.StringsHandler { return &ContainerSetup{ runner: params.Runner, machine: params.Machine, supportedContainers: params.SupportedContainers, provisioner: params.Provisioner, config: params.Config, workerName: params.WorkerName, machineLock: params.MachineLock, credentialAPI: params.CredentialAPI, getNetConfig: common.GetObservedNetworkConfig, } }
go
{ "resource": "" }
q4945
SetUp
train
func (cs *ContainerSetup) SetUp() (watcher watcher.StringsWatcher, err error) { // Set up the semaphores for each container type. cs.setupDone = make(map[instance.ContainerType]*int32, len(instance.ContainerTypes)) for _, containerType := range instance.ContainerTypes { zero := int32(0) cs.setupDone[containerType] = &zero } // Listen to all container lifecycle events on our machine. if watcher, err = cs.machine.WatchAllContainers(); err != nil { return nil, err } return watcher, nil }
go
{ "resource": "" }
q4946
Handle
train
func (cs *ContainerSetup) Handle(abort <-chan struct{}, containerIds []string) (resultError error) { // Consume the initial watcher event. if len(containerIds) == 0 { return nil } logger.Infof("initial container setup with ids: %v", containerIds) for _, id := range containerIds { containerType := state.ContainerTypeFromId(id) // If this container type has been dealt with, do nothing. if atomic.LoadInt32(cs.setupDone[containerType]) != 0 { continue } if err := cs.initialiseAndStartProvisioner(abort, containerType); err != nil { logger.Errorf("starting container provisioner for %v: %v", containerType, err) // Just because dealing with one type of container fails, we won't // exit the entire function because we still want to try and start // other container types. So we take note of and return the first // such error. if resultError == nil { resultError = err } } } return errors.Trace(resultError) }
go
{ "resource": "" }
q4947
initContainerDependencies
train
func (cs *ContainerSetup) initContainerDependencies(abort <-chan struct{}, containerType instance.ContainerType) error { initialiser := getContainerInitialiser(containerType) releaser, err := cs.acquireLock(fmt.Sprintf("%s container initialisation", containerType), abort) if err != nil { return errors.Annotate(err, "failed to acquire initialization lock") } defer releaser() if err := initialiser.Initialise(); err != nil { return errors.Trace(err) } // At this point, Initialiser likely has changed host network information, // so re-probe to have an accurate view. observedConfig, err := cs.observeNetwork() if err != nil { return errors.Annotate(err, "cannot discover observed network config") } if len(observedConfig) > 0 { machineTag := cs.machine.MachineTag() logger.Tracef("updating observed network config for %q %s containers to %#v", machineTag, containerType, observedConfig) if err := cs.provisioner.SetHostMachineNetworkConfig(machineTag, observedConfig); err != nil { return errors.Trace(err) } } return nil }
go
{ "resource": "" }
q4948
startProvisionerWorker
train
func startProvisionerWorker( runner *worker.Runner, containerType instance.ContainerType, provisioner *apiprovisioner.State, config agent.Config, broker environs.InstanceBroker, toolsFinder ToolsFinder, distributionGroupFinder DistributionGroupFinder, credentialAPI workercommon.CredentialAPI, ) error { workerName := fmt.Sprintf("%s-provisioner", containerType) // The provisioner task is created after a container record has // already been added to the machine. It will see that the // container does not have an instance yet and create one. return runner.StartWorker(workerName, func() (worker.Worker, error) { w, err := NewContainerProvisioner(containerType, provisioner, config, broker, toolsFinder, distributionGroupFinder, credentialAPI, ) if err != nil { return nil, errors.Trace(err) } return w, nil }) }
go
{ "resource": "" }
q4949
Validate
train
func (config Config) Validate() error { if config.ModelWatcher == nil { return errors.NotValidf("nil ModelWatcher") } if config.ModelGetter == nil { return errors.NotValidf("nil ModelGetter") } if config.NewModelWorker == nil { return errors.NotValidf("nil NewModelWorker") } if config.ErrorDelay <= 0 { return errors.NotValidf("non-positive ErrorDelay") } return nil }
go
{ "resource": "" }
q4950
StreamDebugLog
train
func StreamDebugLog(source base.StreamConnector, args DebugLogParams) (<-chan LogMessage, error) { // TODO(babbageclunk): this isn't cancellable - if the caller stops // reading from the channel (because it has an error, for example), // the goroutine will be leaked. This is OK when used from the command // line, but is a problem if it happens in jujud. Change it to accept // a stop channel and use a read deadline so that the client can stop // it. https://pad.lv/1644084 // Prepare URL query attributes. attrs := args.URLQuery() connection, err := source.ConnectStream("/log", attrs) if err != nil { return nil, errors.Trace(err) } messages := make(chan LogMessage) go func() { defer close(messages) for { var msg params.LogMessage err := connection.ReadJSON(&msg) if err != nil { return } messages <- LogMessage{ Entity: msg.Entity, Timestamp: msg.Timestamp, Severity: msg.Severity, Module: msg.Module, Location: msg.Location, Message: msg.Message, } } }() return messages, nil }
go
{ "resource": "" }
q4951
Prepare
train
func (d *deploy) Prepare(state State) (*State, error) { if err := d.checkAlreadyDone(state); err != nil { return nil, errors.Trace(err) } info, err := d.callbacks.GetArchiveInfo(d.charmURL) if err != nil { return nil, errors.Trace(err) } if err := d.deployer.Stage(info, d.abort); err != nil { return nil, errors.Trace(err) } // note: yes, this *should* be in Prepare, not Execute. Before we can safely // write out local state referencing the charm url (by returning the new // State to the Executor, below), we have to register our interest in that // charm on the controller. If we neglected to do so, the operation could // race with a new application-charm-url change on the controller, and lead to // failures on resume in which we try to obtain archive info for a charm that // has already been removed from the controller. if err := d.callbacks.SetCurrentCharm(d.charmURL); err != nil { return nil, errors.Trace(err) } return d.getState(state, Pending), nil }
go
{ "resource": "" }
q4952
Execute
train
func (d *deploy) Execute(state State) (*State, error) { if err := d.deployer.Deploy(); err == charm.ErrConflict { return nil, NewDeployConflictError(d.charmURL) } else if err != nil { return nil, errors.Trace(err) } return d.getState(state, Done), nil }
go
{ "resource": "" }
q4953
Commit
train
func (d *deploy) Commit(state State) (*State, error) { change := &stateChange{ Kind: RunHook, } if hookInfo := d.interruptedHook(state); hookInfo != nil { change.Hook = hookInfo change.Step = Pending } else { change.Hook = &hook.Info{Kind: deployHookKinds[d.kind]} change.Step = Queued } return change.apply(state), nil }
go
{ "resource": "" }
q4954
Updates
train
func (sr ApplicationResources) Updates() ([]resource.Resource, error) { storeResources, err := sr.alignStoreResources() if err != nil { return nil, errors.Trace(err) } var updates []resource.Resource for i, res := range sr.Resources { if res.Origin != resource.OriginStore { continue } csRes := storeResources[i] // If the revision is the same then all the other info must be. if res.Revision == csRes.Revision { continue } updates = append(updates, csRes) } return updates, nil }
go
{ "resource": "" }
q4955
NewMockApiextensionsV1beta1Interface
train
func NewMockApiextensionsV1beta1Interface(ctrl *gomock.Controller) *MockApiextensionsV1beta1Interface { mock := &MockApiextensionsV1beta1Interface{ctrl: ctrl} mock.recorder = &MockApiextensionsV1beta1InterfaceMockRecorder{mock} return mock }
go
{ "resource": "" }
q4956
CustomResourceDefinitions
train
func (m *MockApiextensionsV1beta1Interface) CustomResourceDefinitions() v1beta10.CustomResourceDefinitionInterface { ret := m.ctrl.Call(m, "CustomResourceDefinitions") ret0, _ := ret[0].(v1beta10.CustomResourceDefinitionInterface) return ret0 }
go
{ "resource": "" }
q4957
CustomResourceDefinitions
train
func (mr *MockApiextensionsV1beta1InterfaceMockRecorder) CustomResourceDefinitions() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CustomResourceDefinitions", reflect.TypeOf((*MockApiextensionsV1beta1Interface)(nil).CustomResourceDefinitions)) }
go
{ "resource": "" }
q4958
RESTClient
train
func (m *MockApiextensionsV1beta1Interface) RESTClient() rest.Interface { ret := m.ctrl.Call(m, "RESTClient") ret0, _ := ret[0].(rest.Interface) return ret0 }
go
{ "resource": "" }
q4959
NewMockCustomResourceDefinitionInterface
train
func NewMockCustomResourceDefinitionInterface(ctrl *gomock.Controller) *MockCustomResourceDefinitionInterface { mock := &MockCustomResourceDefinitionInterface{ctrl: ctrl} mock.recorder = &MockCustomResourceDefinitionInterfaceMockRecorder{mock} return mock }
go
{ "resource": "" }
q4960
Validate
train
func (a Access) Validate() error { switch a { case NoAccess, AdminAccess, ReadAccess, WriteAccess, LoginAccess, AddModelAccess, SuperuserAccess: return nil } return errors.NotValidf("access level %s", a) }
go
{ "resource": "" }
q4961
ValidateModelAccess
train
func ValidateModelAccess(access Access) error { switch access { case ReadAccess, WriteAccess, AdminAccess: return nil } return errors.NotValidf("%q model access", access) }
go
{ "resource": "" }
q4962
ValidateOfferAccess
train
func ValidateOfferAccess(access Access) error { switch access { case ReadAccess, ConsumeAccess, AdminAccess: return nil } return errors.NotValidf("%q offer access", access) }
go
{ "resource": "" }
q4963
ValidateCloudAccess
train
func ValidateCloudAccess(access Access) error { switch access { case AddModelAccess, AdminAccess: return nil } return errors.NotValidf("%q cloud access", access) }
go
{ "resource": "" }
q4964
ValidateControllerAccess
train
func ValidateControllerAccess(access Access) error { switch access { case LoginAccess, SuperuserAccess: return nil } return errors.NotValidf("%q controller access", access) }
go
{ "resource": "" }
q4965
EqualOrGreaterModelAccessThan
train
func (a Access) EqualOrGreaterModelAccessThan(access Access) bool { v1, v2 := a.modelValue(), access.modelValue() if v1 < 0 || v2 < 0 { return false } return v1 >= v2 }
go
{ "resource": "" }
q4966
EqualOrGreaterControllerAccessThan
train
func (a Access) EqualOrGreaterControllerAccessThan(access Access) bool { v1, v2 := a.controllerValue(), access.controllerValue() if v1 < 0 || v2 < 0 { return false } return v1 >= v2 }
go
{ "resource": "" }
q4967
EqualOrGreaterCloudAccessThan
train
func (a Access) EqualOrGreaterCloudAccessThan(access Access) bool { v1, v2 := a.cloudValue(), access.cloudValue() if v1 < 0 || v2 < 0 { return false } return v1 >= v2 }
go
{ "resource": "" }
q4968
EqualOrGreaterOfferAccessThan
train
func (a Access) EqualOrGreaterOfferAccessThan(access Access) bool { v1, v2 := a.offerValue(), access.offerValue() if v1 < 0 || v2 < 0 { return false } return v1 >= v2 }
go
{ "resource": "" }
q4969
NewShowCommand
train
// NewShowCommand returns a wrapped model command that shows storage details.
func NewShowCommand() cmd.Command {
	cmd := &showCommand{}
	// The API is constructed lazily through newAPIFunc, which lets tests
	// substitute a fake StorageShowAPI.
	cmd.newAPIFunc = func() (StorageShowAPI, error) {
		return cmd.NewStorageAPI()
	}
	return modelcmd.Wrap(cmd)
}
go
{ "resource": "" }
q4970
ListSpaces
train
// ListSpaces fetches all spaces known to the controller via the ListSpaces
// facade call. A not-supported error code from the server is converted into
// a local errors.NotSupported so callers can detect it uniformly.
func (api *API) ListSpaces() ([]params.Space, error) {
	var response params.ListSpacesResults
	err := api.facade.FacadeCall("ListSpaces", nil, &response)
	if params.IsCodeNotSupported(err) {
		return response.Results, errors.NewNotSupported(nil, err.Error())
	}
	return response.Results, err
}
go
{ "resource": "" }
q4971
ReloadSpaces
train
// ReloadSpaces asks the controller to refresh its view of network spaces.
// The call requires facade version 3 or later; older controllers yield a
// NotSupported error, as does a not-supported error code from the server.
func (api *API) ReloadSpaces() error {
	if api.facade.BestAPIVersion() < 3 {
		return errors.NewNotSupported(nil, "Controller does not support reloading spaces")
	}
	err := api.facade.FacadeCall("ReloadSpaces", nil, nil)
	if params.IsCodeNotSupported(err) {
		return errors.NewNotSupported(nil, err.Error())
	}
	return err
}
go
{ "resource": "" }
q4972
newAPIContext
train
// newAPIContext builds an apiContext for the named controller: a cookie jar
// loaded from the client store (wrapped to advertise the preferred user
// domain), plus the bakery visitor used to discharge third-party caveats.
// When opts.NoBrowser is set an interactive USSO form login is used;
// otherwise the default web-browser visitor is installed.
func newAPIContext(ctxt *cmd.Context, opts *AuthOpts, store jujuclient.CookieStore, controllerName string) (*apiContext, error) {
	jar0, err := store.CookieJar(controllerName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The JUJU_USER_DOMAIN environment variable specifies
	// the preferred user domain when discharging third party caveats.
	// We set up a cookie jar that will send it to all sites because
	// we don't know where the third party might be.
	jar := &domainCookieJar{
		CookieJar: jar0,
		domain:    os.Getenv("JUJU_USER_DOMAIN"),
	}
	var visitors []httpbakery.Visitor
	if ctxt != nil && opts != nil && opts.NoBrowser {
		// Non-interactive-browser mode: prompt on the command's own
		// stdin/stdout and cache tokens in the client token store.
		filler := &form.IOFiller{
			In:  ctxt.Stdin,
			Out: ctxt.Stdout,
		}
		newVisitor := ussologin.NewVisitor("juju", filler, jujuclient.NewTokenStore())
		visitors = append(visitors, newVisitor)
	} else {
		visitors = append(visitors, httpbakery.WebBrowserVisitor)
	}
	return &apiContext{
		jar:            jar,
		webPageVisitor: httpbakery.NewMultiVisitor(visitors...),
	}, nil
}
go
{ "resource": "" }
q4973
NewBakeryClient
train
func (ctx *apiContext) NewBakeryClient() *httpbakery.Client { client := httpbakery.NewClient() client.Jar = ctx.jar client.WebPageVisitor = ctx.webPageVisitor return client }
go
{ "resource": "" }
q4974
Close
train
func (ctxt *apiContext) Close() error { if err := ctxt.jar.Save(); err != nil { return errors.Annotatef(err, "cannot save cookie jar") } return nil }
go
{ "resource": "" }
q4975
Cookies
train
func (j *domainCookieJar) Cookies(u *url.URL) []*http.Cookie { cookies := j.CookieJar.Cookies(u) if j.domain == "" { return cookies } // Allow the site to override if it wants to. for _, c := range cookies { if c.Name == domainCookieName { return cookies } } return append(cookies, &http.Cookie{ Name: domainCookieName, Value: j.domain, }) }
go
{ "resource": "" }
q4976
manifestSender
train
// manifestSender responds with a JSON CharmsResponse listing the sorted
// file names contained in the charm archive's manifest.
func (h *charmsHandler) manifestSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error {
	manifest, err := bundle.Manifest()
	if err != nil {
		return errors.Annotatef(err, "unable to read manifest in %q", bundle.Path)
	}
	return errors.Trace(sendStatusAndJSON(w, http.StatusOK, &params.CharmsResponse{
		Files: manifest.SortedValues(),
	}))
}
go
{ "resource": "" }
q4977
archiveEntrySender
train
func (h *charmsHandler) archiveEntrySender(filePath string, serveIcon bool) bundleContentSenderFunc { return func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error { contents, err := common.CharmArchiveEntry(bundle.Path, filePath, serveIcon) if err != nil { return errors.Trace(err) } ctype := mime.TypeByExtension(filepath.Ext(filePath)) if ctype != "" { // Older mime.types may map .js to x-javascript. // Map it to javascript for consistency. if ctype == params.ContentTypeXJS { ctype = params.ContentTypeJS } w.Header().Set("Content-Type", ctype) } w.Header().Set("Content-Length", strconv.Itoa(len(contents))) w.WriteHeader(http.StatusOK) io.Copy(w, bytes.NewReader(contents)) return nil } }
go
{ "resource": "" }
q4978
archiveSender
train
// archiveSender streams the whole charm archive file to the client.
func (h *charmsHandler) archiveSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error {
	// Note that http.ServeFile's error responses are not our standard JSON
	// responses (they are the usual textual error messages as produced
	// by http.Error), but there's not a great deal we can do about that,
	// except accept non-JSON error responses in the client, because
	// http.ServeFile does not provide a way of customizing its
	// error responses.
	http.ServeFile(w, r, bundle.Path)
	return nil
}
go
{ "resource": "" }
q4979
processUploadedArchive
train
// processUploadedArchive normalises an uploaded charm zip in place at path.
// If the charm content lives under a subdirectory inside the zip, the
// archive is extracted, re-read as a charm directory, and repackaged so
// the charm files sit at the archive root; otherwise the file is untouched.
func (h *charmsHandler) processUploadedArchive(path string) error {
	// Open the archive as a zip.
	f, err := os.OpenFile(path, os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	// zip.NewReader reads via ReaderAt, so the file's seek offset is
	// presumably still at 0 when we truncate and rewrite below — confirm.
	zipr, err := zip.NewReader(f, fi.Size())
	if err != nil {
		return errors.Annotate(err, "cannot open charm archive")
	}

	// Find out the root dir prefix from the archive.
	rootDir, err := h.findArchiveRootDir(zipr)
	if err != nil {
		return errors.Annotate(err, "cannot read charm archive")
	}
	if rootDir == "." {
		// Normal charm, just use charm.ReadCharmArchive.
		return nil
	}

	// There is one or more subdirs, so we need extract it to a temp
	// dir and then read it as a charm dir.
	tempDir, err := ioutil.TempDir("", "charm-extract")
	if err != nil {
		return errors.Annotate(err, "cannot create temp directory")
	}
	defer os.RemoveAll(tempDir)
	if err := ziputil.Extract(zipr, tempDir, rootDir); err != nil {
		return errors.Annotate(err, "cannot extract charm archive")
	}
	dir, err := charm.ReadCharmDir(tempDir)
	if err != nil {
		return errors.Annotate(err, "cannot read extracted archive")
	}

	// Now repackage the dir as a bundle at the original path.
	if err := f.Truncate(0); err != nil {
		return err
	}
	if err := dir.ArchiveTo(f); err != nil {
		return err
	}
	return nil
}
go
{ "resource": "" }
q4980
findArchiveRootDir
train
// findArchiveRootDir locates the directory inside the zip that holds the
// charm's metadata.yaml and returns it as the charm root. It errors when
// no metadata.yaml exists, or when two candidates sit at the same depth
// (making the root ambiguous); otherwise the shallowest match wins.
func (h *charmsHandler) findArchiveRootDir(zipr *zip.Reader) (string, error) {
	paths, err := ziputil.Find(zipr, "metadata.yaml")
	if err != nil {
		return "", err
	}
	switch len(paths) {
	case 0:
		return "", errors.Errorf("invalid charm archive: missing metadata.yaml")
	case 1:
	default:
		// Multiple matches: prefer the shallowest, but reject a tie.
		sort.Sort(byDepth(paths))
		if depth(paths[0]) == depth(paths[1]) {
			return "", errors.Errorf("invalid charm archive: ambiguous root directory")
		}
	}
	return filepath.Dir(paths[0]), nil
}
go
{ "resource": "" }
q4981
repackageAndUploadCharm
train
// repackageAndUploadCharm expands the uploaded charm archive, stamps it with
// the revision from curl, reads an optional "version" file from the charm
// root, re-archives the charm while computing its SHA-256, and stores the
// result in environment storage.
func (h *charmsHandler) repackageAndUploadCharm(st *state.State, archive *charm.CharmArchive, curl *charm.URL) error {
	// Create a temp dir to contain the extracted charm dir.
	tempDir, err := ioutil.TempDir("", "charm-download")
	if err != nil {
		return errors.Annotate(err, "cannot create temp directory")
	}
	defer os.RemoveAll(tempDir)
	extractPath := filepath.Join(tempDir, "extracted")

	// Expand and repack it with the revision specified by curl.
	archive.SetRevision(curl.Revision)
	if err := archive.ExpandTo(extractPath); err != nil {
		return errors.Annotate(err, "cannot extract uploaded charm")
	}
	charmDir, err := charm.ReadCharmDir(extractPath)
	if err != nil {
		return errors.Annotate(err, "cannot read extracted charm")
	}
	// Try to get the version details here.
	// read just the first line of the file.
	var version string
	versionPath := filepath.Join(extractPath, "version")
	if file, err := os.Open(versionPath); err == nil {
		scanner := bufio.NewScanner(file)
		scanner.Scan()
		file.Close()
		if err := scanner.Err(); err != nil {
			return errors.Annotate(err, "cannot read version file")
		}
		revLine := scanner.Text()
		// bzr revision info starts with "revision-id: " so strip that.
		revLine = strings.TrimPrefix(revLine, "revision-id: ")
		// Cap the recorded version at 100 characters.
		version = fmt.Sprintf("%.100s", revLine)
	} else if !os.IsNotExist(err) {
		return errors.Annotate(err, "cannot open version file")
	}

	// Bundle the charm and calculate its sha256 hash at the same time.
	var repackagedArchive bytes.Buffer
	hash := sha256.New()
	err = charmDir.ArchiveTo(io.MultiWriter(hash, &repackagedArchive))
	if err != nil {
		return errors.Annotate(err, "cannot repackage uploaded charm")
	}
	bundleSHA256 := hex.EncodeToString(hash.Sum(nil))
	info := application.CharmArchive{
		ID:           curl,
		Charm:        archive,
		Data:         &repackagedArchive,
		Size:         int64(repackagedArchive.Len()),
		SHA256:       bundleSHA256,
		CharmVersion: version,
	}
	// Store the charm archive in environment storage.
	shim := application.NewStateShim(st)
	return application.StoreCharmArchive(shim, info)
}
go
{ "resource": "" }
q4982
sendJSONError
train
// sendJSONError logs err and writes it to the client as a JSON
// CharmsResponse, using the status code derived from the server error.
func sendJSONError(w http.ResponseWriter, req *http.Request, err error) error {
	logger.Errorf("returning error from %s %s: %s", req.Method, req.URL, errors.Details(err))
	perr, status := common.ServerErrorAndStatus(err)
	return errors.Trace(sendStatusAndJSON(w, status, &params.CharmsResponse{
		Error:     perr.Message,
		ErrorCode: perr.Code,
		ErrorInfo: perr.Info,
	}))
}
go
{ "resource": "" }
q4983
sendBundleContent
train
func sendBundleContent( w http.ResponseWriter, r *http.Request, archivePath string, sender bundleContentSenderFunc, ) error { bundle, err := charm.ReadCharmArchive(archivePath) if err != nil { return errors.Annotatef(err, "unable to read archive in %q", archivePath) } // The bundleContentSenderFunc will set up and send an appropriate response. if err := sender(w, r, bundle); err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q4984
NewStorageAPI
train
func (c *StorageCommandBase) NewStorageAPI() (*storage.Client, error) { root, err := c.NewAPIRoot() if err != nil { return nil, err } return storage.NewClient(root), nil }
go
{ "resource": "" }
q4985
formatStorageDetails
train
func formatStorageDetails(storages []params.StorageDetails) (map[string]StorageInfo, error) { if len(storages) == 0 { return nil, nil } output := make(map[string]StorageInfo) for _, details := range storages { storageTag, storageInfo, err := createStorageInfo(details) if err != nil { return nil, errors.Trace(err) } output[storageTag.Id()] = storageInfo } return output, nil }
go
{ "resource": "" }
q4986
NewMockBaseWatcher
train
// NewMockBaseWatcher creates a new gomock-backed mock bound to ctrl and
// wires up its recorder for use with EXPECT().
func NewMockBaseWatcher(ctrl *gomock.Controller) *MockBaseWatcher {
	mock := &MockBaseWatcher{ctrl: ctrl}
	mock.recorder = &MockBaseWatcherMockRecorder{mock}
	return mock
}
go
{ "resource": "" }
q4987
Dead
train
// Dead mocks the base method, returning whatever channel the test
// configured via the recorder.
func (m *MockBaseWatcher) Dead() <-chan struct{} {
	ret := m.ctrl.Call(m, "Dead")
	ret0, _ := ret[0].(<-chan struct{})
	return ret0
}
go
{ "resource": "" }
q4988
Dead
train
// Dead indicates an expected call of Dead on the mocked BaseWatcher.
func (mr *MockBaseWatcherMockRecorder) Dead() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dead", reflect.TypeOf((*MockBaseWatcher)(nil).Dead))
}
go
{ "resource": "" }
q4989
WatchCollection
train
// WatchCollection mocks the base method; the call is recorded on the
// controller and has no return value.
func (m *MockBaseWatcher) WatchCollection(arg0 string, arg1 chan<- watcher.Change) {
	m.ctrl.Call(m, "WatchCollection", arg0, arg1)
}
go
{ "resource": "" }
q4990
WatchCollectionWithFilter
train
// WatchCollectionWithFilter mocks the base method; the call is recorded on
// the controller and has no return value.
func (m *MockBaseWatcher) WatchCollectionWithFilter(arg0 string, arg1 chan<- watcher.Change, arg2 func(interface{}) bool) {
	m.ctrl.Call(m, "WatchCollectionWithFilter", arg0, arg1, arg2)
}
go
{ "resource": "" }
q4991
WatchCollectionWithFilter
train
// WatchCollectionWithFilter indicates an expected call of
// WatchCollectionWithFilter on the mocked BaseWatcher.
func (mr *MockBaseWatcherMockRecorder) WatchCollectionWithFilter(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchCollectionWithFilter", reflect.TypeOf((*MockBaseWatcher)(nil).WatchCollectionWithFilter), arg0, arg1, arg2)
}
go
{ "resource": "" }
q4992
WatchMulti
train
// WatchMulti mocks the base method, returning whatever error the test
// configured via the recorder.
func (m *MockBaseWatcher) WatchMulti(arg0 string, arg1 []interface{}, arg2 chan<- watcher.Change) error {
	ret := m.ctrl.Call(m, "WatchMulti", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}
go
{ "resource": "" }
q4993
New
train
func New(config Config) (ExpirableStorage, error) { if err := config.Validate(); err != nil { return nil, errors.Annotate(err, "validating config") } return &storage{ config: config, rootKeys: mgostorage.NewRootKeys(5), }, nil }
go
{ "resource": "" }
q4994
getLogMessage
train
func (info *peerGroupInfo) getLogMessage() string { lines := []string{ fmt.Sprintf("calculating desired peer group\ndesired voting members: (maxId: %d)", info.maxMemberId), } template := "\n %#v: rs_id=%d, rs_addr=%s" ids := make([]string, 0, len(info.recognised)) for id := range info.recognised { ids = append(ids, id) } sortAsInts(ids) for _, id := range ids { rm := info.recognised[id] lines = append(lines, fmt.Sprintf(template, info.machines[id], rm.Id, rm.Address)) } if len(info.extra) > 0 { lines = append(lines, "\nother members:") template := "\n rs_id=%d, rs_addr=%s, tags=%v, vote=%t" for _, em := range info.extra { vote := em.Votes != nil && *em.Votes > 0 lines = append(lines, fmt.Sprintf(template, em.Id, em.Address, em.Tags, vote)) } } return strings.Join(lines, "") }
go
{ "resource": "" }
q4995
initNewReplicaSet
train
// initNewReplicaSet seeds the desired replica set with a copy of every
// recognised member, keyed by machine ID.
func (p *peerGroupChanges) initNewReplicaSet() map[string]*replicaset.Member {
	rs := make(map[string]*replicaset.Member, len(p.info.recognised))
	for id := range p.info.recognised {
		// Local-scoped variable required here,
		// or the same pointer to the loop variable is used each time.
		m := p.info.recognised[id]
		rs[id] = &m
	}
	return rs
}
go
{ "resource": "" }
q4996
checkExtraMembers
train
// checkExtraMembers rejects any replica-set member that is not backed by a
// machine yet holds a vote; non-voting extras are tolerated but mark the
// desired peer group as changed.
func (p *peerGroupChanges) checkExtraMembers() error {
	// Note: (jam 2018-04-18) With the new "juju remove-machine --force" it is much easier to get into this situation
	// because an active controller that is in the replicaset would get removed while it still had voting rights.
	// Given that Juju is in control of the replicaset we don't really just 'accept' that some other machine has a vote.
	// *maybe* we could allow non-voting members that would be used by 3rd parties to provide a warm database backup.
	// But I think the right answer is probably to downgrade unknown members from voting.
	for _, member := range p.info.extra {
		if isVotingMember(&member) {
			return fmt.Errorf("voting non-machine member %v found in peer group", member)
		}
	}
	if len(p.info.extra) > 0 {
		p.desired.isChanged = true
	}
	return nil
}
go
{ "resource": "" }
q4997
reviewPeerGroupChanges
train
// reviewPeerGroupChanges adjusts the planned voting changes so the
// resulting replica set keeps an odd number of voters, and never strips the
// primary's vote when no other voter would remain.
func (p *peerGroupChanges) reviewPeerGroupChanges() {
	// Count the voters currently present in the desired member set.
	currVoters := 0
	for _, m := range p.desired.members {
		if isVotingMember(m) {
			currVoters += 1
		}
	}
	keptVoters := currVoters - len(p.toRemoveVote)
	if keptVoters == 0 {
		// to keep no voters means to step down the primary without a replacement, which is not possible.
		// So restore the current primary. Once there is another member to work with after reconfiguring, we will then
		// be able to ask the current primary to step down, and then we can finally remove it.
		var tempToRemove []string
		for _, id := range p.toRemoveVote {
			isPrimary := isPrimaryMember(p.info, id)
			if !isPrimary {
				tempToRemove = append(tempToRemove, id)
			} else {
				logger.Debugf("asked to remove all voters, preserving primary voter %q", id)
				p.desired.stepDownPrimary = false
			}
		}
		p.toRemoveVote = tempToRemove
	}
	newCount := keptVoters + len(p.toAddVote)
	if (newCount)%2 == 1 {
		logger.Debugf("number of voters is odd")
		// if this is true we will create an odd number of voters
		return
	}
	if len(p.toAddVote) > 0 {
		// Prefer dropping a planned vote addition to reach an odd count.
		last := p.toAddVote[len(p.toAddVote)-1]
		logger.Debugf("number of voters would be even, not adding %q to maintain odd", last)
		p.toAddVote = p.toAddVote[:len(p.toAddVote)-1]
		return
	}
	// we must remove an extra peer
	// make sure we don't pick the primary to be removed.
	for i, id := range p.toKeepVoting {
		if !isPrimaryMember(p.info, id) {
			p.toRemoveVote = append(p.toRemoveVote, id)
			logger.Debugf("removing vote from %q to maintain odd number of voters", id)
			// Remove id from toKeepVoting, preserving order.
			if i == len(p.toKeepVoting)-1 {
				p.toKeepVoting = p.toKeepVoting[:i]
			} else {
				p.toKeepVoting = append(p.toKeepVoting[:i], p.toKeepVoting[i+1:]...)
			}
			break
		}
	}
}
go
{ "resource": "" }
q4998
adjustVotes
train
// adjustVotes applies the planned voting changes to the desired member set:
// grants votes to toAddVote, revokes them from toRemoveVote, and marks the
// newly created members as non-voting. Any planned change at all flags the
// desired peer group as changed.
func (p *peerGroupChanges) adjustVotes() {
	setVoting := func(memberIds []string, voting bool) {
		for _, id := range memberIds {
			setMemberVoting(p.desired.members[id], voting)
			p.desired.machineVoting[id] = voting
		}
	}

	if len(p.toAddVote) > 0 ||
		len(p.toRemoveVote) > 0 ||
		len(p.toKeepCreateNonVotingMember) > 0 {
		p.desired.isChanged = true
	}
	setVoting(p.toAddVote, true)
	setVoting(p.toRemoveVote, false)
	setVoting(p.toKeepCreateNonVotingMember, false)
}
go
{ "resource": "" }
q4999
createNonVotingMember
train
func (p *peerGroupChanges) createNonVotingMember() { for _, id := range p.toKeepCreateNonVotingMember { logger.Debugf("create member with id %q", id) p.info.maxMemberId++ member := &replicaset.Member{ Tags: map[string]string{ jujuMachineKey: id, }, Id: p.info.maxMemberId, } setMemberVoting(member, false) p.desired.members[id] = member } for _, id := range p.toKeepNonVoting { if p.desired.members[id] != nil { continue } logger.Debugf("create member with id %q", id) p.info.maxMemberId++ member := &replicaset.Member{ Tags: map[string]string{ jujuMachineKey: id, }, Id: p.info.maxMemberId, } setMemberVoting(member, false) p.desired.members[id] = member } }
go
{ "resource": "" }