_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q5100
String
train
// String renders the wrapped map as "name=value" pairs joined by ";".
// NOTE(review): map iteration order is random, so pair order is not
// stable between calls — confirm callers do not rely on ordering.
func (m stringMap) String() string {
	pairs := make([]string, 0, len(*m.mapping))
	for name, value := range *m.mapping {
		pairs = append(pairs, name+"="+value)
	}
	return strings.Join(pairs, ";")
}
go
{ "resource": "" }
q5101
CurrentConfig
train
// CurrentConfig returns the underlying agent's config wrapped in a
// modelAgentConfig carrying this agent's model and controller UUIDs.
func (a *modelAgent) CurrentConfig() agent.Config {
	return &modelAgentConfig{
		Config:         a.Agent.CurrentConfig(),
		modelUUID:      a.modelUUID,
		controllerUUID: a.controllerUUID,
	}
}
go
{ "resource": "" }
q5102
APIInfo
train
// APIInfo returns the embedded config's API connection info with the
// model tag overridden to this config's model UUID. The bool reports
// whether the underlying config had API info at all.
func (c *modelAgentConfig) APIInfo() (*api.Info, bool) {
	info, ok := c.Config.APIInfo()
	if !ok {
		return nil, false
	}
	info.ModelTag = names.NewModelTag(c.modelUUID)
	return info, true
}
go
{ "resource": "" }
q5103
NewWorker
train
// NewWorker returns a caasOperator worker for the application named in
// config. It builds the charm deployer over the application's state
// paths and runs an internal runner (for the uniter workers) under the
// operator's catacomb.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	paths := NewPaths(config.DataDir, names.NewApplicationTag(config.Application))
	deployer, err := jujucharm.NewDeployer(
		paths.State.CharmDir,
		paths.State.DeployerDir,
		jujucharm.NewBundlesDir(paths.State.BundlesDir, config.Downloader),
	)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot create deployer")
	}
	op := &caasOperator{
		config:   config,
		paths:    paths,
		deployer: deployer,
		runner: worker.NewRunner(worker.RunnerParams{
			Clock: config.Clock,
			// One of the uniter workers failing should not
			// prevent the others from running.
			IsFatal: func(error) bool { return false },
			// For any failures, try again in 3 seconds.
			RestartDelay: 3 * time.Second,
		}),
	}
	// The catacomb owns the runner: killing the operator kills the
	// runner and waits for it to finish.
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &op.catacomb,
		Work: op.loop,
		Init: []worker.Worker{op.runner},
	}); err != nil {
		return nil, errors.Trace(err)
	}
	return op, nil
}
go
{ "resource": "" }
q5104
Juju1xEnvConfigExists
train
func Juju1xEnvConfigExists() bool { dir := OldJujuHomeDir() if dir == "" { return false } _, err := os.Stat(filepath.Join(dir, "environments.yaml")) return err == nil }
go
{ "resource": "" }
q5105
OldJujuHomeDir
train
func OldJujuHomeDir() string { JujuHomeDir := os.Getenv(oldJujuHomeEnvKey) if JujuHomeDir == "" { if runtime.GOOS == "windows" { JujuHomeDir = oldJujuHomeWin() } else { JujuHomeDir = oldJujuHomeLinux() } } return JujuHomeDir }
go
{ "resource": "" }
q5106
Prune
train
func (s *Facade) Prune(maxHistoryTime time.Duration, maxHistoryMB int) error { p := params.StatusHistoryPruneArgs{ MaxHistoryTime: maxHistoryTime, MaxHistoryMB: maxHistoryMB, } return s.facade.FacadeCall("Prune", p, nil) }
go
{ "resource": "" }
q5107
NewMockSummary
train
func NewMockSummary(ctrl *gomock.Controller) *MockSummary { mock := &MockSummary{ctrl: ctrl} mock.recorder = &MockSummaryMockRecorder{mock} return mock }
go
{ "resource": "" }
q5108
Collect
train
// Collect registers an expected call to MockSummary.Collect with the
// gomock controller (generated-mock recorder pattern).
func (mr *MockSummaryMockRecorder) Collect(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Collect", reflect.TypeOf((*MockSummary)(nil).Collect), arg0)
}
go
{ "resource": "" }
q5109
Observe
train
// Observe records a call to Observe on the mock with the given value.
func (m *MockSummary) Observe(arg0 float64) {
	m.ctrl.Call(m, "Observe", arg0)
}
go
{ "resource": "" }
q5110
Handle
train
// Handle services one metrics connection: it applies a deadline, runs
// l.do under the charmdir guard (abortable via abort), and writes
// "ok" or the error text back to the client before closing.
func (l *handler) Handle(c net.Conn, abort <-chan struct{}) error {
	defer c.Close()
	// TODO(fwereade): 2016-03-17 lp:1558657
	err := c.SetDeadline(time.Now().Add(spool.DefaultTimeout))
	if err != nil {
		return errors.Annotate(err, "failed to set the deadline")
	}
	err = l.config.charmdir.Visit(func() error {
		return l.do(c)
	}, abort)
	// The outcome is reported to the client either way; write errors
	// are ignored since the connection is about to close anyway.
	if err != nil {
		fmt.Fprintf(c, "error: %v\n", err.Error())
	} else {
		fmt.Fprintf(c, "ok\n")
	}
	return errors.Trace(err)
}
go
{ "resource": "" }
q5111
NewProvisionerWorker
train
// NewProvisionerWorker starts a CAAS provisioner worker driving the
// given facade and broker under a catacomb.
// NOTE(review): unlike sibling constructors this one does not call
// config.Validate first — confirm callers pre-validate.
func NewProvisionerWorker(config Config) (worker.Worker, error) {
	p := &provisioner{
		provisionerFacade: config.Facade,
		broker:            config.Broker,
		modelTag:          config.ModelTag,
		agentConfig:       config.AgentConfig,
		clock:             config.Clock,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &p.catacomb,
		Work: p.loop,
	})
	return p, err
}
go
{ "resource": "" }
q5112
ensureOperators
train
// ensureOperators makes sure a CAAS operator exists for each of the
// named applications: it waits out terminating operators, mints an
// initial API password for operators that do not exist yet, saves
// those passwords, then creates/updates each operator, collecting all
// per-application failures into one error.
func (p *provisioner) ensureOperators(apps []string) error {
	var appPasswords []apicaasprovisioner.ApplicationPassword
	operatorConfig := make([]*caas.OperatorConfig, len(apps))
	for i, app := range apps {
		opState, err := p.broker.OperatorExists(app)
		if err != nil {
			return errors.Annotatef(err, "failed to find operator for %q", app)
		}
		if opState.Exists && opState.Terminating {
			// We can't deploy an app while a previous version is terminating.
			// TODO(caas) - the remove application process should block until app terminated
			// TODO(caas) - consider making this async, but ok for now as it's a corner case
			if err := p.waitForOperatorTerminated(app); err != nil {
				return errors.Annotatef(err, "operator for %q was terminating and there was an error waiting for it to stop", app)
			}
			opState.Exists = false
		}
		// If the operator does not exist already, we need to create an initial
		// password for it.
		var password string
		if !opState.Exists {
			if password, err = utils.RandomPassword(); err != nil {
				return errors.Trace(err)
			}
			appPasswords = append(appPasswords, apicaasprovisioner.ApplicationPassword{Name: app, Password: password})
		}
		config, err := p.makeOperatorConfig(app, password)
		if err != nil {
			return errors.Annotatef(err, "failed to generate operator config for %q", app)
		}
		operatorConfig[i] = config
	}
	// If we did create any passwords for new operators, first they need
	// to be saved so the agent can login when it starts up.
	if len(appPasswords) > 0 {
		errorResults, err := p.provisionerFacade.SetPasswords(appPasswords)
		if err != nil {
			return errors.Annotate(err, "failed to set application api passwords")
		}
		if err := errorResults.Combine(); err != nil {
			return errors.Annotate(err, "failed to set application api passwords")
		}
	}
	// Now that any new config/passwords are done, create or update
	// the operators themselves.
	var errorStrings []string
	for i, app := range apps {
		if err := p.ensureOperator(app, operatorConfig[i]); err != nil {
			errorStrings = append(errorStrings, err.Error())
			continue
		}
	}
	// Surface every per-application failure in one combined error.
	if errorStrings != nil {
		err := errors.New(strings.Join(errorStrings, "\n"))
		return errors.Annotate(err, "failed to provision all operators")
	}
	return nil
}
go
{ "resource": "" }
q5113
Manifold
train
// Manifold returns a dependency.Manifold for this worker, declaring
// the agent, clock, controller-port and state resources as inputs and
// starting via config.start.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.AgentName,
			config.ClockName,
			config.ControllerPortName,
			config.StateName,
		},
		Start: config.start,
	}
}
go
{ "resource": "" }
q5114
Run
train
// Run enables HA on the current controller: it validates that the
// controller is IAAS, parses constraints, invokes EnableHA over the
// API and writes the resulting machine changes to the command output.
func (c *enableHACommand) Run(ctx *cmd.Context) error {
	controllerName, err := c.ControllerName()
	if err != nil {
		return errors.Trace(err)
	}
	if err := common.ValidateIaasController(c.CommandBase, c.Info().Name, controllerName, c.ClientStore()); err != nil {
		return errors.Trace(err)
	}
	c.Constraints, err = common.ParseConstraints(ctx, c.ConstraintsStr)
	if err != nil {
		return err
	}
	haClient, err := c.newHAClientFunc()
	if err != nil {
		return err
	}
	defer haClient.Close()
	enableHAResult, err := haClient.EnableHA(
		c.NumControllers,
		c.Constraints,
		c.Placement,
	)
	if err != nil {
		// Give the user actionable output when the change is blocked.
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	result := availabilityInfo{
		Added:      machineTagsToIds(enableHAResult.Added...),
		Removed:    machineTagsToIds(enableHAResult.Removed...),
		Maintained: machineTagsToIds(enableHAResult.Maintained...),
		Promoted:   machineTagsToIds(enableHAResult.Promoted...),
		Demoted:    machineTagsToIds(enableHAResult.Demoted...),
		Converted:  machineTagsToIds(enableHAResult.Converted...),
	}
	return c.out.Write(ctx, result)
}
go
{ "resource": "" }
q5115
machineTagsToIds
train
func machineTagsToIds(tags ...string) []string { var result []string for _, rawTag := range tags { tag, err := names.ParseTag(rawTag) if err != nil { continue } result = append(result, tag.Id()) } return result }
go
{ "resource": "" }
q5116
OpenResource
train
// OpenResource fetches the named resource for this opener's unit from
// the charm store, returning the resource metadata and a ReadCloser
// for its content.
func (ro *resourceOpener) OpenResource(name string) (o resource.Opened, err error) {
	if ro.unit == nil {
		return resource.Opened{}, errors.Errorf("missing unit")
	}
	app, err := ro.unit.Application()
	if err != nil {
		return resource.Opened{}, errors.Trace(err)
	}
	// NOTE(review): the bool from CharmURL is discarded — presumably the
	// URL is always set by this point; confirm against callers.
	cURL, _ := ro.unit.CharmURL()
	id := csclient.CharmID{
		URL:     cURL,
		Channel: app.Channel(),
	}
	csOpener := newCharmstoreOpener(ro.st)
	client, err := csOpener.NewClient()
	if err != nil {
		return resource.Opened{}, errors.Trace(err)
	}
	cache := &charmstoreEntityCache{
		st:            ro.res,
		userID:        ro.userID,
		unit:          ro.unit,
		applicationID: ro.unit.ApplicationName(),
	}
	res, reader, err := charmstore.GetResource(charmstore.GetResourceArgs{
		Client:  client,
		Cache:   cache,
		CharmID: id,
		Name:    name,
	})
	if err != nil {
		return resource.Opened{}, errors.Trace(err)
	}
	opened := resource.Opened{
		Resource:   res,
		ReadCloser: reader,
	}
	return opened, nil
}
go
{ "resource": "" }
q5117
Validate
train
func (config Config) Validate() error { if config.Facade == nil { return errors.NotValidf("nil Facade") } if config.CredentialAPI == nil { return errors.NotValidf("nil CredentialAPI") } if config.Destroyer == nil { return errors.NotValidf("nil Destroyer") } return nil }
go
{ "resource": "" }
q5118
NewUndertaker
train
// NewUndertaker returns an Undertaker running config's run loop inside
// a catacomb. The cloud call context is tied to the catacomb's Dying
// channel so cloud calls abort when the worker dies.
// NOTE(review): setCallCtx runs after catacomb.Invoke has started the
// run loop — confirm run waits for the call context before using it.
func NewUndertaker(config Config) (*Undertaker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	u := &Undertaker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.run,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	u.setCallCtx(common.NewCloudCallContext(config.CredentialAPI, u.catacomb.Dying))
	return u, nil
}
go
{ "resource": "" }
q5119
ImportModel
train
// ImportModel deserializes a model description, imports it into the
// controller's state, and re-establishes each application's leadership
// claim so leaders survive the migration.
func ImportModel(importer StateImporter, getClaimer ClaimerFunc, bytes []byte) (*state.Model, *state.State, error) {
	model, err := description.Deserialize(bytes)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	dbModel, dbState, err := importer.Import(model)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	config, err := dbState.ControllerConfig()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	// If we're using legacy-leases we get the claimer from the new
	// state - otherwise use the function passed in.
	var claimer leadership.Claimer
	if config.Features().Contains(feature.LegacyLeases) {
		claimer = dbState.LeadershipClaimer()
	} else {
		claimer, err = getClaimer(dbModel.UUID())
		if err != nil {
			return nil, nil, errors.Annotate(err, "getting leadership claimer")
		}
	}
	logger.Debugf("importing leadership")
	for _, application := range model.Applications() {
		if application.Leader() == "" {
			continue
		}
		// When we import a new model, we need to give the leaders
		// some time to settle. We don't want to have leader switches
		// just because we migrated a model, so this time needs to be
		// long enough to make sure we cover the time taken to migrate
		// a reasonable sized model. We don't yet know how long this
		// is going to be, but we need something.
		// TODO(babbageclunk): Handle this better - maybe a way to
		// suppress leadership expiries for a model until it's
		// finished importing?
		logger.Debugf("%q is the leader for %q", application.Leader(), application.Name())
		err := claimer.ClaimLeadership(
			application.Name(),
			application.Leader(),
			state.InitialLeaderClaimTime,
		)
		if err != nil {
			return nil, nil, errors.Annotatef(
				err, "claiming leadership for %q", application.Leader(),
			)
		}
	}
	return dbModel, dbState, nil
}
go
{ "resource": "" }
q5120
Validate
train
func (c *UploadBinariesConfig) Validate() error { if c.CharmDownloader == nil { return errors.NotValidf("missing CharmDownloader") } if c.CharmUploader == nil { return errors.NotValidf("missing CharmUploader") } if c.ToolsDownloader == nil { return errors.NotValidf("missing ToolsDownloader") } if c.ToolsUploader == nil { return errors.NotValidf("missing ToolsUploader") } if c.ResourceDownloader == nil { return errors.NotValidf("missing ResourceDownloader") } if c.ResourceUploader == nil { return errors.NotValidf("missing ResourceUploader") } return nil }
go
{ "resource": "" }
q5121
UploadBinaries
train
func UploadBinaries(config UploadBinariesConfig) error { if err := config.Validate(); err != nil { return errors.Trace(err) } if err := uploadCharms(config); err != nil { return errors.Trace(err) } if err := uploadTools(config); err != nil { return errors.Trace(err) } if err := uploadResources(config); err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q5122
Recorder
train
// Recorder returns a JSON metric recorder writing to the factory's
// spool directory for the given charm/unit and declared metrics.
func (f *factory) Recorder(declaredMetrics map[string]corecharm.Metric, charmURL, unitTag string) (MetricRecorder, error) {
	return NewJSONMetricRecorder(MetricRecorderConfig{
		SpoolDir: f.spoolDir,
		Metrics:  declaredMetrics,
		CharmURL: charmURL,
		UnitTag:  unitTag,
	})
}
go
{ "resource": "" }
q5123
Manifold
train
// Manifold returns an agent-scoped manifold whose worker is built by
// newWorker, with outputFunc attached as the manifold's output.
func Manifold(config ManifoldConfig) dependency.Manifold {
	manifold := engine.AgentManifold(engine.AgentManifoldConfig(config), newWorker)
	manifold.Output = outputFunc
	return manifold
}
go
{ "resource": "" }
q5124
newWorker
train
// newWorker validates the agent's metrics spool directory and returns
// a spoolWorker that idles until killed (its tomb goroutine simply
// waits for Dying).
func newWorker(a agent.Agent) (worker.Worker, error) {
	metricsSpoolDir := a.CurrentConfig().MetricsSpoolDir()
	err := checkSpoolDir(metricsSpoolDir)
	if err != nil {
		return nil, errors.Annotatef(err, "error checking spool directory %q", metricsSpoolDir)
	}
	w := &spoolWorker{factory: newFactory(metricsSpoolDir)}
	w.tomb.Go(func() error {
		<-w.tomb.Dying()
		return nil
	})
	return w, nil
}
go
{ "resource": "" }
q5125
FormatPayload
train
func FormatPayload(payload payload.FullPayloadInfo) FormattedPayload { var labels []string if len(payload.Labels) > 0 { labels = make([]string, len(payload.Labels)) copy(labels, payload.Labels) } return FormattedPayload{ Unit: payload.Unit, Machine: payload.Machine, ID: payload.ID, Type: payload.Type, Class: payload.Name, Labels: labels, // TODO(ericsnow) Explicitly convert to a string? Status: payload.Status, } }
go
{ "resource": "" }
q5126
EnsureCachedImage
train
// EnsureCachedImage records the start params on the mock and bumps the
// package-level imageCacheCalls counter; it never fails.
func (mock *MockContainer) EnsureCachedImage(params kvm.StartParams) error {
	imageCacheCalls++
	mock.StartParams = params
	return nil
}
go
{ "resource": "" }
q5127
Stop
train
func (mock *MockContainer) Stop() error { if !mock.started { return fmt.Errorf("container is not running") } mock.started = false mock.factory.notify(Stopped, mock.name) return nil }
go
{ "resource": "" }
q5128
SourcePrecheck
train
// SourcePrecheck verifies that a model is fit to be migrated away from
// this controller: the model, machines, applications and relations are
// checked, pending cleanups block migration, and finally the source
// controller itself is checked.
func SourcePrecheck(
	backend PrecheckBackend,
	modelPresence ModelPresence,
	controllerPresence ModelPresence,
) error {
	ctx := precheckContext{backend, modelPresence}
	if err := ctx.checkModel(); err != nil {
		return errors.Trace(err)
	}
	if err := ctx.checkMachines(); err != nil {
		return errors.Trace(err)
	}
	appUnits, err := ctx.checkApplications()
	if err != nil {
		return errors.Trace(err)
	}
	if err := ctx.checkRelations(appUnits); err != nil {
		return errors.Trace(err)
	}
	if cleanupNeeded, err := backend.NeedsCleanup(); err != nil {
		return errors.Annotate(err, "checking cleanups")
	} else if cleanupNeeded {
		return errors.New("cleanup needed")
	}
	// Check the source controller.
	controllerBackend, err := backend.ControllerBackend()
	if err != nil {
		return errors.Trace(err)
	}
	controllerCtx := precheckContext{controllerBackend, controllerPresence}
	if err := controllerCtx.checkController(); err != nil {
		return errors.Annotate(err, "controller")
	}
	return nil
}
go
{ "resource": "" }
q5129
TargetPrecheck
train
func TargetPrecheck(backend PrecheckBackend, pool Pool, modelInfo coremigration.ModelInfo, presence ModelPresence) error { if err := modelInfo.Validate(); err != nil { return errors.Trace(err) } // This check is necessary because there is a window between the // REAP phase and then end of the DONE phase where a model's // documents have been deleted but the migration isn't quite done // yet. Migrating a model back into the controller during this // window can upset the migrationmaster worker. // // See also https://lpad.tv/1611391 if migrating, err := backend.IsMigrationActive(modelInfo.UUID); err != nil { return errors.Annotate(err, "checking for active migration") } else if migrating { return errors.New("model is being migrated out of target controller") } controllerVersion, err := backend.AgentVersion() if err != nil { return errors.Annotate(err, "retrieving model version") } if controllerVersion.Compare(modelInfo.AgentVersion) < 0 { return errors.Errorf("model has higher version than target controller (%s > %s)", modelInfo.AgentVersion, controllerVersion) } if !controllerVersionCompatible(modelInfo.ControllerAgentVersion, controllerVersion) { return errors.Errorf("source controller has higher version than target controller (%s > %s)", modelInfo.ControllerAgentVersion, controllerVersion) } controllerCtx := precheckContext{backend, presence} if err := controllerCtx.checkController(); err != nil { return errors.Trace(err) } // Check for conflicts with existing models modelUUIDs, err := backend.AllModelUUIDs() if err != nil { return errors.Annotate(err, "retrieving models") } for _, modelUUID := range modelUUIDs { model, release, err := pool.GetModel(modelUUID) if err != nil { return errors.Trace(err) } defer release() // If the model is importing then it's probably left behind // from a previous migration attempt. It will be removed // before the next import. 
if model.UUID() == modelInfo.UUID && model.MigrationMode() != state.MigrationModeImporting { return errors.Errorf("model with same UUID already exists (%s)", modelInfo.UUID) } if model.Name() == modelInfo.Name && model.Owner() == modelInfo.Owner { return errors.Errorf("model named %q already exists", model.Name()) } } return nil }
go
{ "resource": "" }
q5130
IsMetricsDataError
train
func IsMetricsDataError(err error) bool { _, ok := errors.Cause(err).(*errMetricsData) return ok }
go
{ "resource": "" }
q5131
APIMetricBatch
train
// APIMetricBatch converts a local MetricBatch into the wire-format
// params.MetricBatchParam used by the API.
func APIMetricBatch(batch MetricBatch) params.MetricBatchParam {
	metrics := make([]params.Metric, len(batch.Metrics))
	for i, metric := range batch.Metrics {
		metrics[i] = params.Metric{
			Key:    metric.Key,
			Value:  metric.Value,
			Time:   metric.Time,
			Labels: metric.Labels,
		}
	}
	return params.MetricBatchParam{
		Tag: batch.UnitTag,
		Batch: params.MetricBatch{
			UUID:     batch.UUID,
			CharmURL: batch.CharmURL,
			Created:  batch.Created,
			Metrics:  metrics,
		},
	}
}
go
{ "resource": "" }
q5132
NewJSONMetricRecorder
train
// NewJSONMetricRecorder creates a recorder that spools metrics as JSON
// under config.SpoolDir, opening its backing files immediately.
func NewJSONMetricRecorder(config MetricRecorderConfig) (rec *JSONMetricRecorder, rErr error) {
	mbUUID, err := utils.NewUUID()
	if err != nil {
		return nil, errors.Trace(err)
	}
	recorder := &JSONMetricRecorder{
		spoolDir: config.SpoolDir,
		uuid:     mbUUID,
		charmURL: config.CharmURL,
		// TODO(fwereade): 2016-03-17 lp:1558657
		created:      time.Now().UTC(),
		validMetrics: config.Metrics,
		unitTag:      config.UnitTag,
	}
	if err := recorder.open(); err != nil {
		return nil, errors.Trace(err)
	}
	return recorder, nil
}
go
{ "resource": "" }
q5133
Close
train
// Close finishes the metric batch: it closes the data file and then
// writes the accompanying meta file that marks the batch complete.
func (m *JSONMetricRecorder) Close() error {
	m.lock.Lock()
	defer m.lock.Unlock()
	err := m.file.Close()
	if err != nil {
		return errors.Trace(err)
	}
	// We have an exclusive lock on this metric batch here, because
	// metricsFile.Close was able to rename the final filename atomically.
	//
	// Now write the meta file so that JSONMetricReader discovers a finished
	// pair of files.
	err = m.recordMetaData()
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
go
{ "resource": "" }
q5134
AddMetric
train
// AddMetric validates and appends one metric to the batch. Any error
// is wrapped as *errMetricsData (via the deferred closure) so callers
// can classify it with IsMetricsDataError.
func (m *JSONMetricRecorder) AddMetric(
	key, value string, created time.Time, labels map[string]string) (err error) {
	defer func() {
		if err != nil {
			err = &errMetricsData{err}
		}
	}()
	err = m.validateMetric(key, value)
	if err != nil {
		return errors.Trace(err)
	}
	m.lock.Lock()
	defer m.lock.Unlock()
	return errors.Trace(m.enc.Encode(jujuc.Metric{
		Key:    key,
		Value:  value,
		Time:   created,
		Labels: labels,
	}))
}
go
{ "resource": "" }
q5135
IsDeclaredMetric
train
func (m *JSONMetricRecorder) IsDeclaredMetric(key string) bool { _, ok := m.validMetrics[key] return ok }
go
{ "resource": "" }
q5136
NewJSONMetricReader
train
func NewJSONMetricReader(spoolDir string) (*JSONMetricReader, error) { if _, err := os.Stat(spoolDir); err != nil { return nil, errors.Annotatef(err, "failed to open spool directory %q", spoolDir) } return &JSONMetricReader{ dir: spoolDir, }, nil }
go
{ "resource": "" }
q5137
Read
train
// Read walks the spool directory and returns every finished metric
// batch (a .meta file plus its matching data file). Errors are
// wrapped as *errMetricsData via the deferred closure.
func (r *JSONMetricReader) Read() (_ []MetricBatch, err error) {
	defer func() {
		if err != nil {
			err = &errMetricsData{err}
		}
	}()

	var batches []MetricBatch
	walker := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return errors.Trace(err)
		}
		// Do not descend into subdirectories; only top-level .meta
		// files mark complete batches.
		if info.IsDir() && path != r.dir {
			return filepath.SkipDir
		} else if !strings.HasSuffix(info.Name(), ".meta") {
			return nil
		}
		batch, err := decodeBatch(path)
		if err != nil {
			return errors.Trace(err)
		}
		batch.Metrics, err = decodeMetrics(filepath.Join(r.dir, batch.UUID))
		if err != nil {
			return errors.Trace(err)
		}
		if len(batch.Metrics) > 0 {
			batches = append(batches, batch)
		}
		return nil
	}
	if err := filepath.Walk(r.dir, walker); err != nil {
		return nil, errors.Trace(err)
	}
	return batches, nil
}
go
{ "resource": "" }
q5138
Remove
train
// Remove deletes the meta and data files belonging to the batch with
// the given UUID. A missing meta file is tolerated but a missing data
// file is not — NOTE(review): this asymmetry may be deliberate (the
// meta file is removed first, so a retry can find it already gone);
// confirm before changing.
func (r *JSONMetricReader) Remove(uuid string) error {
	metaFile := filepath.Join(r.dir, fmt.Sprintf("%s.meta", uuid))
	dataFile := filepath.Join(r.dir, uuid)
	err := os.Remove(metaFile)
	if err != nil && !os.IsNotExist(err) {
		return errors.Trace(err)
	}
	err = os.Remove(dataFile)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
go
{ "resource": "" }
q5139
AddPendingResources
train
// AddPendingResources registers the given charm resources as pending
// against the application, returning the pending resource IDs.
func (cl *deployClient) AddPendingResources(applicationID string, chID charmstore.CharmID, csMac *macaroon.Macaroon, resources []charmresource.Resource) ([]string, error) {
	return cl.Client.AddPendingResources(client.AddPendingResourcesArgs{
		ApplicationID:      applicationID,
		CharmID:            chID,
		CharmStoreMacaroon: csMac,
		Resources:          resources,
	})
}
go
{ "resource": "" }
q5140
checkOperation
train
// checkOperation refetches the current state of a GCE operation,
// choosing the zone-, region- or global-scoped API based on which
// location fields the operation carries.
func (rc *rawConn) checkOperation(projectID string, op *compute.Operation) (*compute.Operation, error) {
	var call opDoer
	if op.Zone != "" {
		// op.Zone is a URL; its last path element is the zone name.
		zoneName := path.Base(op.Zone)
		call = rc.ZoneOperations.Get(projectID, zoneName, op.Name)
	} else if op.Region != "" {
		region := path.Base(op.Region)
		call = rc.RegionOperations.Get(projectID, region, op.Name)
	} else {
		call = rc.GlobalOperations.Get(projectID, op.Name)
	}
	operation, err := doOpCall(call)
	if err != nil {
		return nil, errors.Annotatef(err, "request for GCE operation %q failed", op.Name)
	}
	return operation, nil
}
go
{ "resource": "" }
q5141
ListMachineTypes
train
func (rc *rawConn) ListMachineTypes(projectID, zone string) (*compute.MachineTypeList, error) { op := rc.MachineTypes.List(projectID, zone) machines, err := op.Do() if err != nil { return nil, errors.Annotatef(err, "listing machine types for project %q and zone %q", projectID, zone) } return machines, nil }
go
{ "resource": "" }
q5142
Validate
train
func (c *Config) Validate() error { if c.Logger == nil { return errors.NotValidf("missing logger") } if c.WatcherFactory == nil { return errors.NotValidf("missing watcher factory") } if c.PrometheusRegisterer == nil { return errors.NotValidf("missing prometheus registerer") } if c.Cleanup == nil { return errors.NotValidf("missing cleanup func") } return nil }
go
{ "resource": "" }
q5143
NewWorker
train
// NewWorker creates a model-cache worker: it builds a cache.Controller
// fed from the worker's changes channel and runs both worker loop and
// controller inside a catacomb.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	w := &cacheWorker{
		config:  config,
		changes: make(chan interface{}),
	}
	controller, err := cache.NewController(
		cache.ControllerConfig{
			Changes: w.changes,
			Notify:  config.Notify,
		})
	if err != nil {
		return nil, errors.Trace(err)
	}
	w.controller = controller
	// The controller is registered as an Init worker so the catacomb
	// owns its lifetime alongside the main loop.
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
		Init: []worker.Worker{w.controller},
	}); err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
go
{ "resource": "" }
q5144
runForAllModelStates
train
func runForAllModelStates(pool *StatePool, runner func(st *State) error) error { st := pool.SystemState() models, closer := st.db().GetCollection(modelsC) defer closer() var modelDocs []bson.M err := models.Find(nil).Select(bson.M{"_id": 1}).All(&modelDocs) if err != nil { return errors.Annotate(err, "failed to read models") } for _, modelDoc := range modelDocs { modelUUID := modelDoc["_id"].(string) model, err := pool.Get(modelUUID) if err != nil { return errors.Annotatef(err, "failed to open model %q", modelUUID) } defer func() { model.Release() }() if err := runner(model.State); err != nil { return errors.Annotatef(err, "model UUID %q", modelUUID) } } return nil }
go
{ "resource": "" }
q5145
readBsonDField
train
func readBsonDField(d bson.D, name string) (interface{}, bool) { for i := range d { field := &d[i] if field.Name == name { return field.Value, true } } return nil, false }
go
{ "resource": "" }
q5146
replaceBsonDField
train
func replaceBsonDField(d bson.D, name string, value interface{}) error { for i, field := range d { if field.Name == name { newField := field newField.Value = value d[i] = newField return nil } } return errors.NotFoundf("field %q", name) }
go
{ "resource": "" }
q5147
RenameAddModelPermission
train
// RenameAddModelPermission migrates permission docs whose access value
// is the legacy "addmodel" to the current "add-model" spelling, in a
// single raw transaction.
func RenameAddModelPermission(pool *StatePool) error {
	st := pool.SystemState()
	coll, closer := st.db().GetRawCollection(permissionsC)
	defer closer()
	upgradesLogger.Infof("migrating addmodel permission")
	iter := coll.Find(bson.M{"access": "addmodel"}).Iter()
	defer iter.Close()
	var ops []txn.Op
	var doc bson.M
	for iter.Next(&doc) {
		id, ok := doc["_id"]
		if !ok {
			return errors.New("no id found in permission doc")
		}
		ops = append(ops, txn.Op{
			C:      permissionsC,
			Id:     id,
			Assert: txn.DocExists,
			Update: bson.D{{"$set", bson.D{{"access", "add-model"}}}},
		})
	}
	if err := iter.Close(); err != nil {
		return errors.Trace(err)
	}
	return st.runRawTransaction(ops)
}
go
{ "resource": "" }
q5148
StripLocalUserDomain
train
func StripLocalUserDomain(pool *StatePool) error { st := pool.SystemState() var ops []txn.Op more, err := stripLocalFromFields(st, cloudCredentialsC, "_id", "owner") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, modelsC, "owner", "cloud-credential") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, usermodelnameC, "_id") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, controllerUsersC, "_id", "user", "createdby") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, modelUsersC, "_id", "user", "createdby") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, permissionsC, "_id", "subject-global-key") if err != nil { return err } ops = append(ops, more...) more, err = stripLocalFromFields(st, modelUserLastConnectionC, "_id", "user") if err != nil { return err } ops = append(ops, more...) return st.runRawTransaction(ops) }
go
{ "resource": "" }
q5149
AddMigrationAttempt
train
// AddMigrationAttempt backfills the "attempt" field on migration docs
// that predate it, deriving the attempt number from the doc id.
func AddMigrationAttempt(pool *StatePool) error {
	st := pool.SystemState()
	coll, closer := st.db().GetRawCollection(migrationsC)
	defer closer()
	query := coll.Find(bson.M{"attempt": bson.M{"$exists": false}})
	query = query.Select(bson.M{"_id": 1})
	iter := query.Iter()
	defer iter.Close()
	var ops []txn.Op
	var doc bson.M
	for iter.Next(&doc) {
		id := doc["_id"]
		attempt, err := extractMigrationAttempt(id)
		if err != nil {
			// A malformed id is logged and skipped rather than failing
			// the whole upgrade step.
			upgradesLogger.Warningf("%s (skipping)", err)
			continue
		}
		ops = append(ops, txn.Op{
			C:      migrationsC,
			Id:     id,
			Assert: txn.DocExists,
			Update: bson.D{{"$set", bson.D{{"attempt", attempt}}}},
		})
	}
	if err := iter.Close(); err != nil {
		return errors.Annotate(err, "iterating migrations")
	}
	return errors.Trace(st.runRawTransaction(ops))
}
go
{ "resource": "" }
q5150
AddLocalCharmSequences
train
// AddLocalCharmSequences creates the per-model charm revision sequences
// for local charms (so future uploads get fresh revisions) and removes
// charm docs that are already Dead.
func AddLocalCharmSequences(pool *StatePool) error {
	st := pool.SystemState()
	charmsColl, closer := st.db().GetRawCollection(charmsC)
	defer closer()
	query := bson.M{
		"url": bson.M{"$regex": "^local:"},
	}
	var docs []bson.M
	err := charmsColl.Find(query).Select(bson.M{
		"_id":  1,
		"life": 1,
	}).All(&docs)
	if err != nil {
		return errors.Trace(err)
	}
	// model UUID -> charm URL base -> max revision
	maxRevs := make(map[string]map[string]int)
	var deadIds []string
	for _, doc := range docs {
		id, ok := doc["_id"].(string)
		if !ok {
			upgradesLogger.Errorf("invalid charm id: %v", doc["_id"])
			continue
		}
		modelUUID, urlStr, ok := splitDocID(id)
		if !ok {
			upgradesLogger.Errorf("unable to split charm _id: %v", id)
			continue
		}
		url, err := charm.ParseURL(urlStr)
		if err != nil {
			upgradesLogger.Errorf("unable to parse charm URL: %v", err)
			continue
		}
		if _, exists := maxRevs[modelUUID]; !exists {
			maxRevs[modelUUID] = make(map[string]int)
		}
		// Track the highest revision seen per base URL (revision -1
		// strips the revision from the URL).
		baseURL := url.WithRevision(-1).String()
		curRev := maxRevs[modelUUID][baseURL]
		if url.Revision > curRev {
			maxRevs[modelUUID][baseURL] = url.Revision
		}
		if life, ok := doc["life"].(int); !ok {
			upgradesLogger.Errorf("invalid life for charm: %s", id)
			continue
		} else if life == int(Dead) {
			deadIds = append(deadIds, id)
		}
	}
	sequences, closer := st.db().GetRawCollection(sequenceC)
	defer closer()
	for modelUUID, modelRevs := range maxRevs {
		for baseURL, maxRevision := range modelRevs {
			name := charmRevSeqName(baseURL)
			updater := newDbSeqUpdater(sequences, modelUUID, name)
			err := updater.ensure(maxRevision + 1)
			if err != nil {
				return errors.Annotatef(err, "setting sequence %s", name)
			}
		}
	}
	// Remove dead charm documents
	var ops []txn.Op
	for _, id := range deadIds {
		ops = append(ops, txn.Op{
			C:      charmsC,
			Id:     id,
			Remove: true,
		})
	}
	err = st.runRawTransaction(ops)
	return errors.Annotate(err, "removing dead charms")
}
go
{ "resource": "" }
q5151
UpdateLegacyLXDCloudCredentials
train
// UpdateLegacyLXDCloudCredentials rewrites legacy LXD cloud definitions
// to the given endpoint and replaces their credentials, committing both
// sets of operations in one transaction.
func UpdateLegacyLXDCloudCredentials(
	st *State,
	endpoint string,
	credential cloud.Credential,
) error {
	cloudOps, err := updateLegacyLXDCloudsOps(st, endpoint)
	if err != nil {
		return errors.Trace(err)
	}
	credOps, err := updateLegacyLXDCredentialsOps(st, credential)
	if err != nil {
		return errors.Trace(err)
	}
	return st.db().RunTransaction(append(cloudOps, credOps...))
}
go
{ "resource": "" }
q5152
UpgradeNoProxyDefaults
train
// UpgradeNoProxyDefaults rewrites the no-proxy value in every settings
// doc to the upgraded format via upgradeNoProxy.
func UpgradeNoProxyDefaults(pool *StatePool) error {
	st := pool.SystemState()
	var ops []txn.Op
	coll, closer := st.db().GetRawCollection(settingsC)
	defer closer()
	iter := coll.Find(bson.D{}).Iter()
	defer iter.Close()
	var doc settingsDoc
	for iter.Next(&doc) {
		noProxyVal := doc.Settings[config.NoProxyKey]
		noProxy, ok := noProxyVal.(string)
		if !ok {
			// Absent or non-string values are left untouched.
			continue
		}
		noProxy = upgradeNoProxy(noProxy)
		doc.Settings[config.NoProxyKey] = noProxy
		ops = append(ops, txn.Op{
			C:      settingsC,
			Id:     doc.DocID,
			Assert: txn.DocExists,
			Update: bson.M{"$set": bson.M{"settings": doc.Settings}},
		})
	}
	if err := iter.Close(); err != nil {
		return errors.Trace(err)
	}
	if len(ops) > 0 {
		return errors.Trace(st.runRawTransaction(ops))
	}
	return nil
}
go
{ "resource": "" }
q5153
RemoveNilValueApplicationSettings
train
// RemoveNilValueApplicationSettings strips nil-valued keys from
// application settings docs (ids matching "<uuid>:a#<app>...").
func RemoveNilValueApplicationSettings(pool *StatePool) error {
	st := pool.SystemState()
	coll, closer := st.db().GetRawCollection(settingsC)
	defer closer()
	iter := coll.Find(bson.M{"_id": bson.M{"$regex": "^.*:a#.*"}}).Iter()
	defer iter.Close()
	var ops []txn.Op
	var doc settingsDoc
	for iter.Next(&doc) {
		settingsChanged := false
		for key, value := range doc.Settings {
			if value != nil {
				continue
			}
			settingsChanged = true
			// Deleting during range is safe for map iteration in Go.
			delete(doc.Settings, key)
		}
		if settingsChanged {
			ops = append(ops, txn.Op{
				C:      settingsC,
				Id:     doc.DocID,
				Assert: txn.DocExists,
				Update: bson.M{"$set": bson.M{"settings": doc.Settings}},
			})
		}
	}
	if err := iter.Close(); err != nil {
		return errors.Trace(err)
	}
	if len(ops) > 0 {
		return errors.Trace(st.runRawTransaction(ops))
	}
	return nil
}
go
{ "resource": "" }
q5154
AddControllerLogCollectionsSizeSettings
train
func AddControllerLogCollectionsSizeSettings(pool *StatePool) error { st := pool.SystemState() coll, closer := st.db().GetRawCollection(controllersC) defer closer() var doc settingsDoc if err := coll.FindId(controllerSettingsGlobalKey).One(&doc); err != nil { if err == mgo.ErrNotFound { return nil } return errors.Trace(err) } var ops []txn.Op settingsChanged := maybeUpdateSettings(doc.Settings, controller.MaxLogsAge, fmt.Sprintf("%vh", controller.DefaultMaxLogsAgeDays*24)) settingsChanged = maybeUpdateSettings(doc.Settings, controller.MaxLogsSize, fmt.Sprintf("%vM", controller.DefaultMaxLogCollectionMB)) || settingsChanged settingsChanged = maybeUpdateSettings(doc.Settings, controller.MaxTxnLogSize, fmt.Sprintf("%vM", controller.DefaultMaxTxnLogCollectionMB)) || settingsChanged if settingsChanged { ops = append(ops, txn.Op{ C: controllersC, Id: doc.DocID, Assert: txn.DocExists, Update: bson.M{"$set": bson.M{"settings": doc.Settings}}, }) } if len(ops) > 0 { return errors.Trace(st.runRawTransaction(ops)) } return nil }
go
{ "resource": "" }
q5155
applyToAllModelSettings
train
func applyToAllModelSettings(st *State, change func(*settingsDoc) (bool, error)) error { uuids, err := st.AllModelUUIDs() if err != nil { return errors.Trace(err) } coll, closer := st.db().GetRawCollection(settingsC) defer closer() var ids []string for _, uuid := range uuids { ids = append(ids, uuid+":e") } iter := coll.Find(bson.M{"_id": bson.M{"$in": ids}}).Iter() defer iter.Close() var ops []txn.Op var doc settingsDoc for iter.Next(&doc) { settingsChanged, err := change(&doc) if err != nil { return errors.Trace(err) } if settingsChanged { ops = append(ops, txn.Op{ C: settingsC, Id: doc.DocID, Assert: txn.DocExists, Update: bson.M{"$set": bson.M{"settings": doc.Settings}}, }) } } if err := iter.Close(); err != nil { return errors.Trace(err) } if len(ops) > 0 { return errors.Trace(st.runRawTransaction(ops)) } return nil }
go
{ "resource": "" }
q5156
AddStatusHistoryPruneSettings
train
func AddStatusHistoryPruneSettings(pool *StatePool) error { st := pool.SystemState() err := applyToAllModelSettings(st, func(doc *settingsDoc) (bool, error) { settingsChanged := maybeUpdateSettings(doc.Settings, config.MaxStatusHistoryAge, config.DefaultStatusHistoryAge) settingsChanged = maybeUpdateSettings(doc.Settings, config.MaxStatusHistorySize, config.DefaultStatusHistorySize) || settingsChanged return settingsChanged, nil }) if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q5157
AddActionPruneSettings
train
func AddActionPruneSettings(pool *StatePool) error { st := pool.SystemState() err := applyToAllModelSettings(st, func(doc *settingsDoc) (bool, error) { settingsChanged := maybeUpdateSettings(doc.Settings, config.MaxActionResultsAge, config.DefaultActionResultsAge) settingsChanged = maybeUpdateSettings(doc.Settings, config.MaxActionResultsSize, config.DefaultActionResultsSize) || settingsChanged return settingsChanged, nil }) if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q5158
AddUpdateStatusHookSettings
train
func AddUpdateStatusHookSettings(pool *StatePool) error { st := pool.SystemState() err := applyToAllModelSettings(st, func(doc *settingsDoc) (bool, error) { settingsChanged := maybeUpdateSettings(doc.Settings, config.UpdateStatusHookInterval, config.DefaultUpdateStatusHookInterval) return settingsChanged, nil }) if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q5159
SplitLogCollections
train
func SplitLogCollections(pool *StatePool) error { st := pool.SystemState() session := st.MongoSession() db := session.DB(logsDB) oldLogs := db.C("logs") // If we haven't seen any particular model, we need to initialise // the logs collection with the right indices. seen := set.NewStrings() iter := oldLogs.Find(nil).Iter() defer iter.Close() var doc bson.M for iter.Next(&doc) { modelUUID := doc["e"].(string) newCollName := logCollectionName(modelUUID) newLogs := db.C(newCollName) if !seen.Contains(newCollName) { if err := InitDbLogs(session, modelUUID); err != nil { return errors.Annotatef(err, "failed to init new logs collection %q", newCollName) } seen.Add(newCollName) } delete(doc, "e") // old model uuid if err := newLogs.Insert(doc); err != nil { // In the case of a restart, we may have already moved // some of these rows, in which case we'd get a duplicate // id error (this is OK). if !mgo.IsDup(err) { return errors.Annotate(err, "failed to insert log record") } } doc = nil } if err := iter.Close(); err != nil { return errors.Trace(err) } // drop the old collection if err := oldLogs.DropCollection(); err != nil { // If the namespace is already missing, that's fine. if isMgoNamespaceNotFound(err) { return nil } return errors.Annotate(err, "failed to drop old logs collection") } return nil }
go
{ "resource": "" }
q5160
MoveOldAuditLog
train
func MoveOldAuditLog(pool *StatePool) error { st := pool.SystemState() names, err := st.MongoSession().DB("juju").CollectionNames() if err != nil { return errors.Trace(err) } if !set.NewStrings(names...).Contains("audit.log") { // No audit log collection to move. return nil } coll, closer := st.db().GetRawCollection("audit.log") defer closer() rows, err := coll.Count() if err != nil { return errors.Trace(err) } if rows == 0 { return errors.Trace(coll.DropCollection()) } session := st.MongoSession() renameCommand := bson.D{ {"renameCollection", "juju.audit.log"}, {"to", "juju.old-audit.log"}, } return errors.Trace(session.Run(renameCommand, nil)) }
go
{ "resource": "" }
q5161
DeleteCloudImageMetadata
train
func DeleteCloudImageMetadata(pool *StatePool) error { st := pool.SystemState() coll, closer := st.db().GetRawCollection(cloudimagemetadataC) defer closer() bulk := coll.Bulk() bulk.Unordered() bulk.RemoveAll(bson.D{{"source", bson.D{{"$ne", "custom"}}}}) _, err := bulk.Run() return errors.Annotate(err, "deleting cloud image metadata records") }
go
{ "resource": "" }
q5162
MoveMongoSpaceToHASpaceConfig
train
func MoveMongoSpaceToHASpaceConfig(pool *StatePool) error { st := pool.SystemState() // Holds Mongo space fields removed from controllersDoc. type controllersUpgradeDoc struct { MongoSpaceName string `bson:"mongo-space-name"` MongoSpaceState string `bson:"mongo-space-state"` } var doc controllersUpgradeDoc controllerColl, controllerCloser := st.db().GetRawCollection(controllersC) defer controllerCloser() err := controllerColl.Find(bson.D{{"_id", modelGlobalKey}}).One(&doc) if err != nil { return errors.Annotate(err, "retrieving controller info doc") } mongoSpace := doc.MongoSpaceName if doc.MongoSpaceState == "valid" && mongoSpace != "" { settings, err := readSettings(st.db(), controllersC, controllerSettingsGlobalKey) if err != nil { return errors.Annotate(err, "cannot get controller config") } // In the unlikely event that there is already a juju-ha-space // configuration setting, we do not copy over it with the old Mongo // space name. if haSpace, ok := settings.Get(controller.JujuHASpace); ok { upgradesLogger.Debugf("not copying mongo-space-name %q to juju-ha-space - already set to %q", mongoSpace, haSpace) } else { settings.Set(controller.JujuHASpace, mongoSpace) if _, err = settings.Write(); err != nil { return errors.Annotate(err, "writing controller info") } } } err = controllerColl.UpdateId(modelGlobalKey, bson.M{"$unset": bson.M{ "mongo-space-name": 1, "mongo-space-state": 1, }}) return errors.Annotate(err, "removing mongo-space-state and mongo-space-name") }
go
{ "resource": "" }
q5163
CreateMissingApplicationConfig
train
func CreateMissingApplicationConfig(pool *StatePool) error { st := pool.SystemState() settingsColl, settingsCloser := st.db().GetRawCollection(settingsC) defer settingsCloser() var applicationConfigIDs []struct { ID string `bson:"_id"` } settingsColl.Find(bson.M{ "_id": bson.M{"$regex": bson.RegEx{"#application$", ""}}}).All(&applicationConfigIDs) allIDs := set.NewStrings() for _, id := range applicationConfigIDs { allIDs.Add(id.ID) } appsColl, appsCloser := st.db().GetRawCollection(applicationsC) defer appsCloser() var applicationNames []struct { Name string `bson:"name"` ModelUUID string `bson:"model-uuid"` } appsColl.Find(nil).All(&applicationNames) var newAppConfigOps []txn.Op emptySettings := make(map[string]interface{}) for _, app := range applicationNames { appConfID := fmt.Sprintf("%s:%s", app.ModelUUID, applicationConfigKey(app.Name)) if !allIDs.Contains(appConfID) { newOp := createSettingsOp(settingsC, appConfID, emptySettings) // createSettingsOp assumes you're using a model-specific state, which will auto-inject the ModelUUID // since we're doing this globally, cast it to the underlying type and add it. newOp.Insert.(*settingsDoc).ModelUUID = app.ModelUUID newAppConfigOps = append(newAppConfigOps, newOp) } } err := st.db().RunRawTransaction(newAppConfigOps) if err != nil { return errors.Annotate(err, "writing application configs") } return nil }
go
{ "resource": "" }
q5164
RemoveVotingMachineIds
train
func RemoveVotingMachineIds(pool *StatePool) error { st := pool.SystemState() controllerColl, controllerCloser := st.db().GetRawCollection(controllersC) defer controllerCloser() // The votingmachineids field is just a denormalization of Machine.WantsVote() so we can just // remove it as being redundant err := controllerColl.UpdateId(modelGlobalKey, bson.M{"$unset": bson.M{"votingmachineids": 1}}) if err != nil { return errors.Annotate(err, "removing votingmachineids") } return nil }
go
{ "resource": "" }
q5165
AddCloudModelCounts
train
func AddCloudModelCounts(pool *StatePool) error { st := pool.SystemState() cloudsColl, closer := st.db().GetCollection(cloudsC) defer closer() var clouds []cloudDoc err := cloudsColl.Find(nil).All(&clouds) if err != nil { return errors.Trace(err) } modelsColl, closer := st.db().GetCollection(modelsC) defer closer() refCountColl, closer := st.db().GetCollection(globalRefcountsC) defer closer() var updateOps []txn.Op for _, c := range clouds { n, err := modelsColl.Find(bson.D{{"cloud", c.Name}}).Count() if err != nil { return errors.Trace(err) } _, currentCount, err := countCloudModelRefOp(st, c.Name) if err != nil { return errors.Trace(err) } if n != currentCount { op, err := nsRefcounts.CreateOrIncRefOp(refCountColl, cloudModelRefCountKey(c.Name), n-currentCount) if err != nil { return errors.Trace(err) } updateOps = append(updateOps, op) } } return st.db().RunTransaction(updateOps) }
go
{ "resource": "" }
q5166
UpgradeContainerImageStreamDefault
train
func UpgradeContainerImageStreamDefault(pool *StatePool) error { st := pool.SystemState() err := applyToAllModelSettings(st, func(doc *settingsDoc) (bool, error) { ciStreamVal, keySet := doc.Settings[config.ContainerImageStreamKey] if keySet { if ciStream, _ := ciStreamVal.(string); ciStream != "" { return false, nil } } doc.Settings[config.ContainerImageStreamKey] = "released" return true, nil }) if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q5167
ReplicaSetMembers
train
func ReplicaSetMembers(pool *StatePool) ([]replicaset.Member, error) { return replicaset.CurrentMembers(pool.SystemState().MongoSession()) }
go
{ "resource": "" }
q5168
LegacyLeases
train
func LegacyLeases(pool *StatePool, localTime time.Time) (map[corelease.Key]corelease.Info, error) { st := pool.SystemState() reader, err := globalclock.NewReader(globalclock.ReaderConfig{ Config: globalclock.Config{ Collection: globalClockC, Mongo: &environMongo{state: st}, }, }) if err != nil { return nil, errors.Trace(err) } globalTime, err := reader.Now() // This needs to be the raw collection so we see all leases across // models. leaseCollection, closer := st.db().GetRawCollection(leasesC) defer closer() iter := leaseCollection.Find(nil).Iter() results := make(map[corelease.Key]corelease.Info) var doc struct { Namespace string `bson:"namespace"` ModelUUID string `bson:"model-uuid"` Name string `bson:"name"` Holder string `bson:"holder"` Start int64 `bson:"start"` Duration time.Duration `bson:"duration"` } for iter.Next(&doc) { startTime := time.Unix(0, doc.Start) globalExpiry := startTime.Add(doc.Duration) remaining := globalExpiry.Sub(globalTime) localExpiry := localTime.Add(remaining) key := corelease.Key{ Namespace: doc.Namespace, ModelUUID: doc.ModelUUID, Lease: doc.Name, } results[key] = corelease.Info{ Holder: doc.Holder, Expiry: localExpiry, Trapdoor: nil, } } if err := iter.Close(); err != nil { return nil, errors.Trace(err) } return results, nil }
go
{ "resource": "" }
q5169
MigrateAddModelPermissions
train
func MigrateAddModelPermissions(pool *StatePool) error { st := pool.SystemState() controllerInfo, err := st.ControllerInfo() if err != nil { return errors.Trace(err) } coll, closer := st.db().GetRawCollection(permissionsC) defer closer() query := bson.M{ "_id": bson.M{"$regex": "^" + controllerKey(st.ControllerUUID()) + "#us#.*"}, "access": "add-model", } iter := coll.Find(query).Iter() var doc struct { DocId string `bson:"_id"` ObjectGlobalKey string `bson:"object-global-key"` SubjectGlobalKey string `bson:"subject-global-key"` Access string `bson:"access"` } var ops []txn.Op // Set all the existng controller add-model permissions back to login. // Create a new cloud permission for add-model. for iter.Next(&doc) { ops = append(ops, txn.Op{ C: permissionsC, Id: doc.DocId, Assert: txn.DocExists, Update: bson.M{"$set": bson.M{"access": "login"}}, }) ops = append(ops, createPermissionOp(cloudGlobalKey(controllerInfo.CloudName), doc.SubjectGlobalKey, permission.AddModelAccess)) } if err := iter.Close(); err != nil { return errors.Trace(err) } if len(ops) > 0 { return errors.Trace(st.runRawTransaction(ops)) } return nil }
go
{ "resource": "" }
q5170
SetEnableDiskUUIDOnVsphere
train
func SetEnableDiskUUIDOnVsphere(pool *StatePool) error { return errors.Trace(applyToAllModelSettings(pool.SystemState(), func(doc *settingsDoc) (bool, error) { typeVal, found := doc.Settings["type"] if !found { return false, nil } typeStr, ok := typeVal.(string) if !ok || typeStr != "vsphere" { return false, nil } _, found = doc.Settings["enable-disk-uuid"] if found { // If the config option's already been set don't change // it. return false, nil } doc.Settings["enable-disk-uuid"] = false return true, nil })) }
go
{ "resource": "" }
q5171
UpdateInheritedControllerConfig
train
func UpdateInheritedControllerConfig(pool *StatePool) error { st := pool.SystemState() model, err := st.Model() if err != nil { return errors.Trace(err) } key := cloudGlobalKey(model.Cloud()) var ops []txn.Op coll, closer := st.db().GetRawCollection(globalSettingsC) defer closer() iter := coll.FindId("controller").Iter() defer iter.Close() var doc settingsDoc for iter.Next(&doc) { ops = append(ops, txn.Op{ C: globalSettingsC, Id: doc.DocID, Remove: true, Assert: txn.DocExists, }) doc.DocID = key ops = append(ops, txn.Op{ C: globalSettingsC, Id: key, Insert: doc, Assert: txn.DocMissing, }) } if err := iter.Close(); err != nil { return errors.Trace(err) } if len(ops) > 0 { err = errors.Trace(st.runRawTransaction(ops)) return err } return nil }
go
{ "resource": "" }
q5172
EnsureDefaultModificationStatus
train
func EnsureDefaultModificationStatus(pool *StatePool) error { st := pool.SystemState() db := st.db() machineCol, machineCloser := db.GetRawCollection(machinesC) defer machineCloser() machineIter := machineCol.Find(nil).Iter() defer machineIter.Close() statusCol, statusCloser := db.GetRawCollection(statusesC) defer statusCloser() var ops []txn.Op var machine machineDoc updatedTime := st.clock().Now().UnixNano() for machineIter.Next(&machine) { // Since we are using a raw collection, we need to manually // ensure that we prefix the IDs with the model-uuid. localID := machineGlobalModificationKey(machine.Id) key := ensureModelUUID(machine.ModelUUID, localID) // We only need to migrate machines that don't have a modification // status document. So we need to first check if there is one, before // creating a txn.Op for the missing document. var doc statusDoc err := statusCol.Find(bson.D{{"_id", key}}).Select(bson.D{{"_id", 1}}).One(&doc) if err == nil { continue } else if err != mgo.ErrNotFound { return errors.Trace(err) } rawDoc := statusDoc{ ModelUUID: machine.ModelUUID, Status: status.Idle, Updated: updatedTime, } ops = append(ops, txn.Op{ C: statusesC, Id: key, Assert: txn.DocMissing, Insert: rawDoc, }) } if err := machineIter.Close(); err != nil { return errors.Trace(err) } if len(ops) > 0 { return errors.Trace(st.runRawTransaction(ops)) } return nil }
go
{ "resource": "" }
q5173
EnsureApplicationDeviceConstraints
train
func EnsureApplicationDeviceConstraints(pool *StatePool) error { st := pool.SystemState() db := st.db() applicationCol, applicationCloser := db.GetRawCollection(applicationsC) defer applicationCloser() applicationIter := applicationCol.Find(nil).Iter() defer applicationIter.Close() constraintsCol, constraintsCloser := db.GetRawCollection(deviceConstraintsC) defer constraintsCloser() var ops []txn.Op var application applicationDoc for applicationIter.Next(&application) { // Since we are using a raw collection, we need to manually // ensure that we prefix the IDs with the model-uuid. localID := applicationDeviceConstraintsKey(application.Name, application.CharmURL) key := ensureModelUUID(application.ModelUUID, localID) // We only need to migrate applications that don't have a device // constraints document. So we need to first check if there is one, before // creating a txn.Op for the missing document. var doc statusDoc err := constraintsCol.Find(bson.D{{"_id", key}}).Select(bson.D{{"_id", 1}}).One(&doc) if err == nil { continue } else if err != mgo.ErrNotFound { return errors.Trace(err) } ops = append(ops, txn.Op{ C: deviceConstraintsC, Id: key, Assert: txn.DocMissing, Insert: deviceConstraintsDoc{}, }) } if err := applicationIter.Close(); err != nil { return errors.Trace(err) } if len(ops) > 0 { return errors.Trace(st.runRawTransaction(ops)) } return nil }
go
{ "resource": "" }
q5174
RemoveInstanceCharmProfileDataCollection
train
func RemoveInstanceCharmProfileDataCollection(pool *StatePool) error { db := pool.SystemState().MongoSession().DB(jujuDB) instanceCharmProfileData := db.C("instanceCharmProfileData") if err := instanceCharmProfileData.DropCollection(); err != nil { // If the namespace is already missing, that's fine. if isMgoNamespaceNotFound(err) { return nil } return errors.Annotate(err, "failed to drop instanceCharmProfileData collection") } return nil }
go
{ "resource": "" }
q5175
CreateVolumes
train
func (s *VolumeSource) CreateVolumes(ctx context.ProviderCallContext, params []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { s.MethodCall(s, "CreateVolumes", ctx, params) if s.CreateVolumesFunc != nil { return s.CreateVolumesFunc(ctx, params) } return nil, errors.NotImplementedf("CreateVolumes") }
go
{ "resource": "" }
q5176
ListVolumes
train
func (s *VolumeSource) ListVolumes(ctx context.ProviderCallContext) ([]string, error) { s.MethodCall(s, "ListVolumes", ctx) if s.ListVolumesFunc != nil { return s.ListVolumesFunc(ctx) } return nil, nil }
go
{ "resource": "" }
q5177
DescribeVolumes
train
func (s *VolumeSource) DescribeVolumes(ctx context.ProviderCallContext, volIds []string) ([]storage.DescribeVolumesResult, error) { s.MethodCall(s, "DescribeVolumes", ctx, volIds) if s.DescribeVolumesFunc != nil { return s.DescribeVolumesFunc(ctx, volIds) } return nil, errors.NotImplementedf("DescribeVolumes") }
go
{ "resource": "" }
q5178
DestroyVolumes
train
func (s *VolumeSource) DestroyVolumes(ctx context.ProviderCallContext, volIds []string) ([]error, error) { s.MethodCall(s, "DestroyVolumes", ctx, volIds) if s.DestroyVolumesFunc != nil { return s.DestroyVolumesFunc(ctx, volIds) } return nil, errors.NotImplementedf("DestroyVolumes") }
go
{ "resource": "" }
q5179
ReleaseVolumes
train
func (s *VolumeSource) ReleaseVolumes(ctx context.ProviderCallContext, volIds []string) ([]error, error) { s.MethodCall(s, "ReleaseVolumes", ctx, volIds) if s.ReleaseVolumesFunc != nil { return s.ReleaseVolumesFunc(ctx, volIds) } return nil, errors.NotImplementedf("ReleaseVolumes") }
go
{ "resource": "" }
q5180
ValidateVolumeParams
train
func (s *VolumeSource) ValidateVolumeParams(params storage.VolumeParams) error { s.MethodCall(s, "ValidateVolumeParams", params) if s.ValidateVolumeParamsFunc != nil { return s.ValidateVolumeParamsFunc(params) } return nil }
go
{ "resource": "" }
q5181
AttachVolumes
train
func (s *VolumeSource) AttachVolumes(ctx context.ProviderCallContext, params []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) { s.MethodCall(s, "AttachVolumes", ctx, params) if s.AttachVolumesFunc != nil { return s.AttachVolumesFunc(ctx, params) } return nil, errors.NotImplementedf("AttachVolumes") }
go
{ "resource": "" }
q5182
DetachVolumes
train
func (s *VolumeSource) DetachVolumes(ctx context.ProviderCallContext, params []storage.VolumeAttachmentParams) ([]error, error) { s.MethodCall(s, "DetachVolumes", ctx, params) if s.DetachVolumesFunc != nil { return s.DetachVolumesFunc(ctx, params) } return nil, errors.NotImplementedf("DetachVolumes") }
go
{ "resource": "" }
q5183
NewAgentEntityWatcher
train
func NewAgentEntityWatcher(st state.EntityFinder, resources facade.Resources, getCanWatch GetAuthFunc) *AgentEntityWatcher { return &AgentEntityWatcher{ st: st, resources: resources, getCanWatch: getCanWatch, } }
go
{ "resource": "" }
q5184
Watch
train
func (a *AgentEntityWatcher) Watch(args params.Entities) (params.NotifyWatchResults, error) { result := params.NotifyWatchResults{ Results: make([]params.NotifyWatchResult, len(args.Entities)), } if len(args.Entities) == 0 { return result, nil } canWatch, err := a.getCanWatch() if err != nil { return params.NotifyWatchResults{}, errors.Trace(err) } for i, entity := range args.Entities { tag, err := names.ParseTag(entity.Tag) if err != nil { result.Results[i].Error = ServerError(ErrPerm) continue } err = ErrPerm watcherId := "" if canWatch(tag) { watcherId, err = a.watchEntity(tag) } result.Results[i].NotifyWatcherId = watcherId result.Results[i].Error = ServerError(err) } return result, nil }
go
{ "resource": "" }
q5185
dumpMetadata
train
func (c *CommandBase) dumpMetadata(ctx *cmd.Context, result *params.BackupsMetadataResult) { // TODO: (hml) 2018-04-26 // fix how --quiet and --verbose are handled with backup/restore commands // should be ctx.Verbosef() here fmt.Fprintf(ctx.Stdout, "backup ID: %q\n", result.ID) fmt.Fprintf(ctx.Stdout, "checksum: %q\n", result.Checksum) fmt.Fprintf(ctx.Stdout, "checksum format: %q\n", result.ChecksumFormat) fmt.Fprintf(ctx.Stdout, "size (B): %d\n", result.Size) fmt.Fprintf(ctx.Stdout, "stored: %v\n", result.Stored) fmt.Fprintf(ctx.Stdout, "started: %v\n", result.Started) fmt.Fprintf(ctx.Stdout, "finished: %v\n", result.Finished) fmt.Fprintf(ctx.Stdout, "notes: %q\n", result.Notes) fmt.Fprintf(ctx.Stdout, "model ID: %q\n", result.Model) fmt.Fprintf(ctx.Stdout, "machine ID: %q\n", result.Machine) fmt.Fprintf(ctx.Stdout, "created on host: %q\n", result.Hostname) fmt.Fprintf(ctx.Stdout, "juju version: %v\n", result.Version) }
go
{ "resource": "" }
q5186
Validate
train
func (cfg ProviderConfig) Validate() error { if cfg.NewStorageClient == nil { return errors.NotValidf("nil NewStorageClient") } if cfg.RetryClock == nil { return errors.NotValidf("nil RetryClock") } if cfg.RandomWindowsAdminPassword == nil { return errors.NotValidf("nil RandomWindowsAdminPassword") } if cfg.GenerateSSHKey == nil { return errors.NotValidf("nil GenerateSSHKey") } if cfg.ServicePrincipalCreator == nil { return errors.NotValidf("nil ServicePrincipalCreator") } if cfg.AzureCLI == nil { return errors.NotValidf("nil AzureCLI") } return nil }
go
{ "resource": "" }
q5187
NewEnvironProvider
train
func NewEnvironProvider(config ProviderConfig) (*azureEnvironProvider, error) { if err := config.Validate(); err != nil { return nil, errors.Annotate(err, "validating environ provider configuration") } return &azureEnvironProvider{ environProviderCredentials: environProviderCredentials{ servicePrincipalCreator: config.ServicePrincipalCreator, azureCLI: config.AzureCLI, }, config: config, }, nil }
go
{ "resource": "" }
q5188
NewLeadershipPinningAPI
train
func NewLeadershipPinningAPI(caller base.APICaller) *LeadershipPinningAPI { facadeCaller := base.NewFacadeCaller( caller, leadershipFacade, ) return NewLeadershipPinningAPIFromFacade(facadeCaller) }
go
{ "resource": "" }
q5189
PinnedLeadership
train
func (a *LeadershipPinningAPI) PinnedLeadership() (map[string][]names.Tag, error) { var callResult params.PinnedLeadershipResult err := a.facade.FacadeCall("PinnedLeadership", nil, &callResult) if err != nil { return nil, errors.Trace(err) } pinned := make(map[string][]names.Tag, len(callResult.Result)) for app, entities := range callResult.Result { entityTags := make([]names.Tag, len(entities)) for i, e := range entities { tag, err := names.ParseTag(e) if err != nil { return nil, errors.Trace(err) } entityTags[i] = tag } pinned[app] = entityTags } return pinned, nil }
go
{ "resource": "" }
q5190
PinMachineApplications
train
func (a *LeadershipPinningAPI) PinMachineApplications() (map[string]error, error) { res, err := a.pinMachineAppsOps("PinMachineApplications") return res, errors.Trace(err) }
go
{ "resource": "" }
q5191
pinMachineAppsOps
train
func (a *LeadershipPinningAPI) pinMachineAppsOps(callName string) (map[string]error, error) { var callResult params.PinApplicationsResults err := a.facade.FacadeCall(callName, nil, &callResult) if err != nil { return nil, errors.Trace(err) } callResults := callResult.Results result := make(map[string]error, len(callResults)) for _, res := range callResults { var appErr error if res.Error != nil { appErr = res.Error } result[res.ApplicationName] = appErr } return result, nil }
go
{ "resource": "" }
q5192
NewUniterResolver
train
func NewUniterResolver(cfg ResolverConfig) resolver.Resolver { return &uniterResolver{ config: cfg, retryHookTimerStarted: false, } }
go
{ "resource": "" }
q5193
nextOpConflicted
train
func (s *uniterResolver) nextOpConflicted( localState resolver.LocalState, remoteState remotestate.Snapshot, opFactory operation.Factory, ) (operation.Operation, error) { if remoteState.ResolvedMode != params.ResolvedNone { if err := s.config.ClearResolved(); err != nil { return nil, errors.Trace(err) } return opFactory.NewResolvedUpgrade(localState.CharmURL) } if remoteState.ForceCharmUpgrade && charmModified(localState, remoteState) { return opFactory.NewRevertUpgrade(remoteState.CharmURL) } return nil, resolver.ErrWaiting }
go
{ "resource": "" }
q5194
NextOp
train
func (NopResolver) NextOp(resolver.LocalState, remotestate.Snapshot, operation.Factory) (operation.Operation, error) { return nil, resolver.ErrNoOperation }
go
{ "resource": "" }
q5195
Lockdown
train
func (f *fortress) Lockdown(abort Abort) error { return f.allowGuests(false, abort) }
go
{ "resource": "" }
q5196
Visit
train
func (f *fortress) Visit(visit Visit, abort Abort) error { result := make(chan error) select { case <-f.tomb.Dying(): return ErrShutdown case <-abort: return ErrAborted case f.guestTickets <- guestTicket{visit, result}: return <-result } }
go
{ "resource": "" }
q5197
allowGuests
train
func (f *fortress) allowGuests(allowGuests bool, abort Abort) error { result := make(chan error) select { case <-f.tomb.Dying(): return ErrShutdown case f.guardTickets <- guardTicket{allowGuests, abort, result}: return <-result } }
go
{ "resource": "" }
q5198
loop
train
func (f *fortress) loop() error { var active sync.WaitGroup defer active.Wait() // guestTickets will be set on Unlock and cleared at the start of Lockdown. var guestTickets <-chan guestTicket for { select { case <-f.tomb.Dying(): return tomb.ErrDying case ticket := <-guestTickets: active.Add(1) go ticket.complete(active.Done) case ticket := <-f.guardTickets: // guard ticket requests are idempotent; it's not worth building // the extra mechanism needed to (1) complain about abuse but // (2) remain comprehensible and functional in the face of aborted // Lockdowns. if ticket.allowGuests { guestTickets = f.guestTickets } else { guestTickets = nil } go ticket.complete(active.Wait) } } }
go
{ "resource": "" }
q5199
GenerateClientCertificate
train
func GenerateClientCertificate() (*Certificate, error) { cert, key, err := shared.GenerateMemCert(true) if err != nil { return nil, errors.Trace(err) } return NewCertificate(cert, key), nil }
go
{ "resource": "" }