Dataset schema:

  Column            Type    Notes
  _id               string  length 2 to 7
  title             string  length 1 to 118
  partition         string  3 distinct values
  text              string  length 52 to 85.5k
  language          string  1 distinct value
  meta_information  dict
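For orientation, here is a minimal sketch of how a consumer might model one record of this dataset in Go. The JSON field names are assumed to match the column names above one-to-one, and meta_information is assumed to be a flat string map (as in the rows below, e.g. { "resource": "" }); both are assumptions about the export format, not documented facts.

package main

import (
    "encoding/json"
    "fmt"
)

// DatasetRecord mirrors the schema above. The json tags are assumed
// (hypothetical) field names; verify them against the actual export.
type DatasetRecord struct {
    ID              string            `json:"_id"`              // e.g. "q4000"
    Title           string            `json:"title"`            // function name, e.g. "setControllerAccess"
    Partition       string            `json:"partition"`        // one of 3 values, e.g. "train"
    Text            string            `json:"text"`             // the function source code
    Language        string            `json:"language"`         // always "go" here
    MetaInformation map[string]string `json:"meta_information"` // e.g. {"resource": ""}
}

func main() {
    // A row shaped like the q4000 entry below (text truncated for brevity).
    raw := `{"_id":"q4000","title":"setControllerAccess","partition":"train",
        "text":"func (st *State) setControllerAccess(...) error { ... }",
        "language":"go","meta_information":{"resource":""}}`
    var rec DatasetRecord
    if err := json.Unmarshal([]byte(raw), &rec); err != nil {
        panic(err)
    }
    fmt.Println(rec.ID, rec.Title, rec.Partition, rec.Language)
}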
q4000
setControllerAccess
train
func (st *State) setControllerAccess(access permission.Access, userGlobalKey string) error {
    if err := permission.ValidateControllerAccess(access); err != nil {
        return errors.Trace(err)
    }
    op := updatePermissionOp(controllerKey(st.ControllerUUID()), userGlobalKey, access)
    err := st.db().RunTransaction([]txn.Op{op})
    if err == txn.ErrAborted {
        return errors.NotFoundf("existing permissions")
    }
    return errors.Trace(err)
}
go
{ "resource": "" }
q4001
controllerUser
train
func (st *State) controllerUser(user names.UserTag) (userAccessDoc, error) {
    controllerUser := userAccessDoc{}
    controllerUsers, closer := st.db().GetCollection(controllerUsersC)
    defer closer()
    username := strings.ToLower(user.Id())
    err := controllerUsers.FindId(username).One(&controllerUser)
    if err == mgo.ErrNotFound {
        return userAccessDoc{}, errors.NotFoundf("controller user %q", user.Id())
    }
    // DateCreated is inserted as UTC, but read out as local time. So we
    // convert it back to UTC here.
    controllerUser.DateCreated = controllerUser.DateCreated.UTC()
    return controllerUser, nil
}
go
{ "resource": "" }
q4002
removeControllerUser
train
func (st *State) removeControllerUser(user names.UserTag) error {
    ops := removeControllerUserOps(st.ControllerUUID(), user)
    err := st.db().RunTransaction(ops)
    if err == txn.ErrAborted {
        err = errors.NewNotFound(nil, fmt.Sprintf("controller user %q does not exist", user.Id()))
    }
    if err != nil {
        return errors.Trace(err)
    }
    return nil
}
go
{ "resource": "" }
q4003
Info
train
func (c *Client) Info(id string) (*params.BackupsMetadataResult, error) {
    var result params.BackupsMetadataResult
    args := params.BackupsInfoArgs{ID: id}
    if err := c.facade.FacadeCall("Info", args, &result); err != nil {
        return nil, errors.Trace(err)
    }
    return &result, nil
}
go
{ "resource": "" }
q4004
Get
train
func (api *APIv5) Get(args params.ApplicationGet) (params.ApplicationGetResults, error) {
    results, err := api.getConfig(args, describe)
    if err != nil {
        return params.ApplicationGetResults{}, err
    }
    results.ApplicationConfig = nil
    return results, nil
}
go
{ "resource": "" }
q4005
Get
train
func (api *APIv4) Get(args params.ApplicationGet) (params.ApplicationGetResults, error) {
    results, err := api.getConfig(args, describeV4)
    if err != nil {
        return params.ApplicationGetResults{}, err
    }
    results.ApplicationConfig = nil
    return results, nil
}
go
{ "resource": "" }
q4006
AddUser
train
func (m *Model) AddUser(spec UserAccessSpec) (permission.UserAccess, error) {
    if err := permission.ValidateModelAccess(spec.Access); err != nil {
        return permission.UserAccess{}, errors.Annotate(err, "adding model user")
    }
    target := userAccessTarget{
        uuid:      m.UUID(),
        globalKey: modelGlobalKey,
    }
    return m.st.addUserAccess(spec, target)
}
go
{ "resource": "" }
q4007
AddControllerUser
train
func (st *State) AddControllerUser(spec UserAccessSpec) (permission.UserAccess, error) {
    if err := permission.ValidateControllerAccess(spec.Access); err != nil {
        return permission.UserAccess{}, errors.Annotate(err, "adding controller user")
    }
    return st.addUserAccess(spec, userAccessTarget{globalKey: controllerGlobalKey})
}
go
{ "resource": "" }
q4008
userAccessID
train
func userAccessID(user names.UserTag) string {
    username := user.Id()
    return strings.ToLower(username)
}
go
{ "resource": "" }
q4009
NewModelUserAccess
train
func NewModelUserAccess(st *State, userDoc userAccessDoc) (permission.UserAccess, error) {
    perm, err := st.userPermission(modelKey(userDoc.ObjectUUID), userGlobalKey(strings.ToLower(userDoc.UserName)))
    if err != nil {
        return permission.UserAccess{}, errors.Annotate(err, "obtaining model permission")
    }
    return newUserAccess(perm, userDoc, names.NewModelTag(userDoc.ObjectUUID)), nil
}
go
{ "resource": "" }
q4010
NewControllerUserAccess
train
func NewControllerUserAccess(st *State, userDoc userAccessDoc) (permission.UserAccess, error) {
    perm, err := st.userPermission(controllerKey(st.ControllerUUID()), userGlobalKey(strings.ToLower(userDoc.UserName)))
    if err != nil {
        return permission.UserAccess{}, errors.Annotate(err, "obtaining controller permission")
    }
    return newUserAccess(perm, userDoc, names.NewControllerTag(userDoc.ObjectUUID)), nil
}
go
{ "resource": "" }
q4011
UserPermission
train
func (st *State) UserPermission(subject names.UserTag, target names.Tag) (permission.Access, error) {
    if err := st.userMayHaveAccess(subject); err != nil {
        return "", errors.Trace(err)
    }
    switch target.Kind() {
    case names.ModelTagKind, names.ControllerTagKind:
        access, err := st.UserAccess(subject, target)
        if err != nil {
            return "", errors.Trace(err)
        }
        return access.Access, nil
    case names.ApplicationOfferTagKind:
        offerUUID, err := applicationOfferUUID(st, target.Id())
        if err != nil {
            return "", errors.Trace(err)
        }
        return st.GetOfferAccess(offerUUID, subject)
    case names.CloudTagKind:
        return st.GetCloudAccess(target.Id(), subject)
    default:
        return "", errors.NotValidf("%q as a target", target.Kind())
    }
}
go
{ "resource": "" }
q4012
UserAccess
train
func (st *State) UserAccess(subject names.UserTag, target names.Tag) (permission.UserAccess, error) {
    if err := st.userMayHaveAccess(subject); err != nil {
        return permission.UserAccess{}, errors.Trace(err)
    }
    var (
        userDoc userAccessDoc
        err     error
    )
    switch target.Kind() {
    case names.ModelTagKind:
        userDoc, err = st.modelUser(target.Id(), subject)
        if err == nil {
            return NewModelUserAccess(st, userDoc)
        }
    case names.ControllerTagKind:
        userDoc, err = st.controllerUser(subject)
        if err == nil {
            return NewControllerUserAccess(st, userDoc)
        }
    default:
        return permission.UserAccess{}, errors.NotValidf("%q as a target", target.Kind())
    }
    return permission.UserAccess{}, errors.Trace(err)
}
go
{ "resource": "" }
q4013
RemoveUserAccess
train
func (st *State) RemoveUserAccess(subject names.UserTag, target names.Tag) error {
    switch target.Kind() {
    case names.ModelTagKind:
        return errors.Trace(st.removeModelUser(subject))
    case names.ControllerTagKind:
        return errors.Trace(st.removeControllerUser(subject))
    }
    return errors.NotValidf("%q as a target", target.Kind())
}
go
{ "resource": "" }
q4014
populateMachineMaps
train
func (task *provisionerTask) populateMachineMaps(ids []string) error {
    task.instances = make(map[instance.Id]instances.Instance)
    instances, err := task.broker.AllInstances(task.cloudCallCtx)
    if err != nil {
        return errors.Annotate(err, "failed to get all instances from broker")
    }
    for _, i := range instances {
        task.instances[i.Id()] = i
    }
    // Update the machines map with new data for each of the machines in the
    // change list.
    machineTags := make([]names.MachineTag, len(ids))
    for i, id := range ids {
        machineTags[i] = names.NewMachineTag(id)
    }
    machines, err := task.machineGetter.Machines(machineTags...)
    if err != nil {
        return errors.Annotatef(err, "failed to get machines %v", ids)
    }
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    for i, result := range machines {
        switch {
        case result.Err == nil:
            task.machines[result.Machine.Id()] = result.Machine
        case params.IsCodeNotFoundOrCodeUnauthorized(result.Err):
            logger.Debugf("machine %q not found in state", ids[i])
            delete(task.machines, ids[i])
        default:
            return errors.Annotatef(result.Err, "failed to get machine %v", ids[i])
        }
    }
    return nil
}
go
{ "resource": "" }
q4015
pendingOrDeadOrMaintain
train
func (task *provisionerTask) pendingOrDeadOrMaintain(ids []string) (pending, dead, maintain []apiprovisioner.MachineProvisioner, err error) {
    task.machinesMutex.RLock()
    defer task.machinesMutex.RUnlock()
    for _, id := range ids {
        machine, found := task.machines[id]
        if !found {
            logger.Infof("machine %q not found", id)
            continue
        }
        var classification MachineClassification
        classification, err = classifyMachine(machine)
        if err != nil {
            return // return the error
        }
        switch classification {
        case Pending:
            pending = append(pending, machine)
        case Dead:
            dead = append(dead, machine)
        case Maintain:
            maintain = append(maintain, machine)
        }
    }
    logger.Tracef("pending machines: %v", pending)
    logger.Tracef("dead machines: %v", dead)
    return
}
go
{ "resource": "" }
q4016
findUnknownInstances
train
func (task *provisionerTask) findUnknownInstances(stopping []instances.Instance) ([]instances.Instance, error) {
    // Make a copy of the instances we know about.
    taskInstances := make(map[instance.Id]instances.Instance)
    for k, v := range task.instances {
        taskInstances[k] = v
    }
    task.machinesMutex.RLock()
    defer task.machinesMutex.RUnlock()
    for _, m := range task.machines {
        instId, err := m.InstanceId()
        switch {
        case err == nil:
            delete(taskInstances, instId)
        case params.IsCodeNotProvisioned(err):
        case params.IsCodeNotFoundOrCodeUnauthorized(err):
        default:
            return nil, err
        }
    }
    // Now remove all those instances that we are stopping already as we
    // know about those and don't want to include them in the unknown list.
    for _, inst := range stopping {
        delete(taskInstances, inst.Id())
    }
    var unknown []instances.Instance
    for _, inst := range taskInstances {
        unknown = append(unknown, inst)
    }
    return unknown, nil
}
go
{ "resource": "" }
q4017
instancesForDeadMachines
train
func (task *provisionerTask) instancesForDeadMachines(deadMachines []apiprovisioner.MachineProvisioner) []instances.Instance {
    var instances []instances.Instance
    for _, machine := range deadMachines {
        instId, err := machine.InstanceId()
        if err == nil {
            keep, _ := machine.KeepInstance()
            if keep {
                logger.Debugf("machine %v is dead but keep-instance is true", instId)
                continue
            }
            inst, found := task.instances[instId]
            // If the instance is not found we can't stop it.
            if found {
                instances = append(instances, inst)
            }
        }
    }
    return instances
}
go
{ "resource": "" }
q4018
populateAvailabilityZoneMachines
train
func (task *provisionerTask) populateAvailabilityZoneMachines() error {
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    if len(task.availabilityZoneMachines) > 0 {
        return nil
    }
    zonedEnv, ok := task.broker.(providercommon.ZonedEnviron)
    if !ok {
        return nil
    }
    // In this case, AvailabilityZoneAllocations() will return all of the "available"
    // availability zones and their instance allocations.
    availabilityZoneInstances, err := providercommon.AvailabilityZoneAllocations(
        zonedEnv, task.cloudCallCtx, []instance.Id{})
    if err != nil {
        return err
    }
    instanceMachines := make(map[instance.Id]string)
    for _, machine := range task.machines {
        instId, err := machine.InstanceId()
        if err != nil {
            continue
        }
        instanceMachines[instId] = machine.Id()
    }
    // convert instances IDs to machines IDs to aid distributing
    // not yet created instances across availability zones.
    task.availabilityZoneMachines = make([]*AvailabilityZoneMachine, len(availabilityZoneInstances))
    for i, instances := range availabilityZoneInstances {
        machineIds := set.NewStrings()
        for _, instanceId := range instances.Instances {
            if id, ok := instanceMachines[instanceId]; ok {
                machineIds.Add(id)
            }
        }
        task.availabilityZoneMachines[i] = &AvailabilityZoneMachine{
            ZoneName:           instances.ZoneName,
            MachineIds:         machineIds,
            FailedMachineIds:   set.NewStrings(),
            ExcludedMachineIds: set.NewStrings(),
        }
    }
    return nil
}
go
{ "resource": "" }
q4019
populateDistributionGroupZoneMap
train
func (task *provisionerTask) populateDistributionGroupZoneMap(machineIds []string) []*AvailabilityZoneMachine {
    var dgAvailabilityZoneMachines []*AvailabilityZoneMachine
    dgSet := set.NewStrings(machineIds...)
    for _, azm := range task.availabilityZoneMachines {
        dgAvailabilityZoneMachines = append(dgAvailabilityZoneMachines, &AvailabilityZoneMachine{
            azm.ZoneName,
            azm.MachineIds.Intersection(dgSet),
            azm.FailedMachineIds,
            azm.ExcludedMachineIds,
        })
    }
    return dgAvailabilityZoneMachines
}
go
{ "resource": "" }
q4020
machineAvailabilityZoneDistribution
train
func (task *provisionerTask) machineAvailabilityZoneDistribution(
    machineId string, distGroupMachineIds []string, cons constraints.Value,
) (string, error) {
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    if len(task.availabilityZoneMachines) == 0 {
        return "", nil
    }
    // Assign an initial zone to a machine based on lowest population,
    // accommodating any supplied zone constraints.
    // If the machine has a distribution group, assign based on lowest zone
    // population of the distribution group machine.
    var machineZone string
    if len(distGroupMachineIds) > 0 {
        dgZoneMap := azMachineFilterSort(task.populateDistributionGroupZoneMap(distGroupMachineIds)).FilterZones(cons)
        sort.Sort(dgZoneMap)
        for _, dgZoneMachines := range dgZoneMap {
            if !dgZoneMachines.FailedMachineIds.Contains(machineId) &&
                !dgZoneMachines.ExcludedMachineIds.Contains(machineId) {
                machineZone = dgZoneMachines.ZoneName
                for _, azm := range task.availabilityZoneMachines {
                    if azm.ZoneName == dgZoneMachines.ZoneName {
                        azm.MachineIds.Add(machineId)
                        break
                    }
                }
                break
            }
        }
    } else {
        zoneMap := azMachineFilterSort(task.availabilityZoneMachines).FilterZones(cons)
        sort.Sort(zoneMap)
        for _, zoneMachines := range zoneMap {
            if !zoneMachines.FailedMachineIds.Contains(machineId) &&
                !zoneMachines.ExcludedMachineIds.Contains(machineId) {
                machineZone = zoneMachines.ZoneName
                zoneMachines.MachineIds.Add(machineId)
                break
            }
        }
    }
    if machineZone == "" {
        return machineZone, errors.NotFoundf("suitable availability zone for machine %v", machineId)
    }
    return machineZone, nil
}
go
{ "resource": "" }
q4021
FilterZones
train
func (a azMachineFilterSort) FilterZones(cons constraints.Value) azMachineFilterSort {
    if !cons.HasZones() {
        return a
    }
    logger.Debugf("applying availability zone constraints: %s", strings.Join(*cons.Zones, ", "))
    filtered := a[:0]
    for _, azm := range a {
        for _, zone := range *cons.Zones {
            if azm.ZoneName == zone {
                filtered = append(filtered, azm)
                break
            }
        }
    }
    return filtered
}
go
{ "resource": "" }
q4022
startMachines
train
func (task *provisionerTask) startMachines(machines []apiprovisioner.MachineProvisioner) error {
    if len(machines) == 0 {
        return nil
    }
    // Get the distributionGroups for each machine now to avoid
    // successive calls to DistributionGroupByMachineId which will
    // return the same data.
    machineTags := make([]names.MachineTag, len(machines))
    for i, machine := range machines {
        machineTags[i] = machine.MachineTag()
    }
    machineDistributionGroups, err := task.distributionGroupFinder.DistributionGroupByMachineId(machineTags...)
    if err != nil {
        return err
    }
    var wg sync.WaitGroup
    errMachines := make([]error, len(machines))
    for i, m := range machines {
        if machineDistributionGroups[i].Err != nil {
            task.setErrorStatus(
                "fetching distribution groups for machine %q: %v",
                m, machineDistributionGroups[i].Err,
            )
            continue
        }
        wg.Add(1)
        go func(machine apiprovisioner.MachineProvisioner, dg []string, index int) {
            defer wg.Done()
            if err := task.startMachine(machine, dg); err != nil {
                task.removeMachineFromAZMap(machine)
                errMachines[index] = err
            }
        }(m, machineDistributionGroups[i].MachineIds, i)
    }
    wg.Wait()
    select {
    case <-task.catacomb.Dying():
        return task.catacomb.ErrDying()
    default:
    }
    var errorStrings []string
    for _, err := range errMachines {
        if err != nil {
            errorStrings = append(errorStrings, err.Error())
        }
    }
    if errorStrings != nil {
        return errors.New(strings.Join(errorStrings, "\n"))
    }
    return nil
}
go
{ "resource": "" }
q4023
setupToStartMachine
train
func (task *provisionerTask) setupToStartMachine(machine apiprovisioner.MachineProvisioner, version *version.Number) (
    environs.StartInstanceParams, error,
) {
    pInfo, err := machine.ProvisioningInfo()
    if err != nil {
        return environs.StartInstanceParams{}, errors.Annotatef(err, "fetching provisioning info for machine %q", machine)
    }
    instanceCfg, err := task.constructInstanceConfig(machine, task.auth, pInfo)
    if err != nil {
        return environs.StartInstanceParams{}, errors.Annotatef(err, "creating instance config for machine %q", machine)
    }
    assocProvInfoAndMachCfg(pInfo, instanceCfg)
    var arch string
    if pInfo.Constraints.Arch != nil {
        arch = *pInfo.Constraints.Arch
    }
    possibleTools, err := task.toolsFinder.FindTools(
        *version,
        pInfo.Series,
        arch,
    )
    if err != nil {
        return environs.StartInstanceParams{}, errors.Annotatef(err, "cannot find agent binaries for machine %q", machine)
    }
    startInstanceParams, err := task.constructStartInstanceParams(
        task.controllerUUID,
        machine,
        instanceCfg,
        pInfo,
        possibleTools,
    )
    if err != nil {
        return environs.StartInstanceParams{}, errors.Annotatef(err, "cannot construct params for machine %q", machine)
    }
    return startInstanceParams, nil
}
go
{ "resource": "" }
q4024
populateExcludedMachines
train
func (task *provisionerTask) populateExcludedMachines(machineId string, startInstanceParams environs.StartInstanceParams) error {
    zonedEnv, ok := task.broker.(providercommon.ZonedEnviron)
    if !ok {
        return nil
    }
    derivedZones, err := zonedEnv.DeriveAvailabilityZones(task.cloudCallCtx, startInstanceParams)
    if err != nil {
        return errors.Trace(err)
    }
    if len(derivedZones) == 0 {
        return nil
    }
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    useZones := set.NewStrings(derivedZones...)
    for _, zoneMachines := range task.availabilityZoneMachines {
        if !useZones.Contains(zoneMachines.ZoneName) {
            zoneMachines.ExcludedMachineIds.Add(machineId)
        }
    }
    return nil
}
go
{ "resource": "" }
q4025
gatherCharmLXDProfiles
train
func (task *provisionerTask) gatherCharmLXDProfiles(instanceId, machineTag string, machineProfiles []string) []string {
    if names.IsContainerMachine(machineTag) {
        if manager, ok := task.broker.(container.LXDProfileNameRetriever); ok {
            if profileNames, err := manager.LXDProfileNames(instanceId); err == nil {
                return lxdprofile.LXDProfileNames(profileNames)
            }
        } else {
            logger.Tracef("failed to gather profile names, broker didn't conform to LXDProfileNameRetriever")
        }
    }
    return machineProfiles
}
go
{ "resource": "" }
q4026
markMachineFailedInAZ
train
func (task *provisionerTask) markMachineFailedInAZ(machine apiprovisioner.MachineProvisioner, zone string) (bool, error) {
    if zone == "" {
        return false, errors.New("no zone provided")
    }
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    azRemaining := false
    for _, zoneMachines := range task.availabilityZoneMachines {
        if zone == zoneMachines.ZoneName {
            zoneMachines.MachineIds.Remove(machine.Id())
            zoneMachines.FailedMachineIds.Add(machine.Id())
            if azRemaining {
                break
            }
        }
        if !zoneMachines.FailedMachineIds.Contains(machine.Id()) &&
            !zoneMachines.ExcludedMachineIds.Contains(machine.Id()) {
            azRemaining = true
        }
    }
    return azRemaining, nil
}
go
{ "resource": "" }
q4027
removeMachineFromAZMap
train
func (task *provisionerTask) removeMachineFromAZMap(machine apiprovisioner.MachineProvisioner) {
    machineId := machine.Id()
    task.machinesMutex.Lock()
    defer task.machinesMutex.Unlock()
    for _, zoneMachines := range task.availabilityZoneMachines {
        zoneMachines.MachineIds.Remove(machineId)
        zoneMachines.FailedMachineIds.Remove(machineId)
    }
}
go
{ "resource": "" }
q4028
GetCredentials
train
func GetCredentials(
    ctx *cmd.Context,
    store jujuclient.CredentialGetter,
    args GetCredentialsParams,
) (_ *cloud.Credential, chosenCredentialName, regionName string, _ error) {
    credential, credentialName, defaultRegion, err := credentialByName(
        store, args.Cloud.Name, args.CredentialName,
    )
    if err != nil {
        return nil, "", "", errors.Trace(err)
    }
    regionName = args.CloudRegion
    if regionName == "" {
        regionName = defaultRegion
    }
    cloudEndpoint := args.Cloud.Endpoint
    cloudStorageEndpoint := args.Cloud.StorageEndpoint
    cloudIdentityEndpoint := args.Cloud.IdentityEndpoint
    if regionName != "" {
        region, err := cloud.RegionByName(args.Cloud.Regions, regionName)
        if err != nil {
            return nil, "", "", errors.Trace(err)
        }
        cloudEndpoint = region.Endpoint
        cloudStorageEndpoint = region.StorageEndpoint
        cloudIdentityEndpoint = region.IdentityEndpoint
    }
    // Finalize credential against schemas supported by the provider.
    provider, err := environs.Provider(args.Cloud.Type)
    if err != nil {
        return nil, "", "", errors.Trace(err)
    }
    credential, err = FinalizeFileContent(credential, provider)
    if err != nil {
        return nil, "", "", AnnotateWithFinalizationError(err, credentialName, args.Cloud.Name)
    }
    credential, err = provider.FinalizeCredential(
        ctx, environs.FinalizeCredentialParams{
            Credential:            *credential,
            CloudEndpoint:         cloudEndpoint,
            CloudStorageEndpoint:  cloudStorageEndpoint,
            CloudIdentityEndpoint: cloudIdentityEndpoint,
        },
    )
    if err != nil {
        return nil, "", "", AnnotateWithFinalizationError(err, credentialName, args.Cloud.Name)
    }
    return credential, credentialName, regionName, nil
}
go
{ "resource": "" }
q4029
FinalizeFileContent
train
func FinalizeFileContent(credential *cloud.Credential, provider environs.EnvironProvider) (*cloud.Credential, error) {
    readFile := func(f string) ([]byte, error) {
        f, err := utils.NormalizePath(f)
        if err != nil {
            return nil, errors.Trace(err)
        }
        return ioutil.ReadFile(f)
    }
    var err error
    credential, err = cloud.FinalizeCredential(
        *credential, provider.CredentialSchemas(), readFile,
    )
    if err != nil {
        return nil, err
    }
    return credential, nil
}
go
{ "resource": "" }
q4030
credentialByName
train
func credentialByName(
    store jujuclient.CredentialGetter, cloudName, credentialName string,
) (_ *cloud.Credential, credentialNameUsed string, defaultRegion string, _ error) {
    cloudCredentials, err := store.CredentialForCloud(cloudName)
    if err != nil {
        return nil, "", "", errors.Annotate(err, "loading credentials")
    }
    if credentialName == "" {
        credentialName = cloudCredentials.DefaultCredential
        if credentialName == "" {
            // No credential specified, but there's more than one.
            if len(cloudCredentials.AuthCredentials) > 1 {
                return nil, "", "", ErrMultipleCredentials
            }
            // No credential specified, so use the default for the cloud.
            for credentialName = range cloudCredentials.AuthCredentials {
            }
        }
    }
    credential, ok := cloudCredentials.AuthCredentials[credentialName]
    if !ok {
        return nil, "", "", errors.NotFoundf(
            "%q credential for cloud %q", credentialName, cloudName,
        )
    }
    return &credential, credentialName, cloudCredentials.DefaultRegion, nil
}
go
{ "resource": "" }
q4031
DetectCredential
train
func DetectCredential(cloudName string, provider environs.EnvironProvider) (*cloud.CloudCredential, error) {
    detected, err := provider.DetectCredentials()
    if err != nil {
        return nil, errors.Annotatef(
            err, "detecting credentials for %q cloud provider", cloudName,
        )
    }
    logger.Tracef("provider detected credentials: %v", detected)
    if len(detected.AuthCredentials) == 0 {
        return nil, errors.NotFoundf("credentials for cloud %q", cloudName)
    }
    if len(detected.AuthCredentials) > 1 {
        return nil, ErrMultipleCredentials
    }
    return detected, nil
}
go
{ "resource": "" }
q4032
RegisterCredentials
train
func RegisterCredentials(provider environs.EnvironProvider, args RegisterCredentialsParams) (map[string]*cloud.CloudCredential, error) {
    if register, ok := provider.(environs.ProviderCredentialsRegister); ok {
        found, err := register.RegisterCredentials(args.Cloud)
        if err != nil {
            return nil, errors.Annotatef(
                err, "registering credentials for provider",
            )
        }
        logger.Tracef("provider registered credentials: %v", found)
        if len(found) == 0 {
            return nil, errors.NotFoundf("credentials for provider")
        }
        return found, errors.Trace(err)
    }
    return nil, nil
}
go
{ "resource": "" }
q4033
NewMockAPICalls
train
func NewMockAPICalls(ctrl *gomock.Controller) *MockAPICalls {
    mock := &MockAPICalls{ctrl: ctrl}
    mock.recorder = &MockAPICallsMockRecorder{mock}
    return mock
}
go
{ "resource": "" }
q4034
ContainerConfig
train
func (m *MockAPICalls) ContainerConfig() (params.ContainerConfig, error) {
    ret := m.ctrl.Call(m, "ContainerConfig")
    ret0, _ := ret[0].(params.ContainerConfig)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
go
{ "resource": "" }
q4035
ContainerConfig
train
func (mr *MockAPICallsMockRecorder) ContainerConfig() *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerConfig", reflect.TypeOf((*MockAPICalls)(nil).ContainerConfig))
}
go
{ "resource": "" }
q4036
GetContainerInterfaceInfo
train
func (mr *MockAPICallsMockRecorder) GetContainerInterfaceInfo(arg0 interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInterfaceInfo", reflect.TypeOf((*MockAPICalls)(nil).GetContainerInterfaceInfo), arg0)
}
go
{ "resource": "" }
q4037
GetContainerProfileInfo
train
func (m *MockAPICalls) GetContainerProfileInfo(arg0 names_v2.MachineTag) ([]*provisioner.LXDProfileResult, error) {
    ret := m.ctrl.Call(m, "GetContainerProfileInfo", arg0)
    ret0, _ := ret[0].([]*provisioner.LXDProfileResult)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
go
{ "resource": "" }
q4038
HostChangesForContainer
train
func (m *MockAPICalls) HostChangesForContainer(arg0 names_v2.MachineTag) ([]network.DeviceToBridge, int, error) {
    ret := m.ctrl.Call(m, "HostChangesForContainer", arg0)
    ret0, _ := ret[0].([]network.DeviceToBridge)
    ret1, _ := ret[1].(int)
    ret2, _ := ret[2].(error)
    return ret0, ret1, ret2
}
go
{ "resource": "" }
q4039
PrepareContainerInterfaceInfo
train
func (m *MockAPICalls) PrepareContainerInterfaceInfo(arg0 names_v2.MachineTag) ([]network.InterfaceInfo, error) {
    ret := m.ctrl.Call(m, "PrepareContainerInterfaceInfo", arg0)
    ret0, _ := ret[0].([]network.InterfaceInfo)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
go
{ "resource": "" }
q4040
ReleaseContainerAddresses
train
func (m *MockAPICalls) ReleaseContainerAddresses(arg0 names_v2.MachineTag) error {
    ret := m.ctrl.Call(m, "ReleaseContainerAddresses", arg0)
    ret0, _ := ret[0].(error)
    return ret0
}
go
{ "resource": "" }
q4041
SetHostMachineNetworkConfig
train
func (m *MockAPICalls) SetHostMachineNetworkConfig(arg0 names_v2.MachineTag, arg1 []params.NetworkConfig) error {
    ret := m.ctrl.Call(m, "SetHostMachineNetworkConfig", arg0, arg1)
    ret0, _ := ret[0].(error)
    return ret0
}
go
{ "resource": "" }
q4042
sequence
train
func sequence(mb modelBackend, name string) (int, error) {
    sequences, closer := mb.db().GetCollection(sequenceC)
    defer closer()
    query := sequences.FindId(name)
    inc := mgo.Change{
        Update: bson.M{
            "$set": bson.M{
                "name":       name,
                "model-uuid": mb.modelUUID(),
            },
            "$inc": bson.M{"counter": 1},
        },
        Upsert: true,
    }
    result := &sequenceDoc{}
    _, err := query.Apply(inc, result)
    if err != nil {
        return -1, fmt.Errorf("cannot increment %q sequence number: %v", name, err)
    }
    return result.Counter, nil
}
go
{ "resource": "" }
q4043
sequenceWithMin
train
func sequenceWithMin(mb modelBackend, name string, minVal int) (int, error) {
    sequences, closer := mb.db().GetRawCollection(sequenceC)
    defer closer()
    updater := newDbSeqUpdater(sequences, mb.modelUUID(), name)
    return updateSeqWithMin(updater, minVal)
}
go
{ "resource": "" }
q4044
Sequences
train
func (st *State) Sequences() (map[string]int, error) {
    sequences, closer := st.db().GetCollection(sequenceC)
    defer closer()
    var docs []sequenceDoc
    if err := sequences.Find(nil).All(&docs); err != nil {
        return nil, errors.Trace(err)
    }
    result := make(map[string]int)
    for _, doc := range docs {
        result[doc.Name] = doc.Counter
    }
    return result, nil
}
go
{ "resource": "" }
q4045
updateSeqWithMin
train
func updateSeqWithMin(sequence seqUpdater, minVal int) (int, error) {
    for try := 0; try < maxSeqRetries; try++ {
        curVal, err := sequence.read()
        if err != nil {
            return -1, errors.Annotate(err, "could not read sequence")
        }
        if curVal == 0 {
            // No sequence document exists, create one.
            ok, err := sequence.create(minVal + 1)
            if err != nil {
                return -1, errors.Annotate(err, "could not create sequence")
            }
            if ok {
                return minVal, nil
            }
            // Someone else created the sequence document at the same
            // time, try again.
        } else {
            // Increment an existing sequence document, respecting the
            // minimum value provided.
            nextVal := curVal + 1
            if nextVal < minVal {
                nextVal = minVal + 1
            }
            ok, err := sequence.set(curVal, nextVal)
            if err != nil {
                return -1, errors.Annotate(err, "could not set sequence")
            }
            if ok {
                return nextVal - 1, nil
            }
            // Someone else incremented the sequence at the same time,
            // try again.
        }
    }
    return -1, errors.New("too much contention while updating sequence")
}
go
{ "resource": "" }
q4046
EnsureSymlinks
train
func EnsureSymlinks(jujuDir, dir string, commands []string) (err error) {
    logger.Infof("ensure jujuc symlinks in %s", dir)
    defer func() {
        if err != nil {
            err = errors.Annotatef(err, "cannot initialize commands in %q", dir)
        }
    }()
    isSymlink, err := symlink.IsSymlink(jujuDir)
    if err != nil {
        return err
    }
    if isSymlink {
        link, err := symlink.Read(jujuDir)
        if err != nil {
            return err
        }
        if !filepath.IsAbs(link) {
            logger.Infof("%s is relative", link)
            link = filepath.Join(filepath.Dir(dir), link)
        }
        jujuDir = link
        logger.Infof("was a symlink, now looking at %s", jujuDir)
    }
    jujudPath := filepath.Join(jujuDir, names.Jujud)
    logger.Debugf("jujud path %s", jujudPath)
    for _, name := range commands {
        // The link operation fails when the target already exists,
        // so this is a no-op when the command names already
        // exist.
        err := symlink.New(jujudPath, filepath.Join(dir, name))
        if err != nil && !os.IsExist(err) {
            return err
        }
    }
    return nil
}
go
{ "resource": "" }
q4047
Validate
train
func (p BootstrapParams) Validate() error {
    if p.AdminSecret == "" {
        return errors.New("admin-secret is empty")
    }
    if p.ControllerConfig.ControllerUUID() == "" {
        return errors.New("controller configuration has no controller UUID")
    }
    if _, hasCACert := p.ControllerConfig.CACert(); !hasCACert {
        return errors.New("controller configuration has no ca-cert")
    }
    if p.CAPrivateKey == "" {
        return errors.New("empty ca-private-key")
    }
    // TODO(axw) validate other things.
    return nil
}
go
{ "resource": "" }
q4048
withDefaultControllerConstraints
train
func withDefaultControllerConstraints(cons constraints.Value) constraints.Value {
    if !cons.HasInstanceType() && !cons.HasCpuCores() && !cons.HasCpuPower() && !cons.HasMem() {
        // A default of 3.5GiB will result in machines with up to 4GiB of memory, eg
        // - 3.75GiB on AWS, Google
        // - 3.5GiB on Azure
        // - 4GiB on Rackspace etc
        var mem uint64 = 3.5 * 1024
        cons.Mem = &mem
    }
    return cons
}
go
{ "resource": "" }
q4049
Bootstrap
train
func Bootstrap(
    ctx environs.BootstrapContext,
    environ environs.BootstrapEnviron,
    callCtx context.ProviderCallContext,
    args BootstrapParams,
) error {
    if err := args.Validate(); err != nil {
        return errors.Annotate(err, "validating bootstrap parameters")
    }
    bootstrapParams := environs.BootstrapParams{
        CloudName:        args.Cloud.Name,
        CloudRegion:      args.CloudRegion,
        ControllerConfig: args.ControllerConfig,
        ModelConstraints: args.ModelConstraints,
        BootstrapSeries:  args.BootstrapSeries,
        Placement:        args.Placement,
    }
    doBootstrap := bootstrapIAAS
    if jujucloud.CloudIsCAAS(args.Cloud) {
        doBootstrap = bootstrapCAAS
    }
    if err := doBootstrap(ctx, environ, callCtx, args, bootstrapParams); err != nil {
        return errors.Trace(err)
    }
    ctx.Infof("Bootstrap agent now started")
    return nil
}
go
{ "resource": "" }
q4050
bootstrapImageMetadata
train
func bootstrapImageMetadata(
    environ environs.BootstrapEnviron,
    bootstrapSeries *string,
    bootstrapArch string,
    bootstrapImageId string,
    customImageMetadata *[]*imagemetadata.ImageMetadata,
) ([]*imagemetadata.ImageMetadata, error) {
    hasRegion, ok := environ.(simplestreams.HasRegion)
    if !ok {
        if bootstrapImageId != "" {
            // We only support specifying image IDs for providers
            // that use simplestreams for now.
            return nil, errors.NotSupportedf(
                "specifying bootstrap image for %q provider",
                environ.Config().Type(),
            )
        }
        // No region, no metadata.
        return nil, nil
    }
    region, err := hasRegion.Region()
    if err != nil {
        return nil, errors.Trace(err)
    }
    if bootstrapImageId != "" {
        if bootstrapSeries == nil {
            return nil, errors.NotValidf("no series specified with bootstrap image")
        }
        seriesVersion, err := series.SeriesVersion(*bootstrapSeries)
        if err != nil {
            return nil, errors.Trace(err)
        }
        // The returned metadata does not have information about the
        // storage or virtualisation type. Any provider that wants to
        // filter on those properties should allow for empty values.
        meta := &imagemetadata.ImageMetadata{
            Id:         bootstrapImageId,
            Arch:       bootstrapArch,
            Version:    seriesVersion,
            RegionName: region.Region,
            Endpoint:   region.Endpoint,
            Stream:     environ.Config().ImageStream(),
        }
        *customImageMetadata = append(*customImageMetadata, meta)
        return []*imagemetadata.ImageMetadata{meta}, nil
    }
    // For providers that support making use of simplestreams
    // image metadata, search public image metadata. We need
    // to pass this onto Bootstrap for selecting images.
    sources, err := environs.ImageMetadataSources(environ)
    if err != nil {
        return nil, errors.Trace(err)
    }
    // This constraint will search image metadata for all supported architectures and series.
    imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{
        CloudSpec: region,
        Stream:    environ.Config().ImageStream(),
    })
    logger.Debugf("constraints for image metadata lookup %v", imageConstraint)
    // Get image metadata from all data sources.
    // Since order of data source matters, order of image metadata matters too. Append is important here.
    var publicImageMetadata []*imagemetadata.ImageMetadata
    for _, source := range sources {
        sourceMetadata, _, err := imagemetadata.Fetch([]simplestreams.DataSource{source}, imageConstraint)
        if err != nil {
            logger.Debugf("ignoring image metadata in %s: %v", source.Description(), err)
            // Just keep looking...
            continue
        }
        logger.Debugf("found %d image metadata in %s", len(sourceMetadata), source.Description())
        publicImageMetadata = append(publicImageMetadata, sourceMetadata...)
    }
    logger.Debugf("found %d image metadata from all image data sources", len(publicImageMetadata))
    if len(publicImageMetadata) == 0 {
        return nil, errors.New("no image metadata found")
    }
    return publicImageMetadata, nil
}
go
{ "resource": "" }
q4051
getBootstrapToolsVersion
train
func getBootstrapToolsVersion(possibleTools coretools.List) (coretools.List, error) {
    if len(possibleTools) == 0 {
        return nil, errors.New("no bootstrap agent binaries available")
    }
    var newVersion version.Number
    newVersion, toolsList := possibleTools.Newest()
    logger.Infof("newest version: %s", newVersion)
    bootstrapVersion := newVersion
    // We should only ever bootstrap the exact same version as the client,
    // or we risk bootstrap incompatibility.
    if !isCompatibleVersion(newVersion, jujuversion.Current) {
        compatibleVersion, compatibleTools := findCompatibleTools(possibleTools, jujuversion.Current)
        if len(compatibleTools) == 0 {
            logger.Infof(
                "failed to find %s agent binaries, will attempt to use %s",
                jujuversion.Current, newVersion,
            )
        } else {
            bootstrapVersion, toolsList = compatibleVersion, compatibleTools
        }
    }
    logger.Infof("picked bootstrap agent binary version: %s", bootstrapVersion)
    return toolsList, nil
}
go
{ "resource": "" }
q4052
setBootstrapToolsVersion
train
func setBootstrapToolsVersion(environ environs.Configer, toolsVersion version.Number) error {
    cfg := environ.Config()
    if agentVersion, _ := cfg.AgentVersion(); agentVersion != toolsVersion {
        cfg, err := cfg.Apply(map[string]interface{}{
            "agent-version": toolsVersion.String(),
        })
        if err == nil {
            err = environ.SetConfig(cfg)
        }
        if err != nil {
            return errors.Errorf("failed to update model configuration: %v", err)
        }
    }
    return nil
}
go
{ "resource": "" }
q4053
findCompatibleTools
train
func findCompatibleTools(possibleTools coretools.List, version version.Number) (version.Number, coretools.List) {
    var compatibleTools coretools.List
    for _, tools := range possibleTools {
        if isCompatibleVersion(tools.Version.Number, version) {
            compatibleTools = append(compatibleTools, tools)
        }
    }
    return compatibleTools.Newest()
}
go
{ "resource": "" }
q4054
setPrivateMetadataSources
train
func setPrivateMetadataSources(metadataDir string) ([]*imagemetadata.ImageMetadata, error) {
    if _, err := os.Stat(metadataDir); err != nil {
        if !os.IsNotExist(err) {
            return nil, errors.Annotate(err, "cannot access simplestreams metadata directory")
        }
        return nil, errors.NotFoundf("simplestreams metadata source: %s", metadataDir)
    }

    agentBinaryMetadataDir := metadataDir
    ending := filepath.Base(agentBinaryMetadataDir)
    if ending != storage.BaseToolsPath {
        agentBinaryMetadataDir = filepath.Join(metadataDir, storage.BaseToolsPath)
    }
    if _, err := os.Stat(agentBinaryMetadataDir); err != nil {
        if !os.IsNotExist(err) {
            return nil, errors.Annotate(err, "cannot access agent metadata")
        }
        logger.Debugf("no agent directory found, using default agent metadata source: %s", tools.DefaultBaseURL)
    } else {
        if ending == storage.BaseToolsPath {
            // As the specified metadataDir ended in 'tools'
            // assume that is the only metadata to find and return.
            tools.DefaultBaseURL = filepath.Dir(metadataDir)
            logger.Debugf("setting default agent metadata source: %s", tools.DefaultBaseURL)
            return nil, nil
        } else {
            tools.DefaultBaseURL = metadataDir
            logger.Debugf("setting default agent metadata source: %s", tools.DefaultBaseURL)
        }
    }

    imageMetadataDir := metadataDir
    ending = filepath.Base(imageMetadataDir)
    if ending != storage.BaseImagesPath {
        imageMetadataDir = filepath.Join(metadataDir, storage.BaseImagesPath)
    }
    if _, err := os.Stat(imageMetadataDir); err != nil {
        if !os.IsNotExist(err) {
            return nil, errors.Annotate(err, "cannot access image metadata")
        }
        return nil, nil
    } else {
        logger.Debugf("setting default image metadata source: %s", imageMetadataDir)
    }

    baseURL := fmt.Sprintf("file://%s", filepath.ToSlash(imageMetadataDir))
    publicKey, _ := simplestreams.UserPublicSigningKey()
    datasource := simplestreams.NewURLSignedDataSource("bootstrap metadata", baseURL, publicKey, utils.NoVerifySSLHostnames, simplestreams.CUSTOM_CLOUD_DATA, false)

    // Read the image metadata, as we'll want to upload it to the environment.
    imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{})
    existingMetadata, _, err := imagemetadata.Fetch([]simplestreams.DataSource{datasource}, imageConstraint)
    if err != nil && !errors.IsNotFound(err) {
        return nil, errors.Annotate(err, "cannot read image metadata")
    }

    // Add an image metadata datasource for constraint validation, etc.
    environs.RegisterUserImageDataSourceFunc("bootstrap metadata", func(environs.Environ) (simplestreams.DataSource, error) {
        return datasource, nil
    })
    logger.Infof("custom image metadata added to search path")
    return existingMetadata, nil
}
go
{ "resource": "" }
q4055
guiArchive
train
func guiArchive(dataSourceBaseURL string, logProgress func(string)) *coretools.GUIArchive {
    // The environment variable is only used for development purposes.
    path := os.Getenv("JUJU_GUI")
    if path != "" {
        vers, err := guiVersion(path)
        if err != nil {
            logProgress(fmt.Sprintf("Cannot use Juju GUI at %q: %s", path, err))
            return nil
        }
        hash, size, err := hashAndSize(path)
        if err != nil {
            logProgress(fmt.Sprintf("Cannot use Juju GUI at %q: %s", path, err))
            return nil
        }
        logProgress(fmt.Sprintf("Fetching Juju GUI %s from local archive", vers))
        return &coretools.GUIArchive{
            Version: vers,
            URL:     "file://" + filepath.ToSlash(path),
            SHA256:  hash,
            Size:    size,
        }
    }
    // Check if the user requested to bootstrap with no GUI.
    if dataSourceBaseURL == "" {
        logProgress("Juju GUI installation has been disabled")
        return nil
    }
    // Fetch GUI archives info from simplestreams.
    source := gui.NewDataSource(dataSourceBaseURL)
    allMeta, err := guiFetchMetadata(gui.ReleasedStream, source)
    if err != nil {
        logProgress(fmt.Sprintf("Unable to fetch Juju GUI info: %s", err))
        return nil
    }
    if len(allMeta) == 0 {
        logProgress("No available Juju GUI archives found")
        return nil
    }
    // Metadata info are returned in descending version order.
    logProgress(fmt.Sprintf("Fetching Juju GUI %s", allMeta[0].Version))
    return &coretools.GUIArchive{
        Version: allMeta[0].Version,
        URL:     allMeta[0].FullPath,
        SHA256:  allMeta[0].SHA256,
        Size:    allMeta[0].Size,
    }
}
go
{ "resource": "" }
q4056
hashAndSize
train
func hashAndSize(path string) (hash string, size int64, err error) {
    f, err := os.Open(path)
    if err != nil {
        return "", 0, errors.Mask(err)
    }
    defer f.Close()
    h := sha256.New()
    size, err = io.Copy(h, f)
    if err != nil {
        return "", 0, errors.Mask(err)
    }
    return fmt.Sprintf("%x", h.Sum(nil)), size, nil
}
go
{ "resource": "" }
q4057
NewListEndpointsCommand
train
func NewListEndpointsCommand() cmd.Command {
    listCmd := &listCommand{}
    listCmd.newAPIFunc = func() (ListAPI, error) {
        return listCmd.NewApplicationOffersAPI()
    }
    listCmd.refreshModels = listCmd.ModelCommandBase.RefreshModels
    return modelcmd.Wrap(listCmd)
}
go
{ "resource": "" }
q4058
convertCharmEndpoints
train
func convertCharmEndpoints(relations ...charm.Relation) map[string]RemoteEndpoint {
    if len(relations) == 0 {
        return nil
    }
    output := make(map[string]RemoteEndpoint, len(relations))
    for _, one := range relations {
        output[one.Name] = RemoteEndpoint{one.Name, one.Interface, string(one.Role)}
    }
    return output
}
go
{ "resource": "" }
q4059
GetState
train
func (ctlr *Controller) GetState(modelTag names.ModelTag) (*PooledState, error) {
    return ctlr.pool.Get(modelTag.Id())
}
go
{ "resource": "" }
q4060
Ping
train
func (ctlr *Controller) Ping() error {
    if ctlr.pool.SystemState() == nil {
        return errors.New("pool is closed")
    }
    return ctlr.pool.SystemState().Ping()
}
go
{ "resource": "" }
q4061
ControllerConfig
train
func (st *State) ControllerConfig() (jujucontroller.Config, error) {
    settings, err := readSettings(st.db(), controllersC, controllerSettingsGlobalKey)
    if err != nil {
        return nil, errors.Annotatef(err, "controller %q", st.ControllerUUID())
    }
    return settings.Map(), nil
}
go
{ "resource": "" }
q4062
checkSpaceIsAvailableToAllControllers
train
func (st *State) checkSpaceIsAvailableToAllControllers(configSpace string) error {
    info, err := st.ControllerInfo()
    if err != nil {
        return errors.Annotate(err, "cannot get controller info")
    }
    var missing []string
    spaceName := network.SpaceName(configSpace)
    for _, id := range info.MachineIds {
        m, err := st.Machine(id)
        if err != nil {
            return errors.Annotate(err, "cannot get machine")
        }
        if _, ok := network.SelectAddressesBySpaceNames(m.Addresses(), spaceName); !ok {
            missing = append(missing, id)
        }
    }
    if len(missing) > 0 {
        return errors.Errorf("machines with no addresses in this space: %s", strings.Join(missing, ", "))
    }
    return nil
}
go
{ "resource": "" }
q4063
Supports
train
func (s storageProvider) Supports(kind storage.StorageKind) bool {
    return kind == storage.StorageKindBlock
}
go
{ "resource": "" }
q4064
DefaultPools
train
func (s storageProvider) DefaultPools() []*storage.Config {
    latencyPool, _ := storage.NewConfig("oracle-latency", oracleStorageProviderType, map[string]interface{}{
        oracleVolumeType: latencyPool,
    })
    return []*storage.Config{latencyPool}
}
go
{ "resource": "" }
q4065
ValidateBranchName
train
func ValidateBranchName(name string) error {
    if name == "" {
        return errors.NotValidf("empty branch name")
    }
    if name == GenerationMaster {
        return errors.NotValidf("branch name %q", GenerationMaster)
    }
    return nil
}
go
{ "resource": "" }
q4066
NewStorage
train
func NewStorage(
    session *mgo.Session,
    modelUUID string,
) Storage {
    blobDb := session.DB(ImagesDB)
    metadataCollection := blobDb.C(imagemetadataC)
    return &imageStorage{
        modelUUID,
        metadataCollection,
        blobDb,
    }
}
go
{ "resource": "" }
q4067
AddImage
train
func (s *imageStorage) AddImage(r io.Reader, metadata *Metadata) (resultErr error) {
    session := s.blobDb.Session.Copy()
    defer session.Close()
    managedStorage := s.getManagedStorage(session)
    path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256)
    if err := managedStorage.PutForBucket(s.modelUUID, path, r, metadata.Size); err != nil {
        return errors.Annotate(err, "cannot store image")
    }
    defer func() {
        if resultErr == nil {
            return
        }
        err := managedStorage.RemoveForBucket(s.modelUUID, path)
        if err != nil {
            logger.Errorf("failed to remove image blob: %v", err)
        }
    }()

    newDoc := imageMetadataDoc{
        Id:        docId(metadata),
        ModelUUID: s.modelUUID,
        Kind:      metadata.Kind,
        Series:    metadata.Series,
        Arch:      metadata.Arch,
        Size:      metadata.Size,
        SHA256:    metadata.SHA256,
        SourceURL: metadata.SourceURL,
        Path:      path,
        // TODO(fwereade): 2016-03-17 lp:1558657
        Created: time.Now(),
    }

    // Add or replace metadata. If replacing, record the
    // existing path so we can remove the blob later.
    var oldPath string
    buildTxn := func(attempt int) ([]txn.Op, error) {
        op := txn.Op{
            C:  imagemetadataC,
            Id: newDoc.Id,
        }
        // On the first attempt we assume we're adding a new image blob.
        // Subsequent attempts to add image will fetch the existing
        // doc, record the old path, and attempt to update the
        // size, path and hash fields.
        if attempt == 0 {
            op.Assert = txn.DocMissing
            op.Insert = &newDoc
        } else {
            oldDoc, err := s.imageMetadataDoc(metadata.ModelUUID, metadata.Kind, metadata.Series, metadata.Arch)
            if err != nil {
                return nil, err
            }
            oldPath = oldDoc.Path
            op.Assert = bson.D{{"path", oldPath}}
            if oldPath != path {
                op.Update = bson.D{{
                    "$set", bson.D{
                        {"size", metadata.Size},
                        {"sha256", metadata.SHA256},
                        {"path", path},
                    },
                }}
            }
        }
        return []txn.Op{op}, nil
    }
    txnRunner := s.txnRunner(session)
    err := txnRunner.Run(buildTxn)
    if err != nil {
        return errors.Annotate(err, "cannot store image metadata")
    }

    if oldPath != "" && oldPath != path {
        // Attempt to remove the old path. Failure is non-fatal.
        err := managedStorage.RemoveForBucket(s.modelUUID, oldPath)
        if err != nil {
            logger.Errorf("failed to remove old image blob: %v", err)
        } else {
            logger.Debugf("removed old image blob")
        }
    }
    return nil
}
go
{ "resource": "" }
q4068
ListImages
train
func (s *imageStorage) ListImages(filter ImageFilter) ([]*Metadata, error) {
    metadataDocs, err := s.listImageMetadataDocs(s.modelUUID, filter.Kind, filter.Series, filter.Arch)
    if err != nil {
        return nil, errors.Annotate(err, "cannot list image metadata")
    }
    result := make([]*Metadata, len(metadataDocs))
    for i, metadataDoc := range metadataDocs {
        result[i] = &Metadata{
            ModelUUID: s.modelUUID,
            Kind:      metadataDoc.Kind,
            Series:    metadataDoc.Series,
            Arch:      metadataDoc.Arch,
            Size:      metadataDoc.Size,
            SHA256:    metadataDoc.SHA256,
            Created:   metadataDoc.Created,
            SourceURL: metadataDoc.SourceURL,
        }
    }
    return result, nil
}
go
{ "resource": "" }
q4069
DeleteImage
train
func (s *imageStorage) DeleteImage(metadata *Metadata) (resultErr error) {
    session := s.blobDb.Session.Copy()
    defer session.Close()
    managedStorage := s.getManagedStorage(session)
    path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256)
    if err := managedStorage.RemoveForBucket(s.modelUUID, path); err != nil {
        return errors.Annotate(err, "cannot remove image blob")
    }
    // Remove the metadata.
    buildTxn := func(attempt int) ([]txn.Op, error) {
        op := txn.Op{
            C:      imagemetadataC,
            Id:     docId(metadata),
            Remove: true,
        }
        return []txn.Op{op}, nil
    }
    txnRunner := s.txnRunner(session)
    err := txnRunner.Run(buildTxn)
    // Metadata already removed, we don't care.
    if err == mgo.ErrNotFound {
        return nil
    }
    return errors.Annotate(err, "cannot remove image metadata")
}
go
{ "resource": "" }
q4070
Image
train
func (s *imageStorage) Image(kind, series, arch string) (*Metadata, io.ReadCloser, error) {
    metadataDoc, err := s.imageMetadataDoc(s.modelUUID, kind, series, arch)
    if err != nil {
        return nil, nil, err
    }
    session := s.blobDb.Session.Copy()
    managedStorage := s.getManagedStorage(session)
    image, err := s.imageBlob(managedStorage, metadataDoc.Path)
    if err != nil {
        return nil, nil, err
    }
    metadata := &Metadata{
        ModelUUID: s.modelUUID,
        Kind:      metadataDoc.Kind,
        Series:    metadataDoc.Series,
        Arch:      metadataDoc.Arch,
        Size:      metadataDoc.Size,
        SHA256:    metadataDoc.SHA256,
        SourceURL: metadataDoc.SourceURL,
        Created:   metadataDoc.Created,
    }
    imageResult := &imageCloser{
        image,
        session,
    }
    return metadata, imageResult, nil
}
go
{ "resource": "" }
q4071
imagePath
train
func imagePath(kind, series, arch, checksum string) string {
    return fmt.Sprintf("images/%s-%s-%s:%s", kind, series, arch, checksum)
}
go
{ "resource": "" }
q4072
docId
train
func docId(metadata *Metadata) string {
    return fmt.Sprintf("%s-%s-%s-%s", metadata.ModelUUID, metadata.Kind, metadata.Series, metadata.Arch)
}
go
{ "resource": "" }
q4073
ReadCharmURL
train
func ReadCharmURL(path string) (*charm.URL, error) {
    surl := ""
    if err := utils.ReadYaml(path, &surl); err != nil {
        return nil, err
    }
    return charm.ParseURL(surl)
}
go
{ "resource": "" }
q4074
WriteCharmURL
train
func WriteCharmURL(path string, url *charm.URL) error {
    return utils.WriteYaml(path, url.String())
}
go
{ "resource": "" }
q4075
LXDProfile
train
func (c *Charm) LXDProfile() lxdprofile.Profile {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.details.LXDProfile
}
go
{ "resource": "" }
q4076
NewSetFirewallRuleCommand
train
func NewSetFirewallRuleCommand() cmd.Command {
    cmd := &setFirewallRuleCommand{}
    cmd.newAPIFunc = func() (SetFirewallRuleAPI, error) {
        root, err := cmd.NewAPIRoot()
        if err != nil {
            return nil, errors.Trace(err)
        }
        return firewallrules.NewClient(root), nil
    }
    return modelcmd.Wrap(cmd)
}
go
{ "resource": "" }
q4077
Use
train
func (c *tracker) Use() (*state.Controller, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.references == 0 {
        return nil, ErrControllerClosed
    }
    c.references++
    return c.st, nil
}
go
{ "resource": "" }
q4078
Done
train
func (c *tracker) Done() error {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.references == 0 {
        return ErrControllerClosed
    }
    c.references--
    if c.references == 0 {
        err := c.st.Close()
        if err != nil {
            logger.Errorf("error when closing controller: %v", err)
        }
    }
    return nil
}
go
{ "resource": "" }
q4079
resourceID
train
func resourceID(id, subType, subID string) string {
    if subType == "" {
        return fmt.Sprintf("resource#%s", id)
    }
    return fmt.Sprintf("resource#%s#%s-%s", id, subType, subID)
}
go
{ "resource": "" }
q4080
newResolvePendingResourceOps
train
func newResolvePendingResourceOps(pending storedResource, exists bool) []txn.Op {
    oldID := pendingResourceID(pending.ID, pending.PendingID)
    newRes := pending
    newRes.PendingID = ""
    // TODO(ericsnow) Update newRes.StoragePath? Doing so would require
    // moving the resource in the blobstore to the correct path, which
    // we cannot do in the transaction...
    ops := []txn.Op{{
        C:      resourcesC,
        Id:     oldID,
        Assert: txn.DocExists,
        Remove: true,
    }}

    // TODO(perrito666) 2016-05-02 lp:1558657
    csRes := charmStoreResource{
        Resource:      newRes.Resource.Resource,
        id:            newRes.ID,
        applicationID: newRes.ApplicationID,
        // Truncate the time to remove monotonic time for Go 1.9+
        // to make it easier for tests to compare the time.
        lastPolled: time.Now().Truncate(1).UTC(),
    }

    if exists {
        ops = append(ops, newUpdateResourceOps(newRes)...)
        return append(ops, newUpdateCharmStoreResourceOps(csRes)...)
    } else {
        ops = append(ops, newInsertResourceOps(newRes)...)
        return append(ops, newInsertCharmStoreResourceOps(csRes)...)
    }
}
go
{ "resource": "" }
q4081
newCharmStoreResourceDoc
train
func newCharmStoreResourceDoc(res charmStoreResource) *resourceDoc {
    fullID := charmStoreResourceID(res.id)
    return charmStoreResource2Doc(fullID, res)
}
go
{ "resource": "" }
q4082
newUnitResourceDoc
train
func newUnitResourceDoc(unitID string, stored storedResource) *resourceDoc {
    fullID := unitResourceID(stored.ID, unitID)
    return unitResource2Doc(fullID, unitID, stored)
}
go
{ "resource": "" }
q4083
newResourceDoc
train
func newResourceDoc(stored storedResource) *resourceDoc {
    fullID := applicationResourceID(stored.ID)
    if stored.PendingID != "" {
        fullID = pendingResourceID(stored.ID, stored.PendingID)
    }
    return resource2doc(fullID, stored)
}
go
{ "resource": "" }
q4084
newStagedResourceDoc
train
func newStagedResourceDoc(stored storedResource) *resourceDoc {
    stagedID := stagedResourceID(stored.ID)
    return resource2doc(stagedID, stored)
}
go
{ "resource": "" }
q4085
resources
train
func (p ResourcePersistence) resources(applicationID string) ([]resourceDoc, error) {
    logger.Tracef("querying db for resources for %q", applicationID)
    var docs []resourceDoc
    query := bson.D{{"application-id", applicationID}}
    if err := p.base.All(resourcesC, query, &docs); err != nil {
        return nil, errors.Trace(err)
    }
    logger.Tracef("found %d resources", len(docs))
    return docs, nil
}
go
{ "resource": "" }
q4086
getOne
train
func (p ResourcePersistence) getOne(resID string) (resourceDoc, error) {
    logger.Tracef("querying db for resource %q", resID)
    id := applicationResourceID(resID)
    var doc resourceDoc
    if err := p.base.One(resourcesC, id, &doc); err != nil {
        return doc, errors.Trace(err)
    }
    return doc, nil
}
go
{ "resource": "" }
q4087
getOnePending
train
func (p ResourcePersistence) getOnePending(resID, pendingID string) (resourceDoc, error) {
    logger.Tracef("querying db for resource %q (pending %q)", resID, pendingID)
    id := pendingResourceID(resID, pendingID)
    var doc resourceDoc
    if err := p.base.One(resourcesC, id, &doc); err != nil {
        return doc, errors.Trace(err)
    }
    return doc, nil
}
go
{ "resource": "" }
q4088
resource2doc
train
func resource2doc(id string, stored storedResource) *resourceDoc {
    res := stored.Resource
    // TODO(ericsnow) We may need to limit the resolution of timestamps
    // in order to avoid some conversion problems from Mongo.
    return &resourceDoc{
        DocID:         id,
        ID:            res.ID,
        PendingID:     res.PendingID,
        ApplicationID: res.ApplicationID,
        Name:          res.Name,
        Type:          res.Type.String(),
        Path:          res.Path,
        Description:   res.Description,
        Origin:        res.Origin.String(),
        Revision:      res.Revision,
        Fingerprint:   res.Fingerprint.Bytes(),
        Size:          res.Size,
        Username:      res.Username,
        Timestamp:     res.Timestamp,
        StoragePath:   stored.storagePath,
    }
}
go
{ "resource": "" }
q4089
doc2resource
train
func doc2resource(doc resourceDoc) (storedResource, error) {
    res, err := doc2basicResource(doc)
    if err != nil {
        return storedResource{}, errors.Trace(err)
    }
    stored := storedResource{
        Resource:    res,
        storagePath: doc.StoragePath,
    }
    return stored, nil
}
go
{ "resource": "" }
q4090
doc2basicResource
train
func doc2basicResource(doc resourceDoc) (resource.Resource, error) {
    var res resource.Resource

    resType, err := charmresource.ParseType(doc.Type)
    if err != nil {
        return res, errors.Annotate(err, "got invalid data from DB")
    }

    origin, err := charmresource.ParseOrigin(doc.Origin)
    if err != nil {
        return res, errors.Annotate(err, "got invalid data from DB")
    }

    fp, err := resource.DeserializeFingerprint(doc.Fingerprint)
    if err != nil {
        return res, errors.Annotate(err, "got invalid data from DB")
    }

    res = resource.Resource{
        Resource: charmresource.Resource{
            Meta: charmresource.Meta{
                Name:        doc.Name,
                Type:        resType,
                Path:        doc.Path,
                Description: doc.Description,
            },
            Origin:      origin,
            Revision:    doc.Revision,
            Fingerprint: fp,
            Size:        doc.Size,
        },
        ID:            doc.ID,
        PendingID:     doc.PendingID,
        ApplicationID: doc.ApplicationID,
        Username:      doc.Username,
        Timestamp:     doc.Timestamp,
    }
    if err := res.Validate(); err != nil {
        return res, errors.Annotate(err, "got invalid data from DB")
    }
    return res, nil
}
go
{ "resource": "" }
q4091
Close
train
func (ls *LogStream) Close() error {
    ls.mu.Lock()
    defer ls.mu.Unlock()
    if ls.stream == nil {
        return nil
    }
    if err := ls.stream.Close(); err != nil {
        return errors.Trace(err)
    }
    ls.stream = nil
    return nil
}
go
{ "resource": "" }
q4092
Validate
train
func (config Config) Validate() error {
    if config.Logger == nil {
        return errors.NotValidf("nil Logger")
    }
    if config.Tag == nil {
        return errors.NotValidf("nil machine tag")
    }
    k := config.Tag.Kind()
    if k != names.MachineTagKind {
        return errors.NotValidf("%q tag kind", k)
    }
    if config.FacadeFactory == nil {
        return errors.NotValidf("nil FacadeFactory")
    }
    if config.Service == nil {
        return errors.NotValidf("nil Service")
    }
    return nil
}
go
{ "resource": "" }
q4093
NewWorker
train
func NewWorker(config Config) (worker.Worker, error) {
    if err := config.Validate(); err != nil {
        return nil, errors.Trace(err)
    }
    w := &upgradeSeriesWorker{
        Facade:          config.FacadeFactory(config.Tag),
        facadeFactory:   config.FacadeFactory,
        logger:          config.Logger,
        service:         config.Service,
        upgraderFactory: config.UpgraderFactory,
        machineStatus:   model.UpgradeSeriesNotStarted,
        leadersPinned:   false,
    }
    if err := catacomb.Invoke(catacomb.Plan{
        Site: &w.catacomb,
        Work: w.loop,
    }); err != nil {
        return nil, errors.Trace(err)
    }
    return w, nil
}
go
{ "resource": "" }
q4094
handleUpgradeSeriesChange
train
func (w *upgradeSeriesWorker) handleUpgradeSeriesChange() error {
    w.mu.Lock()
    defer w.mu.Unlock()

    var err error
    if w.machineStatus, err = w.MachineStatus(); err != nil {
        if errors.IsNotFound(err) {
            // No upgrade-series lock. This can happen when:
            // - The first watch call is made.
            // - The lock is removed after a completed upgrade.
            w.logger.Infof("no series upgrade lock present")
            w.machineStatus = model.UpgradeSeriesNotStarted
            w.preparedUnits = nil
            w.completedUnits = nil
            return nil
        }
        return errors.Trace(err)
    }
    w.logger.Infof("machine series upgrade status is %q", w.machineStatus)

    switch w.machineStatus {
    case model.UpgradeSeriesPrepareStarted:
        err = w.handlePrepareStarted()
    case model.UpgradeSeriesCompleteStarted:
        err = w.handleCompleteStarted()
    case model.UpgradeSeriesCompleted:
        err = w.handleCompleted()
    }
    return errors.Trace(err)
}
go
{ "resource": "" }
q4095
handlePrepareStarted
train
func (w *upgradeSeriesWorker) handlePrepareStarted() error {
    var err error
    if !w.leadersPinned {
        if err = w.pinLeaders(); err != nil {
            return errors.Trace(err)
        }
    }

    if w.preparedUnits, err = w.UnitsPrepared(); err != nil {
        return errors.Trace(err)
    }

    unitServices, allConfirmed, err := w.compareUnitAgentServices(w.preparedUnits)
    if err != nil {
        return errors.Trace(err)
    }
    if !allConfirmed {
        w.logger.Debugf(
            "waiting for units to complete series upgrade preparation; known unit agent services: %s",
            unitNames(unitServices),
        )
        return nil
    }

    return errors.Trace(w.transitionPrepareComplete(unitServices))
}
go
{ "resource": "" }
q4096
transitionPrepareComplete
train
func (w *upgradeSeriesWorker) transitionPrepareComplete(unitServices map[string]string) error {
    w.logger.Infof("preparing service units for series upgrade")
    toSeries, err := w.TargetSeries()
    if err != nil {
        return errors.Trace(err)
    }
    upgrader, err := w.upgraderFactory(toSeries)
    if err != nil {
        return errors.Trace(err)
    }
    if err := upgrader.PerformUpgrade(); err != nil {
        return errors.Trace(err)
    }
    return errors.Trace(w.SetMachineStatus(model.UpgradeSeriesPrepareCompleted, "binaries and service files written"))
}
go
{ "resource": "" }
q4097
transitionUnitsStarted
train
func (w *upgradeSeriesWorker) transitionUnitsStarted(unitServices map[string]string) error {
    w.logger.Infof("ensuring units are up after series upgrade")
    for unit, serviceName := range unitServices {
        svc, err := w.service.DiscoverService(serviceName)
        if err != nil {
            return errors.Trace(err)
        }
        running, err := svc.Running()
        if err != nil {
            return errors.Trace(err)
        }
        if running {
            continue
        }
        if err := svc.Start(); err != nil {
            return errors.Annotatef(err, "starting %q unit agent after series upgrade", unit)
        }
    }
    return errors.Trace(w.StartUnitCompletion("started unit agents after series upgrade"))
}
go
{ "resource": "" }
q4098
handleCompleted
train
func (w *upgradeSeriesWorker) handleCompleted() error {
    s, err := hostSeries()
    if err != nil {
        return errors.Trace(err)
    }
    if err = w.FinishUpgradeSeries(s); err != nil {
        return errors.Trace(err)
    }
    return errors.Trace(w.unpinLeaders())
}
go
{ "resource": "" }
q4099
pinLeaders
train
func (w *upgradeSeriesWorker) pinLeaders() (err error) {
    // if we encounter an error,
    // attempt to ensure that no application leaders remain pinned.
    defer func() {
        if err != nil {
            if unpinErr := w.unpinLeaders(); unpinErr != nil {
                err = errors.Wrap(err, unpinErr)
            }
        }
    }()

    results, err := w.PinMachineApplications()
    if err != nil {
        // If pin machine applications method return not implemented because it's
        // utilising the legacy leases store, then we should display the warning
        // in the log and return out. Unpinning leaders should be safe as that
        // should be considered a no-op
        if params.IsCodeNotImplemented(err) {
            w.logger.Infof("failed to pin machine applications, with legacy lease manager leadership pinning is not implemented")
            return nil
        }
        return errors.Trace(err)
    }

    var lastErr error
    for app, err := range results {
        if err == nil {
            w.logger.Infof("unpin leader for application %q", app)
            continue
        }
        w.logger.Errorf("failed to pin leader for application %q: %s", app, err.Error())
        lastErr = err
    }
    if lastErr == nil {
        w.leadersPinned = true
        return nil
    }
    return errors.Trace(lastErr)
}
go
{ "resource": "" }