_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4500
|
isSubnetConstrainedError
|
train
|
// isSubnetConstrainedError reports whether err (after unwrapping with
// errors.Cause) is an EC2 error meaning no more instances can be placed in
// the requested subnet/VPC — capacity or free addresses exhausted, or the
// subnet no longer exists.
func isSubnetConstrainedError(err error) bool {
	cause, ok := errors.Cause(err).(*ec2.Error)
	if !ok {
		return false
	}
	switch cause.Code {
	case "InsufficientFreeAddressesInSubnet", "InsufficientInstanceCapacity":
		// Subnet and/or VPC general limits reached.
		return true
	case "InvalidSubnetID.NotFound":
		// This shouldn't happen, as we validate the subnet IDs, but it can
		// happen if the user manually deleted the subnet outside of Juju.
		return true
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q4501
|
SuperSubnets
|
train
|
// SuperSubnets returns the CIDR of the environment's VPC as the single
// "super subnet". Environments without a (configured or default) VPC are
// not supported.
func (e *environ) SuperSubnets(ctx context.ProviderCallContext) ([]string, error) {
	vpcId := e.ecfg().vpcID()
	if !isVPCIDSet(vpcId) {
		// No explicit VPC configured; fall back to the account's
		// default VPC if one exists. Errors here are deliberately
		// treated as "no default VPC".
		hasDefaultVPC, err := e.hasDefaultVPC(ctx)
		if err == nil && hasDefaultVPC {
			vpcId = e.defaultVPC.Id
		}
	}
	if !isVPCIDSet(vpcId) {
		return nil, errors.NotSupportedf("Not a VPC environment")
	}
	cidr, err := getVPCCIDR(e.ec2, ctx, vpcId)
	if err != nil {
		return nil, err
	}
	return []string{cidr}, nil
}
|
go
|
{
"resource": ""
}
|
q4502
|
Get
|
train
|
// Get fetches annotations for the entities identified by tags via the
// "Get" facade call. Any partial results are returned alongside the error.
func (c *Client) Get(tags []string) ([]params.AnnotationsGetResult, error) {
	var annotations params.AnnotationsGetResults
	err := c.facade.FacadeCall("Get", entitiesFromTags(tags), &annotations)
	if err != nil {
		return annotations.Results, errors.Trace(err)
	}
	return annotations.Results, nil
}
|
go
|
{
"resource": ""
}
|
q4503
|
Set
|
train
|
// Set updates the given per-entity annotations via the "Set" facade call
// and returns one ErrorResult per entity.
func (c *Client) Set(annotations map[string]map[string]string) ([]params.ErrorResult, error) {
	var results params.ErrorResults
	args := params.AnnotationsSet{entitiesAnnotations(annotations)}
	if err := c.facade.FacadeCall("Set", args, &results); err != nil {
		return nil, errors.Trace(err)
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q4504
|
formatFilesystemListTabular
|
train
|
// formatFilesystemListTabular writes a tab-separated table of filesystems
// and their machine/container/unit attachments to writer. Machine columns
// are included only when at least one machine attachment exists.
func formatFilesystemListTabular(writer io.Writer, infos map[string]FilesystemInfo) error {
	tw := output.TabWriter(writer)
	// print emits one tab-separated table row.
	print := func(values ...string) {
		fmt.Fprintln(tw, strings.Join(values, "\t"))
	}
	haveMachines := false
	filesystemAttachmentInfos := make(filesystemAttachmentInfos, 0, len(infos))
	for filesystemId, info := range infos {
		filesystemAttachmentInfo := filesystemAttachmentInfo{
			FilesystemId:   filesystemId,
			FilesystemInfo: info,
		}
		if info.Attachments == nil {
			// Unattached filesystem: single row with empty attachment fields.
			filesystemAttachmentInfos = append(filesystemAttachmentInfos, filesystemAttachmentInfo)
			continue
		}
		// Each unit attachment must have a corresponding filesystem
		// attachment. Enumerate each of the filesystem attachments,
		// and locate the corresponding unit attachment if any.
		// Each filesystem attachment has at most one corresponding
		// unit attachment.
		for machineId, machineInfo := range info.Attachments.Machines {
			// Per-iteration shadow copy so each row carries its own
			// attachment fields.
			filesystemAttachmentInfo := filesystemAttachmentInfo
			filesystemAttachmentInfo.MachineId = machineId
			filesystemAttachmentInfo.FilesystemAttachment = machineInfo
			for unitId, unitInfo := range info.Attachments.Units {
				if unitInfo.MachineId == machineId {
					filesystemAttachmentInfo.UnitId = unitId
					filesystemAttachmentInfo.UnitStorageAttachment = unitInfo
					break
				}
			}
			haveMachines = true
			filesystemAttachmentInfos = append(filesystemAttachmentInfos, filesystemAttachmentInfo)
		}
		for hostId, containerInfo := range info.Attachments.Containers {
			// Container attachments match units by host id == unit id.
			filesystemAttachmentInfo := filesystemAttachmentInfo
			filesystemAttachmentInfo.FilesystemAttachment = containerInfo
			for unitId, unitInfo := range info.Attachments.Units {
				if hostId == unitId {
					filesystemAttachmentInfo.UnitId = unitId
					filesystemAttachmentInfo.UnitStorageAttachment = unitInfo
					break
				}
			}
			filesystemAttachmentInfos = append(filesystemAttachmentInfos, filesystemAttachmentInfo)
		}
	}
	sort.Sort(filesystemAttachmentInfos)
	if haveMachines {
		print("Machine", "Unit", "Storage id", "Id", "Volume", "Provider id", "Mountpoint", "Size", "State", "Message")
	} else {
		print("Unit", "Storage id", "Id", "Provider id", "Mountpoint", "Size", "State", "Message")
	}
	for _, info := range filesystemAttachmentInfos {
		var size string
		if info.Size > 0 {
			// Size counts MiB (scaled to bytes here for display).
			size = humanize.IBytes(info.Size * humanize.MiByte)
		}
		if haveMachines {
			print(
				info.MachineId, info.UnitId, info.Storage,
				info.FilesystemId, info.Volume, info.ProviderFilesystemId,
				info.MountPoint, size,
				string(info.Status.Current), info.Status.Message,
			)
		} else {
			print(
				info.UnitId, info.Storage,
				info.FilesystemId, info.ProviderFilesystemId,
				info.MountPoint, size,
				string(info.Status.Current), info.Status.Message,
			)
		}
	}
	return tw.Flush()
}
|
go
|
{
"resource": ""
}
|
q4505
|
PerformUpgrade
|
train
|
// PerformUpgrade runs all upgrade steps required to move from version
// "from" for the given targets. State-based steps (when any target needs
// them) run before API-based steps.
func PerformUpgrade(from version.Number, targets []Target, context Context) error {
	if hasStateTarget(targets) {
		stateOps := newStateUpgradeOpsIterator(from)
		if err := runUpgradeSteps(stateOps, targets, context.StateContext()); err != nil {
			return err
		}
	}
	apiOps := newUpgradeOpsIterator(from)
	if err := runUpgradeSteps(apiOps, targets, context.APIContext()); err != nil {
		return err
	}
	logger.Infof("All upgrade steps completed successfully")
	return nil
}
|
go
|
{
"resource": ""
}
|
q4506
|
runUpgradeSteps
|
train
|
// runUpgradeSteps executes every step yielded by ops whose declared targets
// intersect the given targets. The first failing step aborts the run and is
// reported as an *upgradeError.
func runUpgradeSteps(ops *opsIterator, targets []Target, context Context) error {
	for ops.Next() {
		for _, step := range ops.Get().Steps() {
			if !targetsMatch(targets, step.Targets()) {
				continue
			}
			logger.Infof("running upgrade step: %v", step.Description())
			err := step.Run(context)
			if err == nil {
				continue
			}
			logger.Errorf("upgrade step %q failed: %v", step.Description(), err)
			return &upgradeError{
				description: step.Description(),
				err:         err,
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4507
|
targetsMatch
|
train
|
// targetsMatch reports whether any step target applies to any machine
// target. AllMachines matches every machine target, but an empty
// machineTargets slice never matches anything.
func targetsMatch(machineTargets []Target, stepTargets []Target) bool {
	for _, machineTarget := range machineTargets {
		for _, stepTarget := range stepTargets {
			if stepTarget == AllMachines || stepTarget == machineTarget {
				return true
			}
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q4508
|
NewServer
|
train
|
// NewServer validates cfg — defaulting the log-sink configuration when none
// was supplied — and returns a new API server.
//
// Important note:
// Do not manipulate the state within NewServer as the API server needs to
// run before mongo upgrades have happened and any state manipulation may be
// relying on features of the database added by upgrades. Here be dragons.
func NewServer(cfg ServerConfig) (*Server, error) {
	if cfg.LogSinkConfig == nil {
		defaults := DefaultLogSinkConfig()
		cfg.LogSinkConfig = &defaults
	}
	if err := cfg.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	return newServer(cfg)
}
|
go
|
{
"resource": ""
}
|
q4509
|
Stop
|
train
|
// Stop kills the server's tomb and blocks until the server loop has shut
// down, returning whatever error the loop produced.
func (srv *Server) Stop() error {
	srv.tomb.Kill(nil)
	return srv.tomb.Wait()
}
|
go
|
{
"resource": ""
}
|
q4510
|
loop
|
train
|
// loop registers all endpoint handlers, signals readiness, then blocks
// until the server is asked to die, waiting for in-flight requests before
// returning.
func (srv *Server) loop(ready chan struct{}) error {
	// for pat based handlers, they are matched in-order of being
	// registered, first match wins. So more specific ones have to be
	// registered first.
	for _, ep := range srv.endpoints() {
		srv.mux.AddHandler(ep.Method, ep.Pattern, ep.Handler)
		// Deferred on purpose: removal happens when loop returns, not
		// per-iteration, so handlers stay registered for the server's
		// whole lifetime.
		defer srv.mux.RemoveHandler(ep.Method, ep.Pattern)
		if ep.Method == "GET" {
			// Every GET endpoint also serves HEAD.
			srv.mux.AddHandler("HEAD", ep.Pattern, ep.Handler)
			defer srv.mux.RemoveHandler("HEAD", ep.Pattern)
		}
	}
	// All handlers registered: tell the caller we're ready to serve.
	close(ready)
	<-srv.tomb.Dying()
	srv.wg.Wait() // wait for any outstanding requests to complete.
	return tomb.ErrDying
}
|
go
|
{
"resource": ""
}
|
q4511
|
publicDNSName
|
train
|
// publicDNSName returns the server's public DNS name, guarded by srv.mu
// because it may be updated concurrently.
func (srv *Server) publicDNSName() string {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.publicDNSName_
}
|
go
|
{
"resource": ""
}
|
q4512
|
StartInstance
|
train
|
// StartInstance launches a new CloudSigma server per the given start
// parameters and returns the started instance along with its hardware
// characteristics.
func (env *environ) StartInstance(ctx context.ProviderCallContext, args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	logger.Infof("sigmaEnviron.StartInstance...")
	if args.InstanceConfig == nil {
		return nil, errors.New("instance configuration is nil")
	}
	if len(args.Tools) == 0 {
		return nil, errors.New("agent binaries not found")
	}
	// Choose an image from the supplied metadata.
	img, err := findInstanceImage(args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	// Only agent binaries matching the image's architecture are usable.
	tools, err := args.Tools.Match(tools.Filter{Arch: img.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", img.Arch, args.Tools.Arches())
	}
	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}
	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}
	// Compose cloud-init user data that provisions the agent on first boot.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, CloudSigmaRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("cloudsigma user data; %d bytes", len(userData))
	client := env.client
	cfg := env.Config()
	server, rootdrive, arch, err := client.newInstance(args, img, userData, cfg.AuthorizedKeys())
	if err != nil {
		return nil, errors.Errorf("failed start instance: %v", err)
	}
	inst := &sigmaInstance{server: server}
	// prepare hardware characteristics
	hwch, err := inst.hardware(arch, rootdrive.Size())
	if err != nil {
		return nil, err
	}
	logger.Debugf("hardware: %v", hwch)
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hwch,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4513
|
AllInstances
|
train
|
// AllInstances returns every instance belonging to this environment.
func (env *environ) AllInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
	// Please note that this must *not* return instances that have not been
	// allocated as part of this environment -- if it does, juju will see they
	// are not tracked in state, assume they're stale/rogue, and shut them down.
	logger.Tracef("environ.AllInstances...")
	servers, err := env.client.instances()
	if err != nil {
		logger.Tracef("environ.AllInstances failed: %v", err)
		return nil, err
	}
	result := make([]instances.Instance, len(servers))
	for i, server := range servers {
		result[i] = sigmaInstance{server: server}
	}
	if logger.LogLevel() <= loggo.TRACE {
		logger.Tracef("All instances, len = %d:", len(result))
		for _, inst := range result {
			logger.Tracef("... id: %q, status: %q", inst.Id(), inst.Status(ctx))
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4514
|
Instances
|
train
|
// Instances returns the instances for the given ids, positionally. Missing
// ids leave a nil slot; the error is ErrNoInstances when nothing was found
// or ErrPartialInstances when only some ids resolved.
func (env *environ) Instances(ctx context.ProviderCallContext, ids []instance.Id) ([]instances.Instance, error) {
	logger.Tracef("environ.Instances %#v", ids)
	// Please note that this must *not* return instances that have not been
	// allocated as part of this environment -- if it does, juju will see they
	// are not tracked in state, assume they're stale/rogue, and shut them down.
	// This advice applies even if an instance id passed in corresponds to a
	// real instance that's not part of the environment -- the Environ should
	// treat that no differently to a request for one that does not exist.
	m, err := env.client.instanceMap()
	if err != nil {
		return nil, errors.Annotate(err, "environ.Instances failed")
	}
	result := make([]instances.Instance, len(ids))
	found := 0
	for i, id := range ids {
		server, ok := m[string(id)]
		if !ok {
			continue
		}
		result[i] = sigmaInstance{server: server}
		found++
	}
	switch {
	case found == 0:
		err = environs.ErrNoInstances
	case found < len(ids):
		err = environs.ErrPartialInstances
	}
	return result, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4515
|
getAddresses
|
train
|
// getAddresses collects the addresses of all non-nil instances, skipping
// (with a debug log) any instance whose addresses cannot be fetched.
func getAddresses(ctx context.ProviderCallContext, instances []instances.Instance) []network.Address {
	var all []network.Address
	for _, inst := range instances {
		if inst == nil {
			continue
		}
		addrs, err := inst.Addresses(ctx)
		if err != nil {
			logger.Debugf("failed to get addresses for %v: %v (ignoring)", inst.Id(), err)
			continue
		}
		all = append(all, addrs...)
	}
	return all
}
|
go
|
{
"resource": ""
}
|
q4516
|
waitAnyInstanceAddresses
|
train
|
// waitAnyInstanceAddresses polls the environment until at least one of the
// given instances reports an address or the refresh attempt strategy is
// exhausted (a NotFound error in that case). Partial instance results are
// tolerated while polling.
func waitAnyInstanceAddresses(
	env Environ,
	ctx context.ProviderCallContext,
	instanceIds []instance.Id,
) ([]network.Address, error) {
	var addrs []network.Address
	for a := AddressesRefreshAttempt.Start(); len(addrs) == 0 && a.Next(); {
		insts, err := env.Instances(ctx, instanceIds)
		if err != nil && err != ErrPartialInstances {
			logger.Debugf("error getting state instances: %v", err)
			return nil, err
		}
		addrs = getAddresses(ctx, insts)
	}
	if len(addrs) == 0 {
		return nil, errors.NotFoundf("addresses for %v", instanceIds)
	}
	return addrs, nil
}
|
go
|
{
"resource": ""
}
|
q4517
|
APIInfo
|
train
|
// APIInfo builds api.Info for connecting to the given model's controller,
// waiting until at least one controller instance reports an address.
func APIInfo(ctx context.ProviderCallContext, controllerUUID, modelUUID, caCert string, apiPort int, env Environ) (*api.Info, error) {
	instanceIds, err := env.ControllerInstances(ctx, controllerUUID)
	if err != nil {
		return nil, err
	}
	logger.Debugf("ControllerInstances returned: %v", instanceIds)
	addrs, err := waitAnyInstanceAddresses(env, ctx, instanceIds)
	if err != nil {
		return nil, err
	}
	hostPorts := network.AddressesWithPort(addrs, apiPort)
	return &api.Info{
		Addrs:    network.HostPortsToStrings(hostPorts),
		CACert:   caCert,
		ModelTag: names.NewModelTag(modelUUID),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4518
|
CheckProviderAPI
|
train
|
// CheckProviderAPI verifies the provider substrate is reachable by making a
// cheap API call. "No instances" style results are treated as success.
func CheckProviderAPI(env InstanceBroker, ctx context.ProviderCallContext) error {
	// We will make a simple API call to the provider
	// to ensure the underlying substrate is ok.
	_, err := env.AllInstances(ctx)
	if err == nil || err == ErrPartialInstances || err == ErrNoInstances {
		return nil
	}
	return errors.Annotate(err, "cannot make API call to provider")
}
|
go
|
{
"resource": ""
}
|
q4519
|
NewFacade
|
train
|
// NewFacade creates a CAAS operator API facade. Only application agents
// are authorized to use it.
func NewFacade(
	resources facade.Resources,
	authorizer facade.Authorizer,
	st CAASOperatorState,
) (*Facade, error) {
	if !authorizer.AuthApplicationAgent() {
		return nil, common.ErrPerm
	}
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Read access is granted to application and unit tags.
	canRead := common.AuthAny(
		common.AuthFuncForTagKind(names.ApplicationTagKind),
		common.AuthFuncForTagKind(names.UnitTagKind),
	)
	// accessUnit authorizes any unit belonging to the authenticated
	// application. Note the unit list is snapshotted at the time the
	// returned AuthFunc is constructed.
	accessUnit := func() (common.AuthFunc, error) {
		switch tag := authorizer.GetAuthTag().(type) {
		case names.ApplicationTag:
			// Any of the units belonging to
			// the application can be accessed.
			app, err := st.Application(tag.Name)
			if err != nil {
				return nil, errors.Trace(err)
			}
			allUnits, err := app.AllUnits()
			if err != nil {
				return nil, errors.Trace(err)
			}
			return func(tag names.Tag) bool {
				for _, u := range allUnits {
					if u.Tag() == tag {
						return true
					}
				}
				return false
			}, nil
		default:
			return nil, errors.Errorf("expected names.ApplicationTag, got %T", tag)
		}
	}
	return &Facade{
		LifeGetter:         common.NewLifeGetter(st, canRead),
		APIAddresser:       common.NewAPIAddresser(st, resources),
		AgentEntityWatcher: common.NewAgentEntityWatcher(st, resources, canRead),
		Remover:            common.NewRemover(st, true, accessUnit),
		ToolsSetter:        common.NewToolsSetter(st, common.AuthFuncForTag(authorizer.GetAuthTag())),
		auth:               authorizer,
		resources:          resources,
		state:              st,
		model:              model,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4520
|
Charm
|
train
|
func (f *Facade) Charm(args params.Entities) (params.ApplicationCharmResults, error) {
results := params.ApplicationCharmResults{
Results: make([]params.ApplicationCharmResult, len(args.Entities)),
}
authTag := f.auth.GetAuthTag()
for i, entity := range args.Entities {
tag, err := names.ParseApplicationTag(entity.Tag)
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
if tag != authTag {
results.Results[i].Error = common.ServerError(common.ErrPerm)
continue
}
application, err := f.state.Application(tag.Id())
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
charm, force, err := application.Charm()
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
results.Results[i].Result = ¶ms.ApplicationCharm{
URL: charm.URL().String(),
ForceUpgrade: force,
SHA256: charm.BundleSha256(),
CharmModifiedVersion: application.CharmModifiedVersion(),
}
}
return results, nil
}
|
go
|
{
"resource": ""
}
|
q4521
|
SetPodSpec
|
train
|
// SetPodSpec validates and stores the pod spec for each named application.
// Authorization and validation failures are reported per-result; only
// model/provider lookup failures abort the whole call.
func (f *Facade) SetPodSpec(args params.SetPodSpecParams) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Specs)),
	}
	cfg, err := f.model.ModelConfig()
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	provider, err := environs.Provider(cfg.Type())
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	// Pod specs only make sense for container (CAAS) providers.
	caasProvider, ok := provider.(caas.ContainerEnvironProvider)
	if !ok {
		return params.ErrorResults{}, errors.NotValidf("container environ provider %T", provider)
	}
	for i, arg := range args.Specs {
		tag, err := names.ParseApplicationTag(arg.Tag)
		if err != nil {
			// Parse failures are deliberately masked as permission errors.
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		if !f.auth.AuthOwner(tag) {
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		// Validate the spec before persisting it.
		if _, err := caasProvider.ParsePodSpec(arg.Value); err != nil {
			results.Results[i].Error = common.ServerError(errors.New("invalid pod spec"))
			continue
		}
		results.Results[i].Error = common.ServerError(
			f.model.SetPodSpec(tag, arg.Value),
		)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4522
|
WatchUnits
|
train
|
// WatchUnits starts a StringsWatcher over each entity's units, returning
// the watcher id and initial changes, or a per-entity error.
func (f *Facade) WatchUnits(args params.Entities) (params.StringsWatchResults, error) {
	results := params.StringsWatchResults{
		Results: make([]params.StringsWatchResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		id, changes, err := f.watchUnits(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		result := &results.Results[i]
		result.StringsWatcherId = id
		result.Changes = changes
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4523
|
NewConnFacade
|
train
|
// NewConnFacade returns a ConnFacade backed by the "Agent" facade of the
// given API connection.
func NewConnFacade(caller base.APICaller) (ConnFacade, error) {
	return &connFacade{
		caller: base.NewFacadeCaller(caller, "Agent"),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4524
|
Life
|
train
|
// Life fetches the life value of entity via the Agent facade's GetEntities
// call. Not-found/unauthorized errors map to ErrDenied; any life value
// other than Alive/Dying/Dead is rejected.
func (facade *connFacade) Life(entity names.Tag) (Life, error) {
	args := params.Entities{
		Entities: []params.Entity{{Tag: entity.String()}},
	}
	var results params.AgentGetEntitiesResults
	if err := facade.caller.FacadeCall("GetEntities", args, &results); err != nil {
		return "", errors.Trace(err)
	}
	if n := len(results.Entities); n != 1 {
		return "", errors.Errorf("expected 1 result, got %d", n)
	}
	result := results.Entities[0]
	if err := result.Error; err != nil {
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			return "", ErrDenied
		}
		return "", errors.Trace(err)
	}
	switch life := Life(result.Life); life {
	case Alive, Dying, Dead:
		return life, nil
	default:
		return "", errors.Errorf("unknown life value %q", life)
	}
}
|
go
|
{
"resource": ""
}
|
q4525
|
SetPassword
|
train
|
// SetPassword sets the password for entity via the Agent facade's
// SetPasswords call. ErrDenied is returned when the entity is dead,
// missing, or unauthorized.
func (facade *connFacade) SetPassword(entity names.Tag, password string) error {
	args := params.EntityPasswords{
		Changes: []params.EntityPassword{{
			Tag:      entity.String(),
			Password: password,
		}},
	}
	var results params.ErrorResults
	if err := facade.caller.FacadeCall("SetPasswords", args, &results); err != nil {
		return errors.Trace(err)
	}
	if n := len(results.Results); n != 1 {
		return errors.Errorf("expected 1 result, got %d", n)
	}
	err := results.Results[0].Error
	if err == nil {
		return nil
	}
	if params.IsCodeDead(err) || params.IsCodeNotFoundOrCodeUnauthorized(err) {
		return ErrDenied
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4526
|
NewMockCollection
|
train
|
// NewMockCollection creates a new gomock-backed Collection mock bound to ctrl.
// (mockgen-generated code; kept byte-identical.)
func NewMockCollection(ctrl *gomock.Controller) *MockCollection {
	mock := &MockCollection{ctrl: ctrl}
	mock.recorder = &MockCollectionMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q4527
|
Count
|
train
|
// Count mocks the Count base method, returning whatever the controller's
// expectation was primed with.
func (m *MockCollection) Count() (int, error) {
	ret := m.ctrl.Call(m, "Count")
	ret0, _ := ret[0].(int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4528
|
Pipe
|
train
|
// Pipe mocks the Pipe base method.
func (m *MockCollection) Pipe(arg0 interface{}) *mgo_v2.Pipe {
	ret := m.ctrl.Call(m, "Pipe", arg0)
	ret0, _ := ret[0].(*mgo_v2.Pipe)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4529
|
Pipe
|
train
|
// Pipe indicates an expected call of Pipe on the mocked Collection.
func (mr *MockCollectionMockRecorder) Pipe(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pipe", reflect.TypeOf((*MockCollection)(nil).Pipe), arg0)
}
|
go
|
{
"resource": ""
}
|
q4530
|
Writeable
|
train
|
// Writeable mocks the Writeable base method.
func (m *MockCollection) Writeable() mongo.WriteCollection {
	ret := m.ctrl.Call(m, "Writeable")
	ret0, _ := ret[0].(mongo.WriteCollection)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4531
|
NewMockQuery
|
train
|
// NewMockQuery creates a new gomock-backed Query mock bound to ctrl.
func NewMockQuery(ctrl *gomock.Controller) *MockQuery {
	mock := &MockQuery{ctrl: ctrl}
	mock.recorder = &MockQueryMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q4532
|
Comment
|
train
|
// Comment mocks the Comment base method.
func (m *MockQuery) Comment(arg0 string) mongo.Query {
	ret := m.ctrl.Call(m, "Comment", arg0)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4533
|
Distinct
|
train
|
// Distinct mocks the Distinct base method.
func (m *MockQuery) Distinct(arg0 string, arg1 interface{}) error {
	ret := m.ctrl.Call(m, "Distinct", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4534
|
Iter
|
train
|
// Iter mocks the Iter base method.
func (m *MockQuery) Iter() mongo.Iterator {
	ret := m.ctrl.Call(m, "Iter")
	ret0, _ := ret[0].(mongo.Iterator)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4535
|
Iter
|
train
|
// Iter indicates an expected call of Iter on the mocked Query.
func (mr *MockQueryMockRecorder) Iter() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iter", reflect.TypeOf((*MockQuery)(nil).Iter))
}
|
go
|
{
"resource": ""
}
|
q4536
|
Limit
|
train
|
// Limit mocks the Limit base method.
func (m *MockQuery) Limit(arg0 int) mongo.Query {
	ret := m.ctrl.Call(m, "Limit", arg0)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4537
|
LogReplay
|
train
|
// LogReplay mocks the LogReplay base method.
func (m *MockQuery) LogReplay() mongo.Query {
	ret := m.ctrl.Call(m, "LogReplay")
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4538
|
MapReduce
|
train
|
// MapReduce mocks the MapReduce base method.
func (m *MockQuery) MapReduce(arg0 *mgo_v2.MapReduce, arg1 interface{}) (*mgo_v2.MapReduceInfo, error) {
	ret := m.ctrl.Call(m, "MapReduce", arg0, arg1)
	ret0, _ := ret[0].(*mgo_v2.MapReduceInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4539
|
One
|
train
|
// One mocks the One base method.
func (m *MockQuery) One(arg0 interface{}) error {
	ret := m.ctrl.Call(m, "One", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4540
|
Prefetch
|
train
|
// Prefetch mocks the Prefetch base method.
func (m *MockQuery) Prefetch(arg0 float64) mongo.Query {
	ret := m.ctrl.Call(m, "Prefetch", arg0)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4541
|
Select
|
train
|
// Select mocks the Select base method.
func (m *MockQuery) Select(arg0 interface{}) mongo.Query {
	ret := m.ctrl.Call(m, "Select", arg0)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4542
|
SetMaxTime
|
train
|
// SetMaxTime mocks the SetMaxTime base method.
func (m *MockQuery) SetMaxTime(arg0 time.Duration) mongo.Query {
	ret := m.ctrl.Call(m, "SetMaxTime", arg0)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4543
|
SetMaxTime
|
train
|
// SetMaxTime indicates an expected call of SetMaxTime on the mocked Query.
func (mr *MockQueryMockRecorder) SetMaxTime(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMaxTime", reflect.TypeOf((*MockQuery)(nil).SetMaxTime), arg0)
}
|
go
|
{
"resource": ""
}
|
q4544
|
Sort
|
train
|
// Sort mocks the Sort base method; variadic args are expanded so the
// controller can match each field individually.
func (m *MockQuery) Sort(arg0 ...string) mongo.Query {
	varargs := []interface{}{}
	for _, a := range arg0 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "Sort", varargs...)
	ret0, _ := ret[0].(mongo.Query)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4545
|
Tail
|
train
|
// Tail mocks the Tail base method.
func (m *MockQuery) Tail(arg0 time.Duration) *mgo_v2.Iter {
	ret := m.ctrl.Call(m, "Tail", arg0)
	ret0, _ := ret[0].(*mgo_v2.Iter)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4546
|
NewAPIBase
|
train
|
// NewAPIBase returns an APIBase facade; only machine or unit agents are
// permitted.
func NewAPIBase(backend Backend, resources facade.Resources, authorizer facade.Authorizer) (*APIBase, error) {
	if !authorizer.AuthMachineAgent() && !authorizer.AuthUnitAgent() {
		return nil, common.ErrPerm
	}
	return &APIBase{
		backend:    backend,
		resources:  resources,
		authorizer: authorizer,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4547
|
WatchForProxyConfigAndAPIHostPortChanges
|
train
|
// WatchForProxyConfigAndAPIHostPortChanges returns a NotifyWatcher for each
// authorized entity that fires on proxy-config or API host/port changes;
// unauthorized entities get a per-result error instead of a watcher.
//
// Fix: the local variable was named "errors", shadowing the errors package;
// renamed to authResults.
func (api *APIBase) WatchForProxyConfigAndAPIHostPortChanges(args params.Entities) params.NotifyWatchResults {
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	// The aggregate error is intentionally ignored, as in the original:
	// per-entity failures are surfaced through the per-result errors.
	authResults, _ := api.authEntities(args)
	for i := range args.Entities {
		if err := authResults.Results[i].Error; err != nil {
			results.Results[i].Error = err
			continue
		}
		results.Results[i] = api.oneWatch()
	}
	return results
}
|
go
|
{
"resource": ""
}
|
q4548
|
CreateVolumes
|
train
|
// CreateVolumes creates loop-backed volumes for each set of params.
// Results are positional; a failed creation records the error and leaves
// the Volume field nil.
//
// Fix: on createVolume failure the original still stored a pointer to the
// zero-value volume alongside the error; now we `continue` instead, which
// matches AttachVolumes' handling.
func (lvs *loopVolumeSource) CreateVolumes(ctx context.ProviderCallContext, args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
	results := make([]storage.CreateVolumesResult, len(args))
	for i, arg := range args {
		volume, err := lvs.createVolume(arg)
		if err != nil {
			results[i].Error = errors.Annotate(err, "creating volume")
			continue
		}
		results[i].Volume = &volume
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4549
|
ListVolumes
|
train
|
// ListVolumes is not yet implemented for the loop storage provider.
func (lvs *loopVolumeSource) ListVolumes(ctx context.ProviderCallContext) ([]string, error) {
	// TODO(axw) implement this when we need it.
	return nil, errors.NotImplementedf("ListVolumes")
}
|
go
|
{
"resource": ""
}
|
q4550
|
DescribeVolumes
|
train
|
// DescribeVolumes is not yet implemented for the loop storage provider.
func (lvs *loopVolumeSource) DescribeVolumes(ctx context.ProviderCallContext, volumeIds []string) ([]storage.DescribeVolumesResult, error) {
	// TODO(axw) implement this when we need it.
	return nil, errors.NotImplementedf("DescribeVolumes")
}
|
go
|
{
"resource": ""
}
|
q4551
|
DestroyVolumes
|
train
|
// DestroyVolumes destroys the identified loop volumes, returning one
// positional error slot per volume id (nil on success).
func (lvs *loopVolumeSource) DestroyVolumes(ctx context.ProviderCallContext, volumeIds []string) ([]error, error) {
	results := make([]error, len(volumeIds))
	for i, volumeId := range volumeIds {
		err := lvs.destroyVolume(volumeId)
		if err == nil {
			continue
		}
		results[i] = errors.Annotatef(err, "destroying %q", volumeId)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4552
|
ReleaseVolumes
|
train
|
// ReleaseVolumes is a no-op for loop devices: every volume id gets a nil
// error slot.
func (lvs *loopVolumeSource) ReleaseVolumes(ctx context.ProviderCallContext, volumeIds []string) ([]error, error) {
	return make([]error, len(volumeIds)), nil
}
|
go
|
{
"resource": ""
}
|
q4553
|
AttachVolumes
|
train
|
// AttachVolumes attaches each requested loop volume, returning positional
// results; individual failures are recorded per-result.
func (lvs *loopVolumeSource) AttachVolumes(ctx context.ProviderCallContext, args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) {
	results := make([]storage.AttachVolumesResult, len(args))
	for i, arg := range args {
		attachment, err := lvs.attachVolume(arg)
		if err == nil {
			results[i].VolumeAttachment = attachment
			continue
		}
		results[i].Error = errors.Annotatef(err, "attaching volume %v", arg.Volume.Id())
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4554
|
DetachVolumes
|
train
|
// DetachVolumes detaches each requested volume attachment, returning one
// positional error slot per request (nil on success).
func (lvs *loopVolumeSource) DetachVolumes(ctx context.ProviderCallContext, args []storage.VolumeAttachmentParams) ([]error, error) {
	results := make([]error, len(args))
	for i, arg := range args {
		err := lvs.detachVolume(arg.Volume)
		if err == nil {
			continue
		}
		results[i] = errors.Annotatef(err, "detaching volume %s", arg.Volume.Id())
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4555
|
createBlockFile
|
train
|
// createBlockFile reserves a sizeInMiB-mebibyte backing file at filePath
// using fallocate, which reserves the space without writing to it.
func createBlockFile(run runCommandFunc, filePath string, sizeInMiB uint64) error {
	size := fmt.Sprintf("%dMiB", sizeInMiB)
	if _, err := run("fallocate", "-l", size, filePath); err != nil {
		return errors.Annotatef(err, "allocating loop backing file %q", filePath)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4556
|
detachLoopDevice
|
train
|
// detachLoopDevice detaches the named loop device (e.g. "loop0") via
// "losetup -d /dev/<name>".
//
// Fix: the final statement was "return err", which is always nil at that
// point and read as though a real error might escape; return nil
// explicitly.
func detachLoopDevice(run runCommandFunc, deviceName string) error {
	if _, err := run("losetup", "-d", path.Join("/dev", deviceName)); err != nil {
		return errors.Annotatef(err, "detaching loop device %q", deviceName)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4557
|
associatedLoopDevices
|
train
|
// associatedLoopDevices returns the names (e.g. "loop0") of loop devices
// backed by filePath, as reported by "losetup -j".
//
// Fix: the original sliced line[:pos][len("/dev/"):] without checking the
// prefix, which panics (slice out of range) if losetup ever emits a device
// path shorter than "/dev/" before the colon; malformed lines now produce
// an error instead.
func associatedLoopDevices(run runCommandFunc, filePath string) ([]string, error) {
	stdout, err := run("losetup", "-j", filePath)
	if err != nil {
		return nil, errors.Trace(err)
	}
	stdout = strings.TrimSpace(stdout)
	if stdout == "" {
		return nil, nil
	}
	// The output will be zero or more lines with the format:
	// "/dev/loop0: [0021]:7504142 (/tmp/test.dat)"
	lines := strings.Split(stdout, "\n")
	deviceNames := make([]string, len(lines))
	for i, line := range lines {
		pos := strings.IndexRune(line, ':')
		if pos == -1 {
			return nil, errors.Errorf("unexpected output %q", line)
		}
		devicePath := line[:pos]
		if !strings.HasPrefix(devicePath, "/dev/") {
			return nil, errors.Errorf("unexpected output %q", line)
		}
		deviceNames[i] = strings.TrimPrefix(devicePath, "/dev/")
	}
	return deviceNames, nil
}
|
go
|
{
"resource": ""
}
|
q4558
|
NewRegisterCommand
|
train
|
func NewRegisterCommand() cmd.Command {
c := ®isterCommand{}
c.apiOpen = c.APIOpen
c.listModelsFunc = c.listModels
c.store = jujuclient.NewFileClientStore()
c.CanClearCurrentModel = true
return modelcmd.WrapBase(c)
}
|
go
|
{
"resource": ""
}
|
q4559
|
Info
|
train
|
// Info returns the command metadata for "juju register".
func (c *registerCommand) Info() *cmd.Info {
	info := &cmd.Info{
		Name:    "register",
		Args:    "<registration string>|<controller host name>",
		Purpose: usageRegisterSummary,
		Doc:     usageRegisterDetails,
	}
	return jujucmd.Info(info)
}
|
go
|
{
"resource": ""
}
|
q4560
|
Init
|
train
|
// Init consumes the single registration-data argument and rejects any
// extras.
func (c *registerCommand) Init(args []string) error {
	if len(args) == 0 {
		return errors.New("registration data missing")
	}
	c.Arg = args[0]
	if err := cmd.CheckEmpty(args[1:]); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4561
|
controllerDetails
|
train
|
// controllerDetails dispatches to the public-controller path when a public
// host was supplied, and the registration-token path otherwise.
func (c *registerCommand) controllerDetails(ctx *cmd.Context, p *registrationParams, controllerName string) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) {
	if p.publicHost == "" {
		return c.nonPublicControllerDetails(ctx, p, controllerName)
	}
	return c.publicControllerDetails(p.publicHost, controllerName)
}
|
go
|
{
"resource": ""
}
|
q4562
|
publicControllerDetails
|
train
|
// publicControllerDetails dials the given public controller host (default
// port 443), authenticating via the macaroon bakery client, and returns
// the controller and logged-in account details.
func (c *registerCommand) publicControllerDetails(host, controllerName string) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) {
	// errRet keeps the three-value error returns terse.
	errRet := func(err error) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) {
		return jujuclient.ControllerDetails{}, jujuclient.AccountDetails{}, err
	}
	apiAddr := host
	if !strings.Contains(apiAddr, ":") {
		apiAddr += ":443"
	}
	// Make a direct API connection because we don't yet know the
	// controller UUID so can't store the thus-incomplete controller
	// details to make a conventional connection.
	//
	// Unfortunately this means we'll connect twice to the controller
	// but it's probably best to go through the conventional path the
	// second time.
	bclient, err := c.BakeryClient(c.store, controllerName)
	if err != nil {
		return errRet(errors.Trace(err))
	}
	dialOpts := api.DefaultDialOpts()
	dialOpts.BakeryClient = bclient
	conn, err := c.apiOpen(&api.Info{
		Addrs: []string{apiAddr},
	}, dialOpts)
	if err != nil {
		return errRet(errors.Trace(err))
	}
	defer conn.Close()
	// The connection must have authenticated as a user, not an agent.
	user, ok := conn.AuthTag().(names.UserTag)
	if !ok {
		return errRet(errors.Errorf("logged in as %v, not a user", conn.AuthTag()))
	}
	// If we get to here, then we have a cached macaroon for the registered
	// user. If we encounter an error after here, we need to clear it.
	c.onRunError = func() {
		if err := c.ClearControllerMacaroons(c.store, controllerName); err != nil {
			logger.Errorf("failed to clear macaroon: %v", err)
		}
	}
	return jujuclient.ControllerDetails{
		APIEndpoints:   []string{apiAddr},
		ControllerUUID: conn.ControllerTag().Id(),
	}, jujuclient.AccountDetails{
		User:            user.Id(),
		LastKnownAccess: conn.ControllerAccess(),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4563
|
nonPublicControllerDetails
|
train
|
// nonPublicControllerDetails performs the registration workflow against a
// non-public controller: it atomically sets the user's new password while
// consuming the one-time secret key, authenticates the controller's response,
// and returns the controller and account details to be stored locally.
// On any failure it returns zero-valued details alongside the error.
func (c *registerCommand) nonPublicControllerDetails(ctx *cmd.Context, registrationParams *registrationParams, controllerName string) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) {
	// errRet pairs zero-value details with an error, keeping the many
	// error-return sites below short.
	errRet := func(err error) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) {
		return jujuclient.ControllerDetails{}, jujuclient.AccountDetails{}, err
	}
	// During registration we must set a new password. This has to be done
	// atomically with the clearing of the secret key.
	payloadBytes, err := json.Marshal(params.SecretKeyLoginRequestPayload{
		registrationParams.newPassword,
	})
	if err != nil {
		return errRet(errors.Trace(err))
	}
	// Make the registration call. If this is successful, the client's
	// cookie jar will be populated with a macaroon that may be used
	// to log in below without the user having to type in the password
	// again.
	req := params.SecretKeyLoginRequest{
		Nonce: registrationParams.nonce[:],
		User:  registrationParams.userTag.String(),
		// The password payload is sealed with the one-time nonce and the
		// secret key decoded from the registration string.
		PayloadCiphertext: secretbox.Seal(
			nil, payloadBytes,
			&registrationParams.nonce,
			&registrationParams.key,
		),
	}
	resp, err := c.secretKeyLogin(registrationParams.controllerAddrs, req, controllerName)
	if err != nil {
		// If we got here and got an error, the registration token supplied
		// will be expired.
		// Log the error as it will be useful for debugging, but give user a
		// suggestion for the way forward instead of error details.
		logger.Infof("while validating secret key: %v", err)
		err = errors.Errorf("Provided registration token may have been expired.\nA controller administrator must reset your user to issue a new token.\nSee %q for more information.", "juju help change-user-password")
		return errRet(errors.Trace(err))
	}
	// Decrypt the response to authenticate the controller and
	// obtain its CA certificate.
	if len(resp.Nonce) != len(registrationParams.nonce) {
		return errRet(errors.NotValidf("response nonce"))
	}
	var respNonce [24]byte
	copy(respNonce[:], resp.Nonce)
	// Opening the box with our secret key proves the response came from a
	// party that knows the same key, i.e. the controller.
	payloadBytes, ok := secretbox.Open(nil, resp.PayloadCiphertext, &respNonce, &registrationParams.key)
	if !ok {
		return errRet(errors.NotValidf("response payload"))
	}
	var responsePayload params.SecretKeyLoginResponsePayload
	if err := json.Unmarshal(payloadBytes, &responsePayload); err != nil {
		return errRet(errors.Annotate(err, "unmarshalling response payload"))
	}
	user := registrationParams.userTag.Id()
	ctx.Infof("Initial password successfully set for %s.", friendlyUserName(user))
	// If we get to here, then we have a cached macaroon for the registered
	// user. If we encounter an error after here, we need to clear it.
	c.onRunError = func() {
		if err := c.ClearControllerMacaroons(c.store, controllerName); err != nil {
			logger.Errorf("failed to clear macaroon: %v", err)
		}
	}
	return jujuclient.ControllerDetails{
		APIEndpoints:   registrationParams.controllerAddrs,
		ControllerUUID: responsePayload.ControllerUUID,
		CACert:         responsePayload.CACert,
	}, jujuclient.AccountDetails{
		User:            user,
		LastKnownAccess: string(permission.LoginAccess),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4564
|
updateController
|
train
|
// updateController persists the new controller and account details in the
// client store. If a controller with the same UUID is already registered
// under any name, it warns the user and refuses to register it again.
func (c *registerCommand) updateController(
	ctx *cmd.Context,
	store jujuclient.ClientStore,
	controllerName string,
	controllerDetails jujuclient.ControllerDetails,
	accountDetails jujuclient.AccountDetails,
) error {
	// Check that the same controller isn't already stored, so that we
	// can avoid needlessly asking for a controller name in that case.
	existing, err := store.AllControllers()
	if err != nil {
		return errors.Trace(err)
	}
	for existingName, existingCtl := range existing {
		if existingCtl.ControllerUUID != controllerDetails.ControllerUUID {
			continue
		}
		var msg bytes.Buffer
		templateErr := alreadyRegisteredMessageT.Execute(
			&msg,
			map[string]interface{}{
				"ControllerName": existingName,
				"UserName":       accountDetails.User,
			},
		)
		if templateErr != nil {
			return templateErr
		}
		ctx.Warningf(msg.String())
		return errors.Errorf("controller is already registered as %q", existingName)
	}
	if err := store.AddController(controllerName, controllerDetails); err != nil {
		return errors.Trace(err)
	}
	if err := store.UpdateAccount(controllerName, accountDetails); err != nil {
		return errors.Annotatef(err, "cannot update account information: %v", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4565
|
getParameters
|
train
|
// getParameters builds the registration parameters from the command-line
// argument. If the argument looks like a host name (contains a dot, or is
// "localhost"), the command targets a public controller and no key material
// is needed. Otherwise the argument is a base64/ASN.1-encoded registration
// string carrying addresses, user name, and a one-time secret key; in that
// case the user is also prompted for a new password and a random nonce is
// generated for encrypting the registration request.
func (c *registerCommand) getParameters(ctx *cmd.Context) (*registrationParams, error) {
	var params registrationParams
	if strings.Contains(c.Arg, ".") || c.Arg == "localhost" {
		// Looks like a host name - no URL-encoded base64 string should
		// contain a dot and every public controller name should.
		// Allow localhost for development purposes.
		params.publicHost = c.Arg
		// No need for password shenanigans if we're using a public controller.
		return &params, nil
	}
	// Decode key, username, controller addresses from the string supplied
	// on the command line.
	decodedData, err := base64.URLEncoding.DecodeString(c.Arg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var info jujuclient.RegistrationInfo
	if _, err := asn1.Unmarshal(decodedData, &info); err != nil {
		return nil, errors.Trace(err)
	}
	params.controllerAddrs = info.Addrs
	params.userTag = names.NewUserTag(info.User)
	// The secret key must be exactly the expected length; anything else
	// indicates a corrupt or truncated registration string.
	if len(info.SecretKey) != len(params.key) {
		return nil, errors.NotValidf("secret key")
	}
	copy(params.key[:], info.SecretKey)
	params.defaultControllerName = info.ControllerName
	// Prompt the user for the new password to set.
	newPassword, err := c.promptNewPassword(ctx.Stderr, ctx.Stdin)
	if err != nil {
		return nil, errors.Trace(err)
	}
	params.newPassword = newPassword
	// Generate a random nonce for encrypting the request.
	if _, err := rand.Read(params.nonce[:]); err != nil {
		return nil, errors.Trace(err)
	}
	return &params, nil
}
|
go
|
{
"resource": ""
}
|
q4566
|
NewProvisionerAPIV4
|
train
|
// NewProvisionerAPIV4 creates a new server-side version 4 Provisioner API
// facade, layered over the version 5 implementation.
func NewProvisionerAPIV4(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV4, error) {
	inner, err := NewProvisionerAPIV5(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV4{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4567
|
NewProvisionerAPIV5
|
train
|
// NewProvisionerAPIV5 creates a new server-side version 5 Provisioner API
// facade, layered over the version 6 implementation.
func NewProvisionerAPIV5(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV5, error) {
	inner, err := NewProvisionerAPIV6(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV5{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4568
|
NewProvisionerAPIV6
|
train
|
// NewProvisionerAPIV6 creates a new server-side version 6 Provisioner API
// facade, layered over the version 7 implementation.
func NewProvisionerAPIV6(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV6, error) {
	inner, err := NewProvisionerAPIV7(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV6{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4569
|
NewProvisionerAPIV7
|
train
|
// NewProvisionerAPIV7 creates a new server-side version 7 Provisioner API
// facade, layered over the version 8 implementation.
func NewProvisionerAPIV7(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV7, error) {
	inner, err := NewProvisionerAPIV8(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV7{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4570
|
NewProvisionerAPIV8
|
train
|
// NewProvisionerAPIV8 creates a new server-side version 8 Provisioner API
// facade, layered over the version 9 implementation.
func NewProvisionerAPIV8(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV8, error) {
	inner, err := NewProvisionerAPIV9(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV8{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4571
|
NewProvisionerAPIV9
|
train
|
// NewProvisionerAPIV9 creates a new server-side version 9 Provisioner API
// facade, layered over the latest implementation.
func NewProvisionerAPIV9(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPIV9, error) {
	inner, err := NewProvisionerAPI(st, resources, authorizer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ProvisionerAPIV9{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q4572
|
WatchContainers
|
train
|
// WatchContainers starts a watcher for each requested machine's containers,
// recording per-entry errors in the corresponding result slot.
func (p *ProvisionerAPI) WatchContainers(args params.WatchContainers) (params.StringsWatchResults, error) {
	watchResults := make([]params.StringsWatchResult, len(args.Params))
	for i, param := range args.Params {
		res, err := p.watchOneMachineContainers(param)
		res.Error = common.ServerError(err)
		watchResults[i] = res
	}
	return params.StringsWatchResults{Results: watchResults}, nil
}
|
go
|
{
"resource": ""
}
|
q4573
|
WatchAllContainers
|
train
|
// WatchAllContainers delegates to WatchContainers; both endpoints share the
// same implementation.
func (p *ProvisionerAPI) WatchAllContainers(args params.WatchContainers) (params.StringsWatchResults, error) {
	return p.WatchContainers(args)
}
|
go
|
{
"resource": ""
}
|
q4574
|
SetSupportedContainers
|
train
|
// SetSupportedContainers records, for each machine, which container types it
// can host; an empty list marks the machine as supporting no containers.
func (p *ProvisionerAPI) SetSupportedContainers(args params.MachineContainersParams) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Params)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return results, err
	}
	for i, arg := range args.Params {
		machineTag, parseErr := names.ParseMachineTag(arg.MachineTag)
		if parseErr != nil {
			logger.Warningf("SetSupportedContainers called with %q which is not a valid machine tag: %v", arg.MachineTag, parseErr)
			// Invalid tags are reported as permission errors.
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, getErr := p.getMachine(canAccess, machineTag)
		if getErr != nil {
			results.Results[i].Error = common.ServerError(getErr)
			continue
		}
		var setErr error
		if len(arg.ContainerTypes) == 0 {
			setErr = machine.SupportsNoContainers()
		} else {
			setErr = machine.SetSupportedContainers(arg.ContainerTypes)
		}
		if setErr != nil {
			results.Results[i].Error = common.ServerError(setErr)
		}
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4575
|
SupportedContainers
|
train
|
// SupportedContainers reports, for each machine entity, the container types
// it supports and whether that set has been determined yet.
func (p *ProvisionerAPI) SupportedContainers(args params.Entities) (params.MachineContainerResults, error) {
	results := params.MachineContainerResults{
		Results: make([]params.MachineContainerResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return results, err
	}
	for i, entity := range args.Entities {
		machineTag, parseErr := names.ParseMachineTag(entity.Tag)
		if parseErr != nil {
			logger.Warningf("SupportedContainers called with %q which is not a valid machine tag: %v", entity.Tag, parseErr)
			// Invalid tags are reported as permission errors.
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, getErr := p.getMachine(canAccess, machineTag)
		if getErr != nil {
			results.Results[i].Error = common.ServerError(getErr)
			continue
		}
		types, determined := machine.SupportedContainers()
		results.Results[i].ContainerTypes = types
		results.Results[i].Determined = determined
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4576
|
MachinesWithTransientErrors
|
train
|
// MachinesWithTransientErrors returns status data for every unprovisioned
// machine the caller may access whose instance status is an error (or
// provisioning error) marked as transient in its status data.
func (p *ProvisionerAPI) MachinesWithTransientErrors() (params.StatusResults, error) {
	var results params.StatusResults
	canAccessFunc, err := p.getAuthFunc()
	if err != nil {
		return results, err
	}
	// TODO (wallyworld) - add state.State API for more efficient machines query
	machines, err := p.st.AllMachines()
	if err != nil {
		return results, err
	}
	for _, machine := range machines {
		// Skip machines the caller is not authorised to see.
		if !canAccessFunc(machine.Tag()) {
			continue
		}
		if _, provisionedErr := machine.InstanceId(); provisionedErr == nil {
			// Machine may have been provisioned but machiner hasn't set the
			// status to Started yet.
			continue
		}
		var result params.StatusResult
		statusInfo, err := machine.InstanceStatus()
		if err != nil {
			// Best effort: machines whose instance status cannot be read
			// are silently skipped.
			continue
		}
		result.Status = statusInfo.Status.String()
		result.Info = statusInfo.Message
		result.Data = statusInfo.Data
		if statusInfo.Status != status.Error && statusInfo.Status != status.ProvisioningError {
			continue
		}
		// Transient errors are marked as such in the status data.
		if transient, ok := result.Data["transient"].(bool); !ok || !transient {
			continue
		}
		result.Id = machine.Id()
		result.Life = params.Life(machine.Life().String())
		results.Results = append(results.Results, result)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4577
|
Series
|
train
|
// Series returns the OS series of each given machine entity.
func (p *ProvisionerAPI) Series(args params.Entities) (params.StringResults, error) {
	out := params.StringResults{
		Results: make([]params.StringResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return out, err
	}
	for i, entity := range args.Entities {
		machineTag, parseErr := names.ParseMachineTag(entity.Tag)
		if parseErr != nil {
			// Invalid tags are reported as permission errors.
			out.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, getErr := p.getMachine(canAccess, machineTag)
		if getErr != nil {
			out.Results[i].Error = common.ServerError(getErr)
			continue
		}
		out.Results[i].Result = machine.Series()
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q4578
|
KeepInstance
|
train
|
// KeepInstance reports, for each given machine entity, whether the instance
// should be kept when the machine is removed.
//
// Bug fix: the previous implementation declared a shadowed err with
// `keep, err := machine.KeepInstance()`, recorded it, and then immediately
// overwrote the result's Error with the outer (nil) err — so failures from
// KeepInstance were silently dropped. The inner error is now assigned to the
// outer err so the single trailing assignment reports it correctly.
func (p *ProvisionerAPI) KeepInstance(args params.Entities) (params.BoolResults, error) {
	result := params.BoolResults{
		Results: make([]params.BoolResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, err
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Invalid tags are reported as permission errors.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			var keep bool
			// Plain assignment (not :=) so errors propagate to the
			// trailing ServerError call below.
			keep, err = machine.KeepInstance()
			result.Results[i].Result = keep
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4579
|
DistributionGroup
|
train
|
// DistributionGroup returns, for each given machine entity, the instance ids
// in its distribution group: for controller machines the other controller
// instances, otherwise instances hosting units of the same applications.
func (p *ProvisionerAPI) DistributionGroup(args params.Entities) (params.DistributionGroupResults, error) {
	result := params.DistributionGroupResults{
		Results: make([]params.DistributionGroupResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, err
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Invalid tags are reported as permission errors.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			// If the machine is a controller, return
			// controller instances. Otherwise, return
			// instances with services in common with the machine
			// being provisioned.
			if machine.IsManager() {
				result.Results[i].Result, err = controllerInstances(p.st)
			} else {
				result.Results[i].Result, err = commonServiceInstances(p.st, machine)
			}
		}
		// Records the getMachine error or the group-lookup error above
		// (nil on success).
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4580
|
controllerInstances
|
train
|
// controllerInstances returns the instance ids of all provisioned controller
// machines; machines that are not yet provisioned are skipped.
func controllerInstances(st *state.State) ([]instance.Id, error) {
	info, err := st.ControllerInfo()
	if err != nil {
		return nil, err
	}
	ids := make([]instance.Id, 0, len(info.MachineIds))
	for _, machineId := range info.MachineIds {
		m, err := st.Machine(machineId)
		if err != nil {
			return nil, err
		}
		instId, err := m.InstanceId()
		switch {
		case err == nil:
			ids = append(ids, instId)
		case errors.IsNotProvisioned(err):
			// Unprovisioned controllers contribute no instance id.
		default:
			return nil, err
		}
	}
	return ids, nil
}
|
go
|
{
"resource": ""
}
|
q4581
|
commonServiceInstances
|
train
|
// commonServiceInstances returns the instance ids of machines hosting units
// of the applications whose principal units run on the given machine. The
// result is sorted for deterministic output.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	seen := make(set.Strings)
	for _, unit := range units {
		// Subordinate units do not contribute applications of their own.
		if !unit.IsPrincipal() {
			continue
		}
		ids, err := state.ApplicationInstances(st, unit.ApplicationName())
		if err != nil {
			return nil, err
		}
		for _, id := range ids {
			seen.Add(string(id))
		}
	}
	// Sort values to simplify testing.
	sorted := seen.SortedValues()
	result := make([]instance.Id, len(sorted))
	for i, id := range sorted {
		result[i] = instance.Id(id)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4582
|
DistributionGroupByMachineId
|
train
|
// DistributionGroupByMachineId returns, for each given machine entity, the
// machine ids in its distribution group: for controller machines the other
// controller machine ids, otherwise machines hosting units of the same
// applications.
func (p *ProvisionerAPI) DistributionGroupByMachineId(args params.Entities) (params.StringsResults, error) {
	result := params.StringsResults{
		Results: make([]params.StringsResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return params.StringsResults{}, err
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Invalid tags are reported as permission errors.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			// If the machine is a controller, return
			// controller instances. Otherwise, return
			// instances with services in common with the machine
			// being provisioned.
			if machine.IsManager() {
				result.Results[i].Result, err = controllerMachineIds(p.st, machine)
			} else {
				result.Results[i].Result, err = commonApplicationMachineId(p.st, machine)
			}
		}
		// Records the getMachine error or the group-lookup error above
		// (nil on success).
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4583
|
controllerMachineIds
|
train
|
// controllerMachineIds returns the sorted ids of all controller machines
// other than the given machine itself.
func controllerMachineIds(st *state.State, m *state.Machine) ([]string, error) {
	info, err := st.ControllerInfo()
	if err != nil {
		return nil, err
	}
	ids := set.NewStrings(info.MachineIds...)
	// The machine itself is never part of its own group.
	ids.Remove(m.Id())
	return ids.SortedValues(), nil
}
|
go
|
{
"resource": ""
}
|
q4584
|
commonApplicationMachineId
|
train
|
// commonApplicationMachineId returns the sorted ids of machines hosting the
// same applications as the given machine's principal units, excluding the
// machine itself.
func commonApplicationMachineId(st *state.State, m *state.Machine) ([]string, error) {
	var ids set.Strings
	for _, appName := range m.Principals() {
		machines, err := state.ApplicationMachines(st, appName)
		if err != nil {
			return nil, err
		}
		ids = ids.Union(set.NewStrings(machines...))
	}
	// The machine itself is never part of its own group.
	ids.Remove(m.Id())
	return ids.SortedValues(), nil
}
|
go
|
{
"resource": ""
}
|
q4585
|
Constraints
|
train
|
// Constraints returns the machine constraints for each given machine entity.
func (p *ProvisionerAPI) Constraints(args params.Entities) (params.ConstraintsResults, error) {
	result := params.ConstraintsResults{
		Results: make([]params.ConstraintsResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, err
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Invalid tags are reported as permission errors.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			var cons constraints.Value
			// Plain assignment so a Constraints error reaches the
			// trailing ServerError call below.
			cons, err = machine.Constraints()
			if err == nil {
				result.Results[i].Constraints = cons
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4586
|
SetInstanceInfo
|
train
|
// SetInstanceInfo records provisioning information (instance id, nonce,
// hardware characteristics, network devices/addresses, volumes and their
// attachments, charm profiles) against each given machine.
func (p *ProvisionerAPI) SetInstanceInfo(args params.InstancesInfo) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Machines)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, err
	}
	// setInstanceInfo handles a single machine; any error it returns is
	// recorded in the corresponding result slot below.
	setInstanceInfo := func(arg params.InstanceInfo) error {
		tag, err := names.ParseMachineTag(arg.Tag)
		if err != nil {
			// Invalid tags are reported as permission errors.
			return common.ErrPerm
		}
		machine, err := p.getMachine(canAccess, tag)
		if err != nil {
			return err
		}
		// Translate the wire-format storage arguments into state types.
		volumes, err := storagecommon.VolumesToState(arg.Volumes)
		if err != nil {
			return err
		}
		volumeAttachments, err := storagecommon.VolumeAttachmentInfosToState(arg.VolumeAttachments)
		if err != nil {
			return err
		}
		devicesArgs, devicesAddrs := networkingcommon.NetworkConfigsToStateArgs(arg.NetworkConfig)
		err = machine.SetInstanceInfo(
			arg.InstanceId, arg.DisplayName, arg.Nonce, arg.Characteristics,
			devicesArgs, devicesAddrs,
			volumes, volumeAttachments, arg.CharmProfiles,
		)
		if err != nil {
			return errors.Annotatef(err, "cannot record provisioning info for %q", arg.InstanceId)
		}
		return nil
	}
	for i, arg := range args.Machines {
		err := setInstanceInfo(arg)
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4587
|
WatchMachineErrorRetry
|
train
|
// WatchMachineErrorRetry returns a NotifyWatcher that signals when the
// provisioner should retry machines with transient provisioning errors.
// Only controller agents may call this.
func (p *ProvisionerAPI) WatchMachineErrorRetry() (params.NotifyWatchResult, error) {
	result := params.NotifyWatchResult{}
	if !p.authorizer.AuthController() {
		return result, common.ErrPerm
	}
	watch := newWatchMachineErrorRetry()
	// Consume any initial event and forward it to the result.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = p.resources.Register(watch)
	} else {
		// The channel closed before delivering an initial event: surface
		// the watcher's underlying error.
		return result, watcher.EnsureErr(watch)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4588
|
ReleaseContainerAddresses
|
train
|
// ReleaseContainerAddresses marks for removal all addresses allocated to
// each given container. Non-container machines are rejected with an error.
func (p *ProvisionerAPI) ReleaseContainerAddresses(args params.Entities) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		logger.Errorf("failed to get an authorisation function: %v", err)
		return result, errors.Trace(err)
	}
	// Loop over the passed container tags.
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			logger.Warningf("failed to parse machine tag %q: %v", entity.Tag, err)
			// Invalid tags are reported as permission errors.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		guest, err := p.getMachine(canAccess, tag)
		if err != nil {
			logger.Warningf("failed to get machine %q: %v", tag, err)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !guest.IsContainer() {
			err = errors.Errorf("cannot mark addresses for removal for %q: not a container", tag)
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		// TODO(dimitern): Release those via the provider once we have
		// Environ.ReleaseContainerAddresses. See LP bug http://pad.lv/1585878
		err = guest.RemoveAllAddresses()
		if err != nil {
			logger.Warningf("failed to remove container %q addresses: %v", tag, err)
			result.Results[i].Error = common.ServerError(err)
			continue
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4589
|
prepareContainerAccessEnvironment
|
train
|
// prepareContainerAccessEnvironment resolves the environ, the authenticated
// host machine, and the auth function needed by container-networking
// endpoints. The caller must be a machine agent; its tag identifies the
// host machine of the containers being processed.
func (p *ProvisionerAPI) prepareContainerAccessEnvironment() (environs.Environ, *state.Machine, common.AuthFunc, error) {
	env, err := environs.GetEnviron(p.configGetter, environs.New)
	if err != nil {
		return nil, nil, nil, errors.Trace(err)
	}
	// TODO(jam): 2017-02-01 NetworkingEnvironFromModelConfig used to do this, but it doesn't feel good
	if env.Config().Type() == "dummy" {
		return nil, nil, nil, errors.NotSupportedf("dummy provider network config")
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return nil, nil, nil, errors.Annotate(err, "cannot authenticate request")
	}
	// The authenticated entity must be a machine; its tag names the host.
	hostAuthTag := p.authorizer.GetAuthTag()
	if hostAuthTag == nil {
		return nil, nil, nil, errors.Errorf("authenticated entity tag is nil")
	}
	hostTag, err := names.ParseMachineTag(hostAuthTag.String())
	if err != nil {
		return nil, nil, nil, errors.Trace(err)
	}
	host, err := p.getMachine(canAccess, hostTag)
	if err != nil {
		return nil, nil, nil, errors.Trace(err)
	}
	return env, host, canAccess, nil
}
|
go
|
{
"resource": ""
}
|
q4590
|
HostChangesForContainers
|
train
|
// HostChangesForContainers computes, for each requested container, the
// network changes required on the host machine.
func (p *ProvisionerAPI) HostChangesForContainers(args params.Entities) (params.HostNetworkChangeResults, error) {
	ctx := &hostChangesContext{}
	ctx.result.Results = make([]params.HostNetworkChange, len(args.Entities))
	err := p.processEachContainer(args, ctx)
	if err != nil {
		return ctx.result, errors.Trace(err)
	}
	return ctx.result, nil
}
|
go
|
{
"resource": ""
}
|
q4591
|
SetError
|
train
|
// SetError records err against the result at the given index.
func (ctx *containerProfileContext) SetError(idx int, err error) {
	res := &ctx.result.Results[idx]
	res.Error = common.ServerError(err)
}
|
go
|
{
"resource": ""
}
|
q4592
|
SetModificationStatus
|
train
|
// SetModificationStatus updates the modification status of each given
// machine, recording per-entry errors in the corresponding result slot.
func (p *ProvisionerAPI) SetModificationStatus(args params.SetStatus) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		logger.Errorf("failed to get an authorisation function: %v", err)
		return results, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		setErr := p.setOneModificationStatus(canAccess, entity)
		results.Results[i].Error = common.ServerError(setErr)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4593
|
MarkMachinesForRemoval
|
train
|
// MarkMachinesForRemoval flags each given machine for removal, recording
// per-entry errors in the corresponding result slot.
func (p *ProvisionerAPI) MarkMachinesForRemoval(machines params.Entities) (params.ErrorResults, error) {
	canAccess, err := p.getAuthFunc()
	if err != nil {
		logger.Errorf("failed to get an authorisation function: %v", err)
		return params.ErrorResults{}, errors.Trace(err)
	}
	errResults := make([]params.ErrorResult, len(machines.Entities))
	for i, entity := range machines.Entities {
		markErr := p.markOneMachineForRemoval(entity.Tag, canAccess)
		errResults[i].Error = common.ServerError(markErr)
	}
	return params.ErrorResults{Results: errResults}, nil
}
|
go
|
{
"resource": ""
}
|
q4594
|
RemoveUpgradeCharmProfileData
|
train
|
// RemoveUpgradeCharmProfileData performs no action and reports a nil error
// for every entity. (NOTE(review): presumably kept only for wire
// compatibility with older V8 clients — confirm against the facade
// registry.)
func (p *ProvisionerAPIV8) RemoveUpgradeCharmProfileData(args params.Entities) (params.ErrorResults, error) {
	blank := make([]params.ErrorResult, len(args.Entities))
	return params.ErrorResults{Results: blank}, nil
}
|
go
|
{
"resource": ""
}
|
q4595
|
extractRegistryURL
|
train
|
// extractRegistryURL returns the registry domain component of the given
// container image path.
func extractRegistryURL(imagePath string) (string, error) {
	named, err := reference.ParseNormalizedNamed(imagePath)
	if err != nil {
		return "", errors.Annotate(err, "extracting registry from path")
	}
	return reference.Domain(named), nil
}
|
go
|
{
"resource": ""
}
|
q4596
|
copy
|
train
|
// copy returns an independent copy of the State, duplicating the Members
// map when present so the copy can be mutated safely.
func (s *State) copy() *State {
	// Named "dup" rather than "copy" to avoid shadowing the builtin.
	dup := &State{
		RelationId:     s.RelationId,
		ChangedPending: s.ChangedPending,
	}
	if s.Members != nil {
		dup.Members = map[string]int64{}
		for name, version := range s.Members {
			dup.Members[name] = version
		}
	}
	return dup
}
|
go
|
{
"resource": ""
}
|
q4597
|
Validate
|
train
|
// Validate returns an error if hi does not represent a valid change to the
// relation state: wrong relation id, hooks after the relation is broken,
// breaking while members remain, out-of-order hooks while a "changed" hook
// is pending, or joining/changing in the wrong membership state.
func (s *State) Validate(hi hook.Info) (err error) {
	defer errors.DeferredAnnotatef(&err, "inappropriate %q for %q", hi.Kind, hi.RemoteUnit)
	if hi.RelationId != s.RelationId {
		return fmt.Errorf("expected relation %d, got relation %d", s.RelationId, hi.RelationId)
	}
	// A nil Members map marks a relation that has already run
	// relation-broken (see the StateDir handling elsewhere in this file).
	if s.Members == nil {
		return fmt.Errorf(`relation is broken and cannot be changed further`)
	}
	unit, kind := hi.RemoteUnit, hi.Kind
	if kind == hooks.RelationBroken {
		if len(s.Members) == 0 {
			return nil
		}
		return fmt.Errorf(`cannot run "relation-broken" while units still present`)
	}
	if s.ChangedPending != "" {
		// While a changed hook is pending, only that exact hook for that
		// exact unit is acceptable.
		if unit != s.ChangedPending || kind != hooks.RelationChanged {
			return fmt.Errorf(`expected "relation-changed" for %q`, s.ChangedPending)
		}
	} else if _, joined := s.Members[unit]; joined && kind == hooks.RelationJoined {
		// Note: `joined` declared in this else-if's init remains in scope
		// for the following else-if branch.
		return fmt.Errorf("unit already joined")
	} else if !joined && kind != hooks.RelationJoined {
		return fmt.Errorf("unit has not joined")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4598
|
ReadStateDir
|
train
|
// ReadStateDir loads relation state for relationId from the directory
// dirPath/<relationId>. If the directory does not exist, a StateDir with
// empty membership is returned without error. Each unit file contributes a
// member and its change version; at most one unit may have a pending
// "changed" hook.
func ReadStateDir(dirPath string, relationId int) (d *StateDir, err error) {
	d = &StateDir{
		filepath.Join(dirPath, strconv.Itoa(relationId)),
		State{relationId, map[string]int64{}, ""},
	}
	defer errors.DeferredAnnotatef(&err, "cannot load relation state from %q", d.path)
	if _, err := os.Stat(d.path); os.IsNotExist(err) {
		// No directory yet: return the empty (but valid) state.
		return d, nil
	} else if err != nil {
		return nil, err
	}
	fis, err := ioutil.ReadDir(d.path)
	if err != nil {
		return nil, err
	}
	for _, fi := range fis {
		// Entries with names ending in "-" followed by an integer must be
		// files containing valid unit data; all other names are ignored.
		name := fi.Name()
		i := strings.LastIndex(name, "-")
		if i == -1 {
			continue
		}
		svcName := name[:i]
		unitId := name[i+1:]
		if _, err := strconv.Atoi(unitId); err != nil {
			continue
		}
		// Reconstruct the unit name from the "<app>-<number>" file name.
		unitName := svcName + "/" + unitId
		var info diskInfo
		if err = utils.ReadYaml(filepath.Join(d.path, name), &info); err != nil {
			return nil, fmt.Errorf("invalid unit file %q: %v", name, err)
		}
		if info.ChangeVersion == nil {
			return nil, fmt.Errorf(`invalid unit file %q: "changed-version" not set`, name)
		}
		d.state.Members[unitName] = *info.ChangeVersion
		if info.ChangedPending {
			// Only one unit may have a pending changed hook at a time.
			if d.state.ChangedPending != "" {
				return nil, fmt.Errorf("%q and %q both have pending changed hooks", d.state.ChangedPending, unitName)
			}
			d.state.ChangedPending = unitName
		}
	}
	return d, nil
}
|
go
|
{
"resource": ""
}
|
q4599
|
ReadAllStateDirs
|
train
|
// ReadAllStateDirs loads relation state from every integer-named
// subdirectory of dirPath, keyed by relation id. A missing dirPath yields
// a nil map and no error.
func ReadAllStateDirs(dirPath string) (dirs map[int]*StateDir, err error) {
	defer errors.DeferredAnnotatef(&err, "cannot load relations state from %q", dirPath)
	if _, statErr := os.Stat(dirPath); os.IsNotExist(statErr) {
		return nil, nil
	} else if statErr != nil {
		return nil, statErr
	}
	entries, readErr := ioutil.ReadDir(dirPath)
	if readErr != nil {
		return nil, readErr
	}
	dirs = map[int]*StateDir{}
	for _, entry := range entries {
		// Entries with integer names must be directories containing
		// StateDir data; all other names will be ignored.
		relationId, convErr := strconv.Atoi(entry.Name())
		if convErr != nil {
			// This doesn't look like a relation.
			continue
		}
		dir, dirErr := ReadStateDir(dirPath, relationId)
		if dirErr != nil {
			return nil, dirErr
		}
		dirs[relationId] = dir
	}
	return dirs, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.