_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q3600
|
AddSpace
|
train
|
// AddSpace creates and returns a new space with the given name, optional
// provider id and public flag, and assigns the listed subnets to it. The
// name must be a valid space name; a non-empty providerId must be unique.
func (st *State) AddSpace(name string, providerId network.Id, subnets []string, isPublic bool) (newSpace *Space, err error) {
	defer errors.DeferredAnnotatef(&err, "adding space %q", name)
	if !names.IsValidSpace(name) {
		return nil, errors.NewNotValid(nil, "invalid space name")
	}
	spaceDoc := spaceDoc{
		Life:       Alive,
		Name:       name,
		IsPublic:   isPublic,
		ProviderId: string(providerId),
	}
	newSpace = &Space{doc: spaceDoc, st: st}
	ops := []txn.Op{{
		C:      spacesC,
		Id:     name,
		Assert: txn.DocMissing,
		Insert: spaceDoc,
	}}
	if providerId != "" {
		ops = append(ops, st.networkEntityGlobalKeyOp("space", providerId))
	}
	for _, subnetId := range subnets {
		// TODO:(mfoord) once we have refcounting for subnets we should
		// also assert that the refcount is zero as moving the space of a
		// subnet in use is not permitted.
		ops = append(ops, txn.Op{
			C:      subnetsC,
			Id:     subnetId,
			Assert: bson.D{bson.DocElem{"fan-local-underlay", bson.D{{"$exists", false}}}},
			Update: bson.D{{"$set", bson.D{{"space-name", name}}}},
		})
	}
	if err := st.db().RunTransaction(ops); err == txn.ErrAborted {
		// Work out why the transaction aborted so we can report
		// something more helpful than "aborted".
		if _, err := st.Space(name); err == nil {
			return nil, errors.AlreadyExistsf("space %q", name)
		}
		for _, subnetId := range subnets {
			subnet, err := st.Subnet(subnetId)
			if err != nil {
				// BUG FIX: previously only NotFound errors returned
				// here; any other lookup error fell through and
				// dereferenced a nil subnet below.
				return nil, errors.Trace(err)
			}
			if subnet.FanLocalUnderlay() != "" {
				return nil, errors.Errorf("Can't set space for FAN subnet %q - it's always inherited from underlay", subnet.CIDR())
			}
		}
		if err := newSpace.Refresh(); err != nil {
			if errors.IsNotFound(err) {
				// The insert itself was rejected: the provider id
				// uniqueness op must have failed.
				return nil, errors.Errorf("ProviderId %q not unique", providerId)
			}
			return nil, errors.Trace(err)
		}
		return nil, errors.Trace(err)
	} else if err != nil {
		return nil, err
	}
	return newSpace, nil
}
|
go
|
{
"resource": ""
}
|
q3601
|
Space
|
train
|
// Space returns the space with the given name, or a NotFound error if
// no such space exists.
func (st *State) Space(name string) (*Space, error) {
	col, closer := st.db().GetCollection(spacesC)
	defer closer()

	var doc spaceDoc
	switch err := col.FindId(name).One(&doc); err {
	case nil:
		return &Space{st, doc}, nil
	case mgo.ErrNotFound:
		return nil, errors.NotFoundf("space %q", name)
	default:
		return nil, errors.Annotatef(err, "cannot get space %q", name)
	}
}
|
go
|
{
"resource": ""
}
|
q3602
|
AllSpaces
|
train
|
// AllSpaces returns every space known to the model.
func (st *State) AllSpaces() ([]*Space, error) {
	col, closer := st.db().GetCollection(spacesC)
	defer closer()

	var docs []spaceDoc
	if err := col.Find(nil).All(&docs); err != nil {
		return nil, errors.Annotatef(err, "cannot get all spaces")
	}
	result := make([]*Space, len(docs))
	for i, d := range docs {
		result[i] = &Space{st: st, doc: d}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3603
|
EnsureDead
|
train
|
// EnsureDead marks the space Dead. It is a no-op when the space is
// already dead, and fails when the space is no longer alive in state.
func (s *Space) EnsureDead() (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set space %q to dead", s)

	if s.doc.Life == Dead {
		// Nothing to do.
		return nil
	}
	ops := []txn.Op{{
		C:      spacesC,
		Id:     s.doc.Name,
		Update: bson.D{{"$set", bson.D{{"life", Dead}}}},
		Assert: isAliveDoc,
	}}
	if txnErr := s.st.db().RunTransaction(ops); txnErr != nil {
		// An abort means the alive assertion failed.
		return onAbort(txnErr, spaceNotAliveErr)
	}
	s.doc.Life = Dead
	return nil
}
|
go
|
{
"resource": ""
}
|
q3604
|
Remove
|
train
|
// Remove deletes the space permanently. The space must already be Dead.
func (s *Space) Remove() (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot remove space %q", s)

	if s.doc.Life != Dead {
		return errors.New("space is not dead")
	}
	ops := []txn.Op{{
		C:      spacesC,
		Id:     s.doc.Name,
		Remove: true,
		Assert: isDeadDoc,
	}}
	// Clean up the provider-id global key, if one was registered.
	if id := s.ProviderId(); id != "" {
		ops = append(ops, s.st.networkEntityGlobalKeyRemoveOp("space", id))
	}
	if txnErr := s.st.db().RunTransaction(ops); txnErr != nil {
		return onAbort(txnErr, errors.New("not found or not dead"))
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3605
|
NewMockUpgradeSeriesBackend
|
train
|
// NewMockUpgradeSeriesBackend creates a new UpgradeSeriesBackend mock
// bound to the given gomock controller.
func NewMockUpgradeSeriesBackend(ctrl *gomock.Controller) *MockUpgradeSeriesBackend {
	m := &MockUpgradeSeriesBackend{ctrl: ctrl}
	m.recorder = &MockUpgradeSeriesBackendMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3606
|
NewMockUpgradeSeriesMachine
|
train
|
// NewMockUpgradeSeriesMachine creates a new UpgradeSeriesMachine mock
// bound to the given gomock controller.
func NewMockUpgradeSeriesMachine(ctrl *gomock.Controller) *MockUpgradeSeriesMachine {
	m := &MockUpgradeSeriesMachine{ctrl: ctrl}
	m.recorder = &MockUpgradeSeriesMachineMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3607
|
RemoveUpgradeSeriesLock
|
train
|
// RemoveUpgradeSeriesLock mocks the method of the same name, delegating
// to the gomock controller.
func (m *MockUpgradeSeriesMachine) RemoveUpgradeSeriesLock() error {
	results := m.ctrl.Call(m, "RemoveUpgradeSeriesLock")
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3608
|
RemoveUpgradeSeriesLock
|
train
|
// RemoveUpgradeSeriesLock registers an expected call of
// RemoveUpgradeSeriesLock with the mock recorder.
func (mr *MockUpgradeSeriesMachineMockRecorder) RemoveUpgradeSeriesLock() *gomock.Call {
	method := reflect.TypeOf((*MockUpgradeSeriesMachine)(nil).RemoveUpgradeSeriesLock)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUpgradeSeriesLock", method)
}
|
go
|
{
"resource": ""
}
|
q3609
|
Series
|
train
|
// Series mocks the method of the same name, delegating to the gomock
// controller.
func (m *MockUpgradeSeriesMachine) Series() string {
	results := m.ctrl.Call(m, "Series")
	s, _ := results[0].(string)
	return s
}
|
go
|
{
"resource": ""
}
|
q3610
|
SetUpgradeSeriesStatus
|
train
|
// SetUpgradeSeriesStatus mocks the method of the same name, delegating
// to the gomock controller.
func (m *MockUpgradeSeriesMachine) SetUpgradeSeriesStatus(arg0 model.UpgradeSeriesStatus, arg1 string) error {
	results := m.ctrl.Call(m, "SetUpgradeSeriesStatus", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3611
|
StartUpgradeSeriesUnitCompletion
|
train
|
// StartUpgradeSeriesUnitCompletion mocks the method of the same name,
// delegating to the gomock controller.
func (m *MockUpgradeSeriesMachine) StartUpgradeSeriesUnitCompletion(arg0 string) error {
	results := m.ctrl.Call(m, "StartUpgradeSeriesUnitCompletion", arg0)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3612
|
UpdateMachineSeries
|
train
|
// UpdateMachineSeries mocks the method of the same name, delegating to
// the gomock controller.
func (m *MockUpgradeSeriesMachine) UpdateMachineSeries(arg0 string, arg1 bool) error {
	results := m.ctrl.Call(m, "UpdateMachineSeries", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3613
|
UpgradeSeriesStatus
|
train
|
// UpgradeSeriesStatus mocks the method of the same name, delegating to
// the gomock controller.
func (m *MockUpgradeSeriesMachine) UpgradeSeriesStatus() (model.UpgradeSeriesStatus, error) {
	results := m.ctrl.Call(m, "UpgradeSeriesStatus")
	status, _ := results[0].(model.UpgradeSeriesStatus)
	err, _ := results[1].(error)
	return status, err
}
|
go
|
{
"resource": ""
}
|
q3614
|
UpgradeSeriesTarget
|
train
|
// UpgradeSeriesTarget mocks the method of the same name, delegating to
// the gomock controller.
func (m *MockUpgradeSeriesMachine) UpgradeSeriesTarget() (string, error) {
	results := m.ctrl.Call(m, "UpgradeSeriesTarget")
	target, _ := results[0].(string)
	err, _ := results[1].(error)
	return target, err
}
|
go
|
{
"resource": ""
}
|
q3615
|
UpgradeSeriesUnitStatuses
|
train
|
// UpgradeSeriesUnitStatuses mocks the method of the same name,
// delegating to the gomock controller.
func (m *MockUpgradeSeriesMachine) UpgradeSeriesUnitStatuses() (map[string]state.UpgradeSeriesUnitStatus, error) {
	results := m.ctrl.Call(m, "UpgradeSeriesUnitStatuses")
	statuses, _ := results[0].(map[string]state.UpgradeSeriesUnitStatus)
	err, _ := results[1].(error)
	return statuses, err
}
|
go
|
{
"resource": ""
}
|
q3616
|
NewMockUpgradeSeriesUnit
|
train
|
// NewMockUpgradeSeriesUnit creates a new UpgradeSeriesUnit mock bound
// to the given gomock controller.
func NewMockUpgradeSeriesUnit(ctrl *gomock.Controller) *MockUpgradeSeriesUnit {
	m := &MockUpgradeSeriesUnit{ctrl: ctrl}
	m.recorder = &MockUpgradeSeriesUnitMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3617
|
UpgradeSeriesStatus
|
train
|
// UpgradeSeriesStatus registers an expected call of UpgradeSeriesStatus
// with the mock recorder.
func (mr *MockUpgradeSeriesUnitMockRecorder) UpgradeSeriesStatus() *gomock.Call {
	method := reflect.TypeOf((*MockUpgradeSeriesUnit)(nil).UpgradeSeriesStatus)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeSeriesStatus", method)
}
|
go
|
{
"resource": ""
}
|
q3618
|
RegisterContainerProvider
|
train
|
// RegisterContainerProvider registers p under name (plus any aliases)
// in the global provider registry, panicking on failure. The returned
// function unregisters the provider again.
func RegisterContainerProvider(name string, p ContainerEnvironProvider, alias ...string) (unregister func()) {
	err := environs.GlobalProviderRegistry().RegisterProvider(p, name, alias...)
	if err != nil {
		panic(fmt.Errorf("juju: %v", err))
	}
	return func() {
		environs.GlobalProviderRegistry().UnregisterProvider(name)
	}
}
|
go
|
{
"resource": ""
}
|
q3619
|
Open
|
train
|
// Open opens the container environ via p, which must implement
// ContainerEnvironProvider; a NotValid error is returned otherwise.
func Open(p environs.EnvironProvider, args environs.OpenParams) (Broker, error) {
	// Guard-clause form: drop the redundant else after a return
	// (golint/staticcheck idiom).
	envProvider, ok := p.(ContainerEnvironProvider)
	if !ok {
		return nil, errors.NotValidf("container environ provider %T", p)
	}
	return envProvider.Open(args)
}
|
go
|
{
"resource": ""
}
|
q3620
|
DeployApplication
|
train
|
// DeployApplication validates the deployment parameters against the
// charm and adds the application to state, returning the new
// application. Subordinate charms may not specify units or constraints.
func DeployApplication(st ApplicationDeployer, args DeployApplicationParams) (Application, error) {
	// Validate the user-supplied settings against the charm's declared
	// configuration schema.
	charmConfig, err := args.Charm.Config().ValidateSettings(args.CharmConfig)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if args.Charm.Meta().Subordinate {
		// Subordinates are colocated with principal units, so unit
		// counts and machine constraints make no sense for them.
		if args.NumUnits != 0 {
			return nil, fmt.Errorf("subordinate application must be deployed without units")
		}
		if !constraints.IsEmpty(&args.Constraints) {
			return nil, fmt.Errorf("subordinate application must be deployed without constraints")
		}
	}
	// TODO(fwereade): transactional State.AddApplication including settings, constraints
	// (minimumUnitCount, initialMachineIds?).
	effectiveBindings, err := getEffectiveBindingsForCharmMeta(args.Charm.Meta(), args.EndpointBindings)
	if err != nil {
		return nil, errors.Trace(err)
	}
	asa := state.AddApplicationArgs{
		Name:              args.ApplicationName,
		Series:            args.Series,
		Charm:             args.Charm,
		Channel:           args.Channel,
		Storage:           stateStorageConstraints(args.Storage),
		Devices:           stateDeviceConstraints(args.Devices),
		AttachStorage:     args.AttachStorage,
		ApplicationConfig: args.ApplicationConfig,
		CharmConfig:       charmConfig,
		NumUnits:          args.NumUnits,
		Placement:         args.Placement,
		Resources:         args.Resources,
		EndpointBindings:  effectiveBindings,
	}
	if !args.Charm.Meta().Subordinate {
		// Constraints only apply to principal applications
		// (subordinates were rejected above if they had any).
		asa.Constraints = args.Constraints
	}
	return st.AddApplication(asa)
}
|
go
|
{
"resource": ""
}
|
q3621
|
addUnits
|
train
|
// addUnits starts n units of the named application. When assignUnits is
// true each unit is assigned to a machine: placement directives are
// consumed in order, and any surplus units fall back to the default
// assignment policy.
func addUnits(
	unitAdder UnitAdder,
	appName string,
	n int,
	placement []*instance.Placement,
	attachStorage []names.StorageTag,
	assignUnits bool,
) ([]Unit, error) {
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty

	added := make([]Unit, n)
	// TODO what do we do if we fail half-way through this process?
	for i := range added {
		u, err := unitAdder.AddUnit(state.AddUnitParams{
			AttachStorage: attachStorage,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot add unit %d/%d to application %q", i+1, n, appName)
		}
		added[i] = u
		if !assignUnits {
			continue
		}
		if i < len(placement) {
			// A placement directive exists for this unit.
			if err := u.AssignWithPlacement(placement[i]); err != nil {
				return nil, errors.Annotatef(err, "acquiring machine to host unit %q", u.UnitTag().Id())
			}
			continue
		}
		// No more placement directives: use the default policy.
		if err := u.AssignWithPolicy(policy); err != nil {
			return nil, errors.Trace(err)
		}
	}
	return added, nil
}
|
go
|
{
"resource": ""
}
|
q3622
|
Info
|
train
|
// Info returns the command metadata for bootstrap-state.
func (c *BootstrapCommand) Info() *cmd.Info {
	info := cmd.Info{
		Name:    "bootstrap-state",
		Purpose: "initialize juju state",
	}
	return jujucmd.Info(&info)
}
|
go
|
{
"resource": ""
}
|
q3623
|
SetFlags
|
train
|
// SetFlags registers the command's flags, including those of the
// embedded AgentConf.
func (c *BootstrapCommand) SetFlags(f *gnuflag.FlagSet) {
	c.AgentConf.AddFlags(f)
	// A zero duration means "no timeout" unless overridden.
	f.DurationVar(&c.Timeout, "timeout", 0, "set the bootstrap timeout")
}
|
go
|
{
"resource": ""
}
|
q3624
|
populateTools
|
train
|
// populateTools stores the agent binaries in tools storage. Uploaded
// (file://) binaries are cloned for every supported series of the same
// OS; externally downloaded binaries are stored under their own version
// only.
func (c *BootstrapCommand) populateTools(st *state.State, env environs.BootstrapEnviron) error {
	agentConfig := c.CurrentConfig()
	dataDir := agentConfig.DataDir()
	hostSeries, err := series.HostSeries()
	if err != nil {
		return errors.Trace(err)
	}
	current := version.Binary{
		Number: jujuversion.Current,
		Arch:   arch.HostArch(),
		Series: hostSeries,
	}
	tools, err := agenttools.ReadTools(dataDir, current)
	if err != nil {
		return errors.Trace(err)
	}
	data, err := ioutil.ReadFile(filepath.Join(
		agenttools.SharedToolsDir(dataDir, current),
		"tools.tar.gz",
	))
	if err != nil {
		return errors.Trace(err)
	}
	toolstorage, err := st.ToolsStorage()
	if err != nil {
		return errors.Trace(err)
	}
	defer toolstorage.Close()

	var toolsVersions []version.Binary
	if strings.HasPrefix(tools.URL, "file://") {
		// Tools were uploaded: clone for each series of the same OS.
		// NOTE: local names chosen to avoid shadowing the "os" and
		// "series" packages, as the originals did.
		toolsOS, err := series.GetOSFromSeries(tools.Version.Series)
		if err != nil {
			return errors.Trace(err)
		}
		for _, supportedSeries := range series.OSSupportedSeries(toolsOS) {
			toolsVersion := tools.Version
			toolsVersion.Series = supportedSeries
			toolsVersions = append(toolsVersions, toolsVersion)
		}
	} else {
		// Tools were downloaded from an external source: don't clone.
		toolsVersions = []version.Binary{tools.Version}
	}
	for _, toolsVersion := range toolsVersions {
		metadata := binarystorage.Metadata{
			Version: toolsVersion.String(),
			Size:    tools.Size,
			SHA256:  tools.SHA256,
		}
		logger.Debugf("Adding agent binaries: %v", toolsVersion)
		if err := toolstorage.Add(bytes.NewReader(data), metadata); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3625
|
populateGUIArchive
|
train
|
// populateGUIArchive stores the Juju GUI archive in GUI storage and
// records the current GUI version in state.
func (c *BootstrapCommand) populateGUIArchive(st *state.State, env environs.BootstrapEnviron) error {
	dataDir := c.CurrentConfig().DataDir()
	guistorage, err := st.GUIStorage()
	if err != nil {
		return errors.Trace(err)
	}
	defer guistorage.Close()

	gui, err := agenttools.ReadGUIArchive(dataDir)
	if err != nil {
		return errors.Annotate(err, "cannot fetch GUI info")
	}
	archive, err := os.Open(filepath.Join(agenttools.SharedGUIDir(dataDir), "gui.tar.bz2"))
	if err != nil {
		return errors.Annotate(err, "cannot read GUI archive")
	}
	defer archive.Close()

	metadata := binarystorage.Metadata{
		Version: gui.Version.String(),
		Size:    gui.Size,
		SHA256:  gui.SHA256,
	}
	if err := guistorage.Add(archive, metadata); err != nil {
		return errors.Annotate(err, "cannot store GUI archive")
	}
	if err := st.GUISetVersion(gui.Version); err != nil {
		return errors.Annotate(err, "cannot set current GUI version")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3626
|
saveCustomImageMetadata
|
train
|
// saveCustomImageMetadata caches any custom image metadata in state.
func (c *BootstrapCommand) saveCustomImageMetadata(st *state.State, env environs.BootstrapEnviron, imageMetadata []*imagemetadata.ImageMetadata) error {
	logger.Debugf("saving custom image metadata")
	const source = "custom"
	return storeImageMetadataInState(st, env, source, simplestreams.CUSTOM_CLOUD_DATA, imageMetadata)
}
|
go
|
{
"resource": ""
}
|
q3627
|
storeImageMetadataInState
|
train
|
// storeImageMetadataInState saves the given image metadata in state,
// tagging each record with the supplied source label and priority.
// Missing stream/source fields are defaulted from the model config and
// "custom" respectively. A no-op when there is no metadata.
func storeImageMetadataInState(st *state.State, env environs.BootstrapEnviron, source string, priority int, existingMetadata []*imagemetadata.ImageMetadata) error {
	if len(existingMetadata) == 0 {
		// Nothing to save.
		return nil
	}
	cfg := env.Config()
	metadataState := make([]cloudimagemetadata.Metadata, len(existingMetadata))
	for i, one := range existingMetadata {
		m := cloudimagemetadata.Metadata{
			MetadataAttributes: cloudimagemetadata.MetadataAttributes{
				Stream:          one.Stream,
				Region:          one.RegionName,
				Arch:            one.Arch,
				VirtType:        one.VirtType,
				RootStorageType: one.Storage,
				Source:          source,
				Version:         one.Version,
			},
			Priority: priority,
			ImageId:  one.Id,
		}
		// The stored record is keyed by series, derived from the
		// simplestreams version string.
		s, err := seriesFromVersion(one.Version)
		if err != nil {
			return errors.Annotatef(err, "cannot determine series for version %v", one.Version)
		}
		m.Series = s
		if m.Stream == "" {
			// Fall back to the model's configured image stream.
			m.Stream = cfg.ImageStream()
		}
		if m.Source == "" {
			m.Source = "custom"
		}
		metadataState[i] = m
	}
	if err := st.CloudImageMetadataStorage.SaveMetadataNoExpiry(metadataState); err != nil {
		return errors.Annotatef(err, "cannot cache image metadata")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3628
|
NewWorker
|
train
|
// NewWorker returns a worker that waits for a TerminationSignal and
// terminates when one arrives.
func NewWorker() worker.Worker {
	w := &terminationWorker{}
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, TerminationSignal)
	w.tomb.Go(func() error {
		defer signal.Stop(sigCh)
		return w.loop(sigCh)
	})
	return w
}
|
go
|
{
"resource": ""
}
|
q3629
|
NewListCommand
|
train
|
// NewListCommand returns a command for listing storage instances.
func NewListCommand() cmd.Command {
	// Named "c" rather than "cmd" so the local does not shadow the
	// "cmd" package used in the signature.
	c := &listCommand{}
	c.newAPIFunc = func() (StorageListAPI, error) {
		return c.NewStorageAPI()
	}
	return modelcmd.Wrap(c)
}
|
go
|
{
"resource": ""
}
|
q3630
|
GetCombinedStorageInfo
|
train
|
// GetCombinedStorageInfo gathers the requested categories of storage
// information — filesystems, volumes and storage instances — into a
// single CombinedStorage result.
func GetCombinedStorageInfo(p GetCombinedStorageInfoParams) (*CombinedStorage, error) {
	result := &CombinedStorage{}
	var err error
	if p.WantFilesystems {
		if result.Filesystems, err = generateListFilesystemsOutput(p.Context, p.APIClient, p.Ids); err != nil {
			return nil, errors.Trace(err)
		}
	}
	if p.WantVolumes {
		if result.Volumes, err = generateListVolumeOutput(p.Context, p.APIClient, p.Ids); err != nil {
			return nil, errors.Trace(err)
		}
	}
	if p.WantStorage {
		if result.StorageInstances, err = generateListStorageOutput(p.Context, p.APIClient); err != nil {
			return nil, errors.Trace(err)
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3631
|
generateListStorageOutput
|
train
|
// generateListStorageOutput fetches storage details from the API and
// formats them for display; returns nil when there is nothing to show.
func generateListStorageOutput(ctx *cmd.Context, api StorageListAPI) (map[string]StorageInfo, error) {
	details, err := api.ListStorageDetails()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(details) == 0 {
		return nil, nil
	}
	return formatStorageDetails(details)
}
|
go
|
{
"resource": ""
}
|
q3632
|
Empty
|
train
|
// Empty reports whether no storage of any kind was collected.
func (c *CombinedStorage) Empty() bool {
	if len(c.StorageInstances) > 0 {
		return false
	}
	if len(c.Filesystems) > 0 {
		return false
	}
	return len(c.Volumes) == 0
}
|
go
|
{
"resource": ""
}
|
q3633
|
formatListTabularOne
|
train
|
// formatListTabularOne renders a single-section tabular listing.
func formatListTabularOne(writer io.Writer, value interface{}) error {
	const all = false
	return formatListTabular(writer, value, all)
}
|
go
|
{
"resource": ""
}
|
q3634
|
FormatListTabularAll
|
train
|
// FormatListTabularAll renders the full tabular listing.
func FormatListTabularAll(writer io.Writer, value interface{}) error {
	const all = true
	return formatListTabular(writer, value, all)
}
|
go
|
{
"resource": ""
}
|
q3635
|
Manifold
|
train
|
// Manifold returns a dependency.Manifold that runs the worker using the
// agent and API-caller resources named in config. The config's Clock
// and MachineLock must be non-nil.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.AgentName,
			config.APICallerName,
		},
		Start: func(context dependency.Context) (worker.Worker, error) {
			// Validate static configuration before acquiring resources,
			// so misconfiguration fails fast.
			if config.Clock == nil {
				return nil, errors.NotValidf("missing Clock")
			}
			if config.MachineLock == nil {
				return nil, errors.NotValidf("missing MachineLock")
			}
			// Named "a" so the local does not shadow the "agent" package.
			var a agent.Agent
			if err := context.Get(config.AgentName, &a); err != nil {
				return nil, err
			}
			var apiCaller base.APICaller
			if err := context.Get(config.APICallerName, &apiCaller); err != nil {
				return nil, err
			}
			return newWorker(a, apiCaller, config.MachineLock, config.Clock)
		},
	}
}
|
go
|
{
"resource": ""
}
|
q3636
|
ApplicationInstances
|
train
|
// ApplicationInstances returns the instance IDs of provisioned machines
// hosting units of the named application. Unassigned units and
// unprovisioned machines are skipped silently.
func ApplicationInstances(st *State, application string) ([]instance.Id, error) {
	units, err := allUnits(st, application)
	if err != nil {
		return nil, err
	}
	ids := make([]instance.Id, 0, len(units))
	for _, u := range units {
		machineId, err := u.AssignedMachineId()
		switch {
		case errors.IsNotAssigned(err):
			// The unit has no machine yet; skip it.
			continue
		case err != nil:
			return nil, err
		}
		m, err := st.Machine(machineId)
		if err != nil {
			return nil, err
		}
		instanceId, err := m.InstanceId()
		switch {
		case err == nil:
			ids = append(ids, instanceId)
		case errors.IsNotProvisioned(err):
			// The machine has no instance yet; skip it.
		default:
			return nil, err
		}
	}
	return ids, nil
}
|
go
|
{
"resource": ""
}
|
q3637
|
ApplicationMachines
|
train
|
// ApplicationMachines returns the IDs of machines hosting principal
// units of the named application (or of the application owning the
// given unit name).
func ApplicationMachines(st *State, application string) ([]string, error) {
	machines, err := st.AllMachines()
	if err != nil {
		return nil, err
	}
	appName := unitAppName(application)
	var ids []string
	for _, m := range machines {
		principalApps := set.NewStrings()
		for _, principal := range m.Principals() {
			principalApps.Add(unitAppName(principal))
		}
		if principalApps.Contains(appName) {
			ids = append(ids, m.Id())
		}
	}
	return ids, nil
}
|
go
|
{
"resource": ""
}
|
q3638
|
NewAddressOnSpace
|
train
|
// NewAddressOnSpace returns a new Address for value, tagged with the
// given space name.
func NewAddressOnSpace(spaceName string, value string) Address {
	address := NewAddress(value)
	address.SpaceName = SpaceName(spaceName)
	return address
}
|
go
|
{
"resource": ""
}
|
q3639
|
NewAddresses
|
train
|
// NewAddresses converts each input string into an Address.
func NewAddresses(inAddresses ...string) (outAddresses []Address) {
	outAddresses = make([]Address, 0, len(inAddresses))
	for _, value := range inAddresses {
		outAddresses = append(outAddresses, NewAddress(value))
	}
	return outAddresses
}
|
go
|
{
"resource": ""
}
|
q3640
|
NewAddressesOnSpace
|
train
|
// NewAddressesOnSpace converts each input string into an Address tagged
// with the given space name.
func NewAddressesOnSpace(spaceName string, inAddresses ...string) (outAddresses []Address) {
	outAddresses = make([]Address, 0, len(inAddresses))
	for _, value := range inAddresses {
		outAddresses = append(outAddresses, NewAddressOnSpace(spaceName, value))
	}
	return outAddresses
}
|
go
|
{
"resource": ""
}
|
q3641
|
DeriveAddressType
|
train
|
// DeriveAddressType classifies value as an IPv4 address, an IPv6
// address, or (failing both) a hostname.
func DeriveAddressType(value string) AddressType {
	ip := net.ParseIP(value)
	if ip == nil {
		// TODO(gz): Check value is a valid hostname
		return HostName
	}
	if ip.To4() != nil {
		return IPv4Address
	}
	if ip.To16() != nil {
		return IPv6Address
	}
	panic("Unknown form of IP address")
}
|
go
|
{
"resource": ""
}
|
q3642
|
deriveScope
|
train
|
// deriveScope attempts to derive a network scope from the address's
// type and value, returning the address's existing scope when nothing
// more specific can be determined (hostnames, unparseable values, or
// ranges not matched below). The check order matters: more specific
// ranges are tested before broader ones.
func deriveScope(addr Address) Scope {
	if addr.Type == HostName {
		// Hostnames carry no range information; keep what we have.
		return addr.Scope
	}
	ip := net.ParseIP(addr.Value)
	if ip == nil {
		return addr.Scope
	}
	if ip.IsLoopback() {
		return ScopeMachineLocal
	}
	// Private-network IPv4 and unique-local IPv6 ranges are treated
	// as cloud-local.
	if isIPv4PrivateNetworkAddress(addr.Type, ip) ||
		isIPv6UniqueLocalAddress(addr.Type, ip) {
		return ScopeCloudLocal
	}
	// Reserved class-E addresses are used for FAN networking.
	if isIPv4ReservedEAddress(addr.Type, ip) {
		return ScopeFanLocal
	}
	if ip.IsLinkLocalMulticast() ||
		ip.IsLinkLocalUnicast() ||
		ip.IsInterfaceLocalMulticast() {
		return ScopeLinkLocal
	}
	if ip.IsGlobalUnicast() {
		return ScopePublic
	}
	return addr.Scope
}
|
go
|
{
"resource": ""
}
|
q3643
|
ExactScopeMatch
|
train
|
// ExactScopeMatch reports whether the address's scope equals any of
// the given scopes.
func ExactScopeMatch(addr Address, addrScopes ...Scope) bool {
	for _, candidate := range addrScopes {
		if candidate == addr.Scope {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3644
|
SelectAddressesBySpaceNames
|
train
|
// SelectAddressesBySpaceNames filters addresses to those in any of the
// given spaces. The boolean result reports whether filtering was
// actually applied; with no spaces given, or no matches, the input is
// returned unfiltered.
func SelectAddressesBySpaceNames(addresses []Address, spaceNames ...SpaceName) ([]Address, bool) {
	if len(spaceNames) == 0 {
		logger.Errorf("addresses not filtered - no spaces given.")
		return addresses, false
	}
	wanted := spaceNameList(spaceNames)
	var filtered []Address
	for _, addr := range addresses {
		if wanted.IndexOf(addr.SpaceName) < 0 {
			continue
		}
		logger.Debugf("selected %q as an address in space %q", addr.Value, addr.SpaceName)
		filtered = append(filtered, addr)
	}
	if len(filtered) == 0 {
		logger.Errorf("no addresses found in spaces %s", spaceNames)
		return addresses, false
	}
	return filtered, true
}
|
go
|
{
"resource": ""
}
|
q3645
|
SelectHostPortsBySpaceNames
|
train
|
// SelectHostPortsBySpaceNames filters host/port pairs to those in any
// of the given spaces. The boolean result reports whether filtering was
// actually applied; with no spaces given, or no matches, the input is
// returned unfiltered.
func SelectHostPortsBySpaceNames(hps []HostPort, spaceNames ...SpaceName) ([]HostPort, bool) {
	if len(spaceNames) == 0 {
		logger.Errorf("host ports not filtered - no spaces given.")
		return hps, false
	}
	wanted := spaceNameList(spaceNames)
	var filtered []HostPort
	for _, hp := range hps {
		if wanted.IndexOf(hp.SpaceName) < 0 {
			continue
		}
		logger.Debugf("selected %q as a hostPort in space %q", hp.Value, hp.SpaceName)
		filtered = append(filtered, hp)
	}
	if len(filtered) == 0 {
		logger.Errorf("no hostPorts found in spaces %s", spaceNames)
		return hps, false
	}
	return filtered, true
}
|
go
|
{
"resource": ""
}
|
q3646
|
SelectPublicHostPort
|
train
|
// SelectPublicHostPort returns the "host:port" string of the best
// publicly-reachable entry in hps, or "" when none qualifies.
func SelectPublicHostPort(hps []HostPort) string {
	addressOf := func(i int) Address { return hps[i].Address }
	if index := bestAddressIndex(len(hps), addressOf, publicMatch); index >= 0 {
		return hps[index].NetAddr()
	}
	return ""
}
|
go
|
{
"resource": ""
}
|
q3647
|
SelectInternalAddresses
|
train
|
// SelectInternalAddresses picks the addresses best suited for internal
// (cloud-local, optionally machine-local) communication, or nil when
// none qualify.
func SelectInternalAddresses(addresses []Address, machineLocal bool) []Address {
	addressOf := func(i int) Address { return addresses[i] }
	indexes := bestAddressIndexes(len(addresses), addressOf, internalAddressMatcher(machineLocal))
	if len(indexes) == 0 {
		return nil
	}
	selected := make([]Address, len(indexes))
	for i, idx := range indexes {
		selected[i] = addresses[idx]
	}
	return selected
}
|
go
|
{
"resource": ""
}
|
q3648
|
SelectInternalHostPort
|
train
|
// SelectInternalHostPort returns the "host:port" string best suited for
// internal communication, or "" when none qualifies.
func SelectInternalHostPort(hps []HostPort, machineLocal bool) string {
	addressOf := func(i int) Address { return hps[i].Address }
	if index := bestAddressIndex(len(hps), addressOf, internalAddressMatcher(machineLocal)); index >= 0 {
		return hps[index].NetAddr()
	}
	return ""
}
|
go
|
{
"resource": ""
}
|
q3649
|
SelectInternalHostPorts
|
train
|
// SelectInternalHostPorts returns the "host:port" strings best suited
// for internal communication; the result is empty when none qualify.
func SelectInternalHostPorts(hps []HostPort, machineLocal bool) []string {
	addressOf := func(i int) Address { return hps[i].Address }
	indexes := bestAddressIndexes(len(hps), addressOf, internalAddressMatcher(machineLocal))
	selected := make([]string, 0, len(indexes))
	for _, idx := range indexes {
		selected = append(selected, hps[idx].NetAddr())
	}
	return selected
}
|
go
|
{
"resource": ""
}
|
q3650
|
DecimalToIPv4
|
train
|
// DecimalToIPv4 converts a big-endian uint32 representation into the
// corresponding 4-byte net.IP.
func DecimalToIPv4(addr uint32) net.IP {
	ip := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(ip, addr)
	return ip
}
|
go
|
{
"resource": ""
}
|
q3651
|
IPv4ToDecimal
|
train
|
// IPv4ToDecimal converts an IPv4 address to its big-endian uint32
// representation, failing for anything that is not a valid IPv4
// address.
func IPv4ToDecimal(ipv4Addr net.IP) (uint32, error) {
	v4 := ipv4Addr.To4()
	if v4 == nil {
		return 0, errors.Errorf("%q is not a valid IPv4 address", ipv4Addr.String())
	}
	return binary.BigEndian.Uint32(v4), nil
}
|
go
|
{
"resource": ""
}
|
q3652
|
machineBlockDevicesChanged
|
train
|
// machineBlockDevicesChanged handles a change in the machine's block
// devices: it collects the backing volumes of all incomplete
// filesystems and filesystem attachments whose block devices are not
// yet known, and refreshes their block device info.
func machineBlockDevicesChanged(ctx *context) error {
	volumeTags := make([]names.VolumeTag, 0, len(ctx.incompleteFilesystemParams))
	// We must query volumes for both incomplete filesystems
	// and incomplete filesystem attachments, because even
	// though a filesystem attachment cannot exist without a
	// filesystem, the filesystem may be created and attached
	// in different sessions, and there is no guarantee that
	// the block device will remain attached to the machine
	// in between.
	for _, params := range ctx.incompleteFilesystemParams {
		if params.Volume == (names.VolumeTag{}) {
			// Filesystem is not volume-backed.
			continue
		}
		if _, ok := ctx.volumeBlockDevices[params.Volume]; ok {
			// Backing-volume's block device is already attached.
			continue
		}
		volumeTags = append(volumeTags, params.Volume)
	}
	for _, params := range ctx.incompleteFilesystemAttachmentParams {
		filesystem, ok := ctx.filesystems[params.Filesystem]
		if !ok {
			// The filesystem is not yet known; nothing to refresh.
			continue
		}
		if filesystem.Volume == (names.VolumeTag{}) {
			// Filesystem is not volume-backed.
			continue
		}
		if _, ok := ctx.volumeBlockDevices[filesystem.Volume]; ok {
			// Backing-volume's block device is already attached.
			continue
		}
		// De-duplicate: the same volume may already have been queued
		// by the incomplete-filesystems loop above.
		var found bool
		for _, tag := range volumeTags {
			if filesystem.Volume == tag {
				found = true
				break
			}
		}
		if !found {
			volumeTags = append(volumeTags, filesystem.Volume)
		}
	}
	if len(volumeTags) == 0 {
		return nil
	}
	return refreshVolumeBlockDevices(ctx, volumeTags)
}
|
go
|
{
"resource": ""
}
|
q3653
|
processPendingVolumeBlockDevices
|
train
|
// processPendingVolumeBlockDevices force-refreshes block device info
// for any volumes queued in the pending set, then clears the set.
func processPendingVolumeBlockDevices(ctx *context) error {
	pending := ctx.pendingVolumeBlockDevices
	if len(pending) == 0 {
		logger.Tracef("no pending volume block devices")
		return nil
	}
	volumeTags := make([]names.VolumeTag, 0, len(pending))
	for _, tag := range pending.SortedValues() {
		volumeTags = append(volumeTags, tag.(names.VolumeTag))
	}
	// Clear out the pending set, so we don't force-refresh again.
	ctx.pendingVolumeBlockDevices = names.NewSet()
	return refreshVolumeBlockDevices(ctx, volumeTags)
}
|
go
|
{
"resource": ""
}
|
q3654
|
refreshVolumeBlockDevices
|
train
|
// refreshVolumeBlockDevices fetches block device info for the given
// volumes' attachments on this machine and records it in the context,
// updating any pending filesystems/attachments waiting on those
// volumes. Not-provisioned / not-found results are tolerated; other
// per-result errors are fatal.
func refreshVolumeBlockDevices(ctx *context, volumeTags []names.VolumeTag) error {
	machineTag, ok := ctx.config.Scope.(names.MachineTag)
	if !ok {
		// This function should only be called by machine-scoped
		// storage provisioners.
		logger.Warningf("refresh block devices, expected machine tag, got %v", ctx.config.Scope)
		return nil
	}
	ids := make([]params.MachineStorageId, len(volumeTags))
	for i, volumeTag := range volumeTags {
		ids[i] = params.MachineStorageId{
			MachineTag:    machineTag.String(),
			AttachmentTag: volumeTag.String(),
		}
	}
	results, err := ctx.config.Volumes.VolumeBlockDevices(ids)
	if err != nil {
		return errors.Annotate(err, "refreshing volume block devices")
	}
	for i, result := range results {
		if result.Error == nil {
			ctx.volumeBlockDevices[volumeTags[i]] = result.Result
			// Unblock any filesystems waiting on this volume's
			// block device. (Loop vars renamed so they don't shadow
			// the "params" package.)
			for _, fsParams := range ctx.incompleteFilesystemParams {
				if fsParams.Volume == volumeTags[i] {
					updatePendingFilesystem(ctx, fsParams)
				}
			}
			for id, attachParams := range ctx.incompleteFilesystemAttachmentParams {
				filesystem, ok := ctx.filesystems[attachParams.Filesystem]
				if !ok {
					continue
				}
				if filesystem.Volume == volumeTags[i] {
					updatePendingFilesystemAttachment(ctx, id, attachParams)
				}
			}
		} else if params.IsCodeNotProvisioned(result.Error) || params.IsCodeNotFound(result.Error) {
			// Either the volume (attachment) isn't provisioned,
			// or the corresponding block device is not yet known.
			//
			// Neither of these errors is fatal; we just wait for
			// the block device watcher to notify us again.
		} else {
			// BUG FIX: previously this annotated "err", which is
			// always nil at this point, silently swallowing the
			// per-result failure.
			return errors.Annotatef(
				result.Error, "getting block device info for volume attachment %v",
				ids[i],
			)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3655
|
NewLayeredStorage
|
train
|
// NewLayeredStorage combines multiple Storage layers into one: reads
// fall through the layers in order, writes go to the first. At least
// two stores must be supplied.
func NewLayeredStorage(s ...Storage) (Storage, error) {
	if len(s) < 2 {
		return nil, errors.Errorf("expected multiple stores")
	}
	return layeredStorage(s), nil
}
|
go
|
{
"resource": ""
}
|
q3656
|
Add
|
train
|
// Add writes the content and metadata to the first (primary) layer.
func (s layeredStorage) Add(r io.Reader, m Metadata) error {
	primary := s[0]
	return primary.Add(r, m)
}
|
go
|
{
"resource": ""
}
|
q3657
|
Open
|
train
|
// Open finds v in the first layer that has it; the last layer's result
// (including a NotFound error) is returned if none succeeds.
func (s layeredStorage) Open(v string) (Metadata, io.ReadCloser, error) {
	var (
		m   Metadata
		rc  io.ReadCloser
		err error
	)
	for _, layer := range s {
		if m, rc, err = layer.Open(v); !errors.IsNotFound(err) {
			break
		}
	}
	return m, rc, err
}
|
go
|
{
"resource": ""
}
|
q3658
|
Metadata
|
train
|
// Metadata finds v's metadata in the first layer that has it; the last
// layer's result (including a NotFound error) is returned if none
// succeeds.
func (s layeredStorage) Metadata(v string) (Metadata, error) {
	var (
		m   Metadata
		err error
	)
	for _, layer := range s {
		if m, err = layer.Metadata(v); !errors.IsNotFound(err) {
			break
		}
	}
	return m, err
}
|
go
|
{
"resource": ""
}
|
q3659
|
AllMetadata
|
train
|
// AllMetadata returns the metadata from every layer, deduplicated by
// version with earlier layers taking precedence.
func (s layeredStorage) AllMetadata() ([]Metadata, error) {
	var all []Metadata
	seen := set.NewStrings()
	for _, layer := range s {
		layerMetadata, err := layer.AllMetadata()
		if err != nil {
			return nil, err
		}
		for _, m := range layerMetadata {
			if seen.Contains(m.Version) {
				continue
			}
			seen.Add(m.Version)
			all = append(all, m)
		}
	}
	return all, nil
}
|
go
|
{
"resource": ""
}
|
q3660
|
Unstage
|
train
|
// Unstage removes the staged-resource record. The removal op carries no
// assert, so a transaction retry indicates something went badly wrong.
func (staged StagedResource) Unstage() error {
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// The op has no assert so we should not get here.
			return nil, errors.New("unstaging the resource failed")
		}
		return newRemoveStagedResourceOps(staged.id), nil
	}
	if err := staged.base.Run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3661
|
Activate
|
train
|
// Activate makes the staged resource the active one via an "upsert"
// transaction: the first attempt inserts the resource doc, a retry
// updates it instead, and any further retry fails. The staging record
// is always removed, and when the stored bytes changed the
// application's CharmModifiedVersion is incremented.
func (staged StagedResource) Activate() error {
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// This is an "upsert".
		var ops []txn.Op
		switch attempt {
		case 0:
			ops = newInsertResourceOps(staged.stored)
		case 1:
			// The insert aborted, so the doc must exist: update it.
			ops = newUpdateResourceOps(staged.stored)
		default:
			return nil, errors.New("setting the resource failed")
		}
		if staged.stored.PendingID == "" {
			// Only non-pending resources must have an existing application.
			ops = append(ops, staged.base.ApplicationExistsOps(staged.stored.ApplicationID)...)
		}
		// No matter what, we always remove any staging.
		ops = append(ops, newRemoveStagedResourceOps(staged.id)...)
		// If we are changing the bytes for a resource, we increment the
		// CharmModifiedVersion on the application, since resources are integral to
		// the high level "version" of the charm.
		if staged.stored.PendingID == "" {
			hasNewBytes, err := staged.hasNewBytes()
			if err != nil {
				logger.Errorf("can't read existing resource during activate: %v", errors.Details(err))
				return nil, errors.Trace(err)
			}
			if hasNewBytes {
				incOps := staged.base.IncCharmModifiedVersionOps(staged.stored.ApplicationID)
				ops = append(ops, incOps...)
			}
		}
		return ops, nil
	}
	if err := staged.base.Run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3662
|
Id
|
train
|
// Id returns the action's model-local id.
func (a *action) Id() string {
	docID := a.doc.DocId
	return a.st.localID(docID)
}
|
go
|
{
"resource": ""
}
|
q3663
|
Results
|
train
|
// Results returns the action's output map together with its
// completion message.
func (a *action) Results() (map[string]interface{}, string) {
	doc := a.doc
	return doc.Results, doc.Message
}
|
go
|
{
"resource": ""
}
|
q3664
|
Begin
|
train
|
// Begin marks the action as running and records the start time. The
// transaction asserts the action is still pending, so a concurrent
// state transition causes the transaction to abort. The updated Action
// is re-read from state and returned.
func (a *action) Begin() (Action, error) {
	m, err := a.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = m.st.db().RunTransaction([]txn.Op{
		{
			C:      actionsC,
			Id:     a.doc.DocId,
			Assert: bson.D{{"status", ActionPending}},
			Update: bson.D{{"$set", bson.D{
				{"status", ActionRunning},
				{"started", a.st.nowToTheSecond()},
			}}},
		}})
	if err != nil {
		return nil, err
	}
	// Re-read so the returned Action reflects the new status.
	return m.Action(a.Id())
}
|
go
|
{
"resource": ""
}
|
q3665
|
Finish
|
train
|
// Finish removes the action from the pending queue and records its
// final status, results and message.
func (a *action) Finish(results ActionResults) (Action, error) {
	return a.removeAndLog(
		results.Status,
		results.Results,
		results.Message,
	)
}
|
go
|
{
"resource": ""
}
|
q3666
|
removeAndLog
|
train
|
// removeAndLog records the action's final status, message and results,
// and deletes the corresponding notification document. The $nin
// assertion guards against finishing an action twice: the update only
// applies while the status is not already one of the terminal values
// (completed, cancelled or failed).
func (a *action) removeAndLog(finalStatus ActionStatus, results map[string]interface{}, message string) (Action, error) {
	m, err := a.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = m.st.db().RunTransaction([]txn.Op{
		{
			C:  actionsC,
			Id: a.doc.DocId,
			Assert: bson.D{{"status", bson.D{
				{"$nin", []interface{}{
					ActionCompleted,
					ActionCancelled,
					ActionFailed,
				}}}}},
			Update: bson.D{{"$set", bson.D{
				{"status", finalStatus},
				{"message", message},
				{"results", results},
				{"completed", a.st.nowToTheSecond()},
			}}},
		}, {
			// Remove the queued notification for this receiver/action pair.
			C:      actionNotificationsC,
			Id:     m.st.docID(ensureActionMarker(a.Receiver()) + a.Id()),
			Remove: true,
		}})
	if err != nil {
		return nil, err
	}
	// Return a fresh copy reflecting the updated document.
	return m.Action(a.Id())
}
|
go
|
{
"resource": ""
}
|
q3667
|
newAction
|
train
|
// newAction wraps the given action document in an Action bound to the
// supplied state.
func newAction(st *State, adoc actionDoc) Action {
	return &action{
		st:  st,
		doc: adoc,
	}
}
|
go
|
{
"resource": ""
}
|
q3668
|
newActionDoc
|
train
|
// newActionDoc builds the action document and its companion
// notification document used to enqueue a new action for the given
// receiver.
func newActionDoc(mb modelBackend, receiverTag names.Tag, actionName string, parameters map[string]interface{}) (actionDoc, actionNotificationDoc, error) {
	actionId, err := NewUUID()
	if err != nil {
		return actionDoc{}, actionNotificationDoc{}, err
	}
	actionLogger.Debugf("newActionDoc name: '%s', receiver: '%s', actionId: '%s'", actionName, receiverTag, actionId)
	var (
		modelUUID = mb.modelUUID()
		receiver  = receiverTag.Id()
		id        = actionId.String()
	)
	doc := actionDoc{
		DocId:      mb.docID(id),
		ModelUUID:  modelUUID,
		Receiver:   receiver,
		Name:       actionName,
		Parameters: parameters,
		Enqueued:   mb.nowToTheSecond(),
		Status:     ActionPending,
	}
	notification := actionNotificationDoc{
		DocId:     mb.docID(ensureActionMarker(receiver) + id),
		ModelUUID: modelUUID,
		Receiver:  receiver,
		ActionID:  id,
	}
	return doc, notification, nil
}
|
go
|
{
"resource": ""
}
|
q3669
|
Action
|
train
|
// Action retrieves an Action by its id, returning a not-found error
// when no matching document exists.
func (m *Model) Action(id string) (Action, error) {
	actionLogger.Tracef("Action() %q", id)
	st := m.st
	coll, closer := st.db().GetCollection(actionsC)
	defer closer()
	var doc actionDoc
	switch err := coll.FindId(id).One(&doc); err {
	case nil:
		// Fall through to return the action.
	case mgo.ErrNotFound:
		return nil, errors.NotFoundf("action %q", id)
	default:
		return nil, errors.Annotatef(err, "cannot get action %q", id)
	}
	actionLogger.Tracef("Action() %q found %+v", id, doc)
	return newAction(st, doc), nil
}
|
go
|
{
"resource": ""
}
|
q3670
|
AllActions
|
train
|
// AllActions returns every Action recorded for the model. An empty
// (non-nil) slice is returned when there are none.
func (m *Model) AllActions() ([]Action, error) {
	actionLogger.Tracef("AllActions()")
	actions, closer := m.st.db().GetCollection(actionsC)
	defer closer()
	docs := []actionDoc{}
	err := actions.Find(nil).All(&docs)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get all actions")
	}
	// Pre-size the result to avoid repeated slice growth; make keeps
	// the original non-nil empty-slice return for zero documents.
	results := make([]Action, 0, len(docs))
	for _, doc := range docs {
		results = append(results, newAction(m.st, doc))
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q3671
|
ActionByTag
|
train
|
// ActionByTag retrieves the Action identified by the given tag's id.
func (m *Model) ActionByTag(tag names.ActionTag) (Action, error) {
	return m.Action(tag.Id())
}
|
go
|
{
"resource": ""
}
|
q3672
|
FindActionTagsByPrefix
|
train
|
// FindActionTagsByPrefix returns the tags of all actions whose local
// id begins with the supplied prefix. Ids that do not parse as valid
// action ids are skipped.
// NOTE(review): any error from the deferred iter.Close() is discarded
// here, unlike sibling functions that return errors.Trace(iter.Close());
// iteration failures are silently ignored.
func (m *Model) FindActionTagsByPrefix(prefix string) []names.ActionTag {
	actionLogger.Tracef("FindActionTagsByPrefix() %q", prefix)
	var results []names.ActionTag
	var doc struct {
		Id string `bson:"_id"`
	}
	actions, closer := m.st.db().GetCollection(actionsC)
	defer closer()
	iter := actions.Find(bson.D{{"_id", bson.D{{"$regex", "^" + m.st.docID(prefix)}}}}).Iter()
	defer iter.Close()
	for iter.Next(&doc) {
		actionLogger.Tracef("FindActionTagsByPrefix() iter doc %+v", doc)
		localID := m.st.localID(doc.Id)
		if names.IsValidAction(localID) {
			results = append(results, names.NewActionTag(localID))
		}
	}
	actionLogger.Tracef("FindActionTagsByPrefix() %q found %+v", prefix, results)
	return results
}
|
go
|
{
"resource": ""
}
|
q3673
|
FindActionsByName
|
train
|
// FindActionsByName returns every Action in the model whose name
// matches the supplied value.
func (m *Model) FindActionsByName(name string) ([]Action, error) {
	coll, closer := m.st.db().GetCollection(actionsC)
	defer closer()
	iter := coll.Find(bson.D{{"name", name}}).Iter()
	var (
		found []Action
		doc   actionDoc
	)
	for iter.Next(&doc) {
		found = append(found, newAction(m.st, doc))
	}
	return found, errors.Trace(iter.Close())
}
|
go
|
{
"resource": ""
}
|
q3674
|
matchingActions
|
train
|
// matchingActions returns all Actions addressed to the given receiver.
func (st *State) matchingActions(ar ActionReceiver) ([]Action, error) {
	return st.matchingActionsByReceiverId(ar.Tag().Id())
}
|
go
|
{
"resource": ""
}
|
q3675
|
matchingActionsByReceiverId
|
train
|
// matchingActionsByReceiverId returns every Action whose receiver
// field matches the supplied id.
func (st *State) matchingActionsByReceiverId(id string) ([]Action, error) {
	coll, closer := st.db().GetCollection(actionsC)
	defer closer()
	iter := coll.Find(bson.D{{"receiver", id}}).Iter()
	var (
		matched []Action
		doc     actionDoc
	)
	for iter.Next(&doc) {
		matched = append(matched, newAction(st, doc))
	}
	return matched, errors.Trace(iter.Close())
}
|
go
|
{
"resource": ""
}
|
q3676
|
matchingActionsPending
|
train
|
// matchingActionsPending returns all Actions for the given receiver
// that are still pending execution.
func (st *State) matchingActionsPending(ar ActionReceiver) ([]Action, error) {
	// The condition variable was previously (and misleadingly) named
	// "completed"; it selects pending actions.
	pending := bson.D{{"status", ActionPending}}
	return st.matchingActionsByReceiverAndStatus(ar.Tag(), pending)
}
|
go
|
{
"resource": ""
}
|
q3677
|
matchingActionsRunning
|
train
|
// matchingActionsRunning returns all Actions for the given receiver
// that are currently running.
func (st *State) matchingActionsRunning(ar ActionReceiver) ([]Action, error) {
	// The condition variable was previously (and misleadingly) named
	// "completed"; it selects running actions.
	running := bson.D{{"status", ActionRunning}}
	return st.matchingActionsByReceiverAndStatus(ar.Tag(), running)
}
|
go
|
{
"resource": ""
}
|
q3678
|
matchingActionsCompleted
|
train
|
// matchingActionsCompleted returns all Actions for the given receiver
// that have reached a terminal status (completed, cancelled or failed).
func (st *State) matchingActionsCompleted(ar ActionReceiver) ([]Action, error) {
	terminal := bson.D{{"$or", []bson.D{
		{{"status", ActionCompleted}},
		{{"status", ActionCancelled}},
		{{"status", ActionFailed}},
	}}}
	return st.matchingActionsByReceiverAndStatus(ar.Tag(), terminal)
}
|
go
|
{
"resource": ""
}
|
q3679
|
matchingActionsByReceiverAndStatus
|
train
|
// matchingActionsByReceiverAndStatus returns all Actions for the given
// receiver tag that also satisfy the supplied status condition.
func (st *State) matchingActionsByReceiverAndStatus(tag names.Tag, statusCondition bson.D) ([]Action, error) {
	coll, closer := st.db().GetCollection(actionsC)
	defer closer()
	query := append(bson.D{{"receiver", tag.Id()}}, statusCondition...)
	iter := coll.Find(query).Iter()
	var (
		matched []Action
		doc     actionDoc
	)
	for iter.Next(&doc) {
		matched = append(matched, newAction(st, doc))
	}
	return matched, errors.Trace(iter.Close())
}
|
go
|
{
"resource": ""
}
|
q3680
|
CharmInfo
|
train
|
// CharmInfo requests charm details for the given charm URL from the
// API server and converts the wire representation into a CharmInfo.
func (c *Client) CharmInfo(charmURL string) (*CharmInfo, error) {
	var info params.CharmInfo
	if err := c.facade.FacadeCall("CharmInfo", params.CharmURL{URL: charmURL}, &info); err != nil {
		return nil, errors.Trace(err)
	}
	meta, err := convertCharmMeta(info.Meta)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &CharmInfo{
		Revision:   info.Revision,
		URL:        info.URL,
		Config:     convertCharmConfig(info.Config),
		Meta:       meta,
		Actions:    convertCharmActions(info.Actions),
		Metrics:    convertCharmMetrics(info.Metrics),
		LXDProfile: convertCharmLXDProfile(info.LXDProfile),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3681
|
NewAddCloudCommand
|
train
|
// NewAddCloudCommand constructs the add-cloud command, wiring the file
// client store, a cloud call context, and a Ping function used to
// verify provider endpoints.
func NewAddCloudCommand(cloudMetadataStore CloudMetadataStore) cmd.Command {
	cloudCallCtx := context.NewCloudCallContext()
	store := jujuclient.NewFileClientStore()
	c := &AddCloudCommand{
		OptionalControllerCommand: modelcmd.OptionalControllerCommand{Store: store},
		cloudMetadataStore:        cloudMetadataStore,
		CloudCallCtx:              cloudCallCtx,
		// Ping is provider.Ping except in tests where we don't actually want to
		// require a valid cloud.
		Ping: func(p environs.EnvironProvider, endpoint string) error {
			return p.Ping(cloudCallCtx, endpoint)
		},
		store: store,
	}
	// The API constructor is a field so tests can substitute a fake.
	c.addCloudAPIFunc = c.cloudAPI
	return modelcmd.WrapBase(c)
}
|
go
|
{
"resource": ""
}
|
q3682
|
Run
|
train
|
// Run executes the add-cloud command. With no cloud file and no
// controller it falls back to the interactive flow; otherwise it loads
// the cloud definition (from file or local cache) and either stores it
// locally or uploads it, with a credential, to the named controller.
func (c *AddCloudCommand) Run(ctxt *cmd.Context) error {
	if c.CloudFile == "" && c.controllerName == "" {
		return c.runInteractive(ctxt)
	}
	var newCloud *jujucloud.Cloud
	var err error
	if c.CloudFile != "" {
		newCloud, err = c.readCloudFromFile(ctxt)
	} else {
		// No cloud file specified so we try and use a named
		// cloud that already has been added to the local cache.
		newCloud, err = cloudFromLocal(c.Cloud)
	}
	if err != nil {
		return errors.Trace(err)
	}
	if c.controllerName == "" {
		if !c.Local {
			ctxt.Infof(
				"There are no controllers running.\nAdding cloud to local cache so you can use it to bootstrap a controller.\n")
		}
		return addLocalCloud(c.cloudMetadataStore, *newCloud)
	}
	// A controller has been specified so upload the cloud details
	// plus a corresponding credential to the controller.
	api, err := c.addCloudAPIFunc()
	if err != nil {
		return err
	}
	defer api.Close()
	err = api.AddCloud(*newCloud)
	// An already-existing cloud is tolerated so the credential below
	// can still be added.
	if err != nil && params.ErrCode(err) != params.CodeAlreadyExists {
		return err
	}
	// Add a credential for the newly added cloud.
	err = c.addCredentialToController(ctxt, *newCloud, api)
	if err != nil {
		return err
	}
	ctxt.Infof("Cloud %q added to controller %q.", c.Cloud, c.controllerName)
	return nil
}
|
go
|
{
"resource": ""
}
|
q3683
|
addCertificate
|
train
|
// addCertificate reads the CA certificate file referenced by the
// supplied cloud YAML, validates the certificate, stores it under
// "ca-certificates", and returns the filename along with the
// re-marshalled YAML.
func addCertificate(data []byte) (string, []byte, error) {
	vals, err := ensureStringMaps(string(data))
	if err != nil {
		return "", nil, err
	}
	name, ok := vals[jujucloud.CertFilenameKey]
	if !ok {
		return "", nil, errors.NotFoundf("yaml has no certificate file")
	}
	// Guard the type assertion: a non-string value would previously
	// have panicked. The redundant "ok &&" check is also gone — ok was
	// always true at this point.
	filename, ok := name.(string)
	if !ok || filename == "" {
		return filename, nil, errors.NotFoundf("yaml has no certificate file")
	}
	out, err := ioutil.ReadFile(filename)
	if err != nil {
		return filename, nil, err
	}
	certificate := string(out)
	if _, err := cert.ParseCert(certificate); err != nil {
		return filename, nil, errors.Annotate(err, "bad cloud CA certificate")
	}
	vals["ca-certificates"] = []string{certificate}
	alt, err := yaml.Marshal(vals)
	return filename, alt, err
}
|
go
|
{
"resource": ""
}
|
q3684
|
addableCloudProviders
|
train
|
// addableCloudProviders partitions the registered providers into those
// that can be added (they expose a cloud schema) and those that
// cannot. The supported list is returned sorted.
func addableCloudProviders() (providers []string, unsupported []string, _ error) {
	for _, name := range environs.RegisteredProviders() {
		provider, err := environs.Provider(name)
		if err != nil {
			// should be impossible
			return nil, nil, errors.Trace(err)
		}
		if provider.CloudSchema() == nil {
			unsupported = append(unsupported, name)
			continue
		}
		providers = append(providers, name)
	}
	sort.Strings(providers)
	return providers, unsupported, nil
}
|
go
|
{
"resource": ""
}
|
q3685
|
nameExists
|
train
|
// nameExists reports, via a non-empty message, whether the given name
// clashes with a public or built-in cloud name. An empty message means
// the name is free.
func nameExists(name string, public map[string]jujucloud.Cloud) (string, error) {
	if _, taken := public[name]; taken {
		return fmt.Sprintf("%q is the name of a public cloud", name), nil
	}
	builtin, err := common.BuiltInClouds()
	if err != nil {
		return "", errors.Trace(err)
	}
	if _, taken := builtin[name]; taken {
		return fmt.Sprintf("%q is the name of a built-in cloud", name), nil
	}
	return "", nil
}
|
go
|
{
"resource": ""
}
|
q3686
|
New
|
train
|
// New creates the facade, permitting only machine, unit or application
// agents as callers.
func New(backend Backend, resources facade.Resources, auth facade.Authorizer) (*Facade, error) {
	if !auth.AuthMachineAgent() && !auth.AuthUnitAgent() && !auth.AuthApplicationAgent() {
		return nil, common.ErrPerm
	}
	return &Facade{
		backend:   backend,
		resources: resources,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3687
|
Phase
|
train
|
// Phase reports the current migration phase for each supplied entity.
func (facade *Facade) Phase(entities params.Entities) params.PhaseResults {
	results := params.PhaseResults{
		Results: make([]params.PhaseResult, len(entities.Entities)),
	}
	for i, entity := range entities.Entities {
		phase, err := facade.onePhase(entity.Tag)
		result := &results.Results[i]
		result.Phase = phase
		result.Error = common.ServerError(err)
	}
	return results
}
|
go
|
{
"resource": ""
}
|
q3688
|
onePhase
|
train
|
// onePhase authorises the caller for the given tag and then reports
// the current migration phase as a string.
func (facade *Facade) onePhase(tagString string) (string, error) {
	if err := facade.auth(tagString); err != nil {
		return "", errors.Trace(err)
	}
	phase, err := facade.backend.MigrationPhase()
	if err != nil {
		return "", errors.Trace(err)
	}
	return phase.String(), nil
}
|
go
|
{
"resource": ""
}
|
q3689
|
Watch
|
train
|
// Watch starts a NotifyWatcher reporting migration status changes for
// each supplied entity.
func (facade *Facade) Watch(entities params.Entities) params.NotifyWatchResults {
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(entities.Entities)),
	}
	for i, entity := range entities.Entities {
		watcherId, err := facade.oneWatch(entity.Tag)
		results.Results[i].NotifyWatcherId = watcherId
		results.Results[i].Error = common.ServerError(err)
	}
	return results
}
|
go
|
{
"resource": ""
}
|
q3690
|
Manifold
|
train
|
// Manifold returns a dependency.Manifold that runs the metrics
// collection worker, consuming the agent, metric spool and charm
// directory resources named in the config.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.AgentName,
			config.MetricSpoolName,
			config.CharmDirName,
		},
		Start: func(context dependency.Context) (worker.Worker, error) {
			collector, err := newCollect(config, context)
			if err != nil {
				return nil, err
			}
			// Run the collector periodically for the worker's lifetime.
			return spool.NewPeriodicWorker(collector.Do, collector.period, jworker.NewTimer, collector.stop), nil
		},
	}
}
|
go
|
{
"resource": ""
}
|
q3691
|
Do
|
train
|
// Do performs one metrics collection pass. Aborted charm-directory
// visits and metrics-data errors are logged and swallowed so the
// periodic worker keeps running; other errors are returned (and the
// deferred stop() releases the listener socket).
func (w *collect) Do(stop <-chan struct{}) (err error) {
	defer func() {
		// See bug https://pad/lv/1733469
		// If this function which is run by a PeriodicWorker
		// exits with an error, we need to call stop() to
		// ensure the listener socket is closed.
		if err != nil {
			w.stop()
		}
	}()
	config := w.agent.CurrentConfig()
	tag := config.Tag()
	unitTag, ok := tag.(names.UnitTag)
	if !ok {
		return errors.Errorf("expected a unit tag, got %v", tag)
	}
	paths := uniter.NewWorkerPaths(config.DataDir(), unitTag, "metrics-collect")
	recorder, err := newRecorder(unitTag, paths, w.metricFactory)
	if errors.Cause(err) == errMetricsNotDefined {
		// Charms with no metrics defined are not an error.
		logger.Tracef("%v", err)
		return nil
	} else if err != nil {
		return errors.Annotate(err, "failed to instantiate metric recorder")
	}
	// Only collect while the charm directory is available.
	err = w.charmdir.Visit(func() error {
		return w.runner.do(recorder)
	}, stop)
	if err == fortress.ErrAborted {
		logger.Tracef("cannot execute collect-metrics: %v", err)
		return nil
	}
	if spool.IsMetricsDataError(err) {
		logger.Debugf("cannot record metrics: %v", err)
		return nil
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q3692
|
NewCredentialValidatorAPI
|
train
|
// NewCredentialValidatorAPI creates the credential validator facade
// backed by the context's state.
func NewCredentialValidatorAPI(ctx facade.Context) (*CredentialValidatorAPI, error) {
	return internalNewCredentialValidatorAPI(NewBackend(NewStateShim(ctx.State())), ctx.Resources(), ctx.Auth())
}
|
go
|
{
"resource": ""
}
|
q3693
|
NewCredentialValidatorAPIv1
|
train
|
// NewCredentialValidatorAPIv1 wraps the v2 facade to serve the v1 API.
func NewCredentialValidatorAPIv1(ctx facade.Context) (*CredentialValidatorAPIV1, error) {
	v2, err := NewCredentialValidatorAPI(ctx)
	if err != nil {
		return nil, err
	}
	return &CredentialValidatorAPIV1{v2}, nil
}
|
go
|
{
"resource": ""
}
|
q3694
|
WatchCredential
|
train
|
// WatchCredential starts a NotifyWatcher for the given cloud
// credential, but only if that credential is used by the model backing
// this facade; otherwise permission is denied.
func (api *CredentialValidatorAPI) WatchCredential(tag params.Entity) (params.NotifyWatchResult, error) {
	fail := func(failure error) (params.NotifyWatchResult, error) {
		return params.NotifyWatchResult{}, common.ServerError(failure)
	}
	credentialTag, err := names.ParseCloudCredentialTag(tag.Tag)
	if err != nil {
		return fail(err)
	}
	// Is credential used by the model that has created this backend?
	isUsed, err := api.backend.ModelUsesCredential(credentialTag)
	if err != nil {
		return fail(err)
	}
	if !isUsed {
		return fail(common.ErrPerm)
	}
	result := params.NotifyWatchResult{}
	watch := api.backend.WatchCredential(credentialTag)
	// Consume the initial event. Technically, API calls to Watch
	// 'transmit' the initial event in the Watch response. But
	// NotifyWatchers have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = api.resources.Register(watch)
	} else {
		// A closed channel means the watcher died; surface its error.
		err = watcher.EnsureErr(watch)
		result.Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3695
|
ModelCredential
|
train
|
// ModelCredential returns cloud credential information for the model
// served by this API.
func (api *CredentialValidatorAPI) ModelCredential() (params.ModelCredential, error) {
	c, err := api.backend.ModelCredential()
	if err != nil {
		return params.ModelCredential{}, common.ServerError(err)
	}
	result := params.ModelCredential{
		Model:           c.Model.String(),
		CloudCredential: c.Credential.String(),
		Exists:          c.Exists,
		Valid:           c.Valid,
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3696
|
WatchModelCredential
|
train
|
// WatchModelCredential starts a NotifyWatcher for changes to the
// model's cloud credential reference.
func (api *CredentialValidatorAPI) WatchModelCredential() (params.NotifyWatchResult, error) {
	result := params.NotifyWatchResult{}
	watch, err := api.backend.WatchModelCredential()
	if err != nil {
		return result, common.ServerError(err)
	}
	// Consume the initial event. Technically, API calls to Watch
	// 'transmit' the initial event in the Watch response. But
	// NotifyWatchers have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = api.resources.Register(watch)
	} else {
		// A closed channel means the watcher died; surface its error.
		err = watcher.EnsureErr(watch)
		result.Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3697
|
NewFacade
|
train
|
// NewFacade constructs the CAAS unit provisioner facade. Only the
// controller may use it; life queries are additionally restricted to
// application and unit tags.
func NewFacade(
	resources facade.Resources,
	authorizer facade.Authorizer,
	st CAASUnitProvisionerState,
	sb StorageBackend,
	db DeviceBackend,
	storagePoolManager poolmanager.PoolManager,
	registry storage.ProviderRegistry,
	clock clock.Clock,
) (*Facade, error) {
	if !authorizer.AuthController() {
		return nil, common.ErrPerm
	}
	return &Facade{
		LifeGetter: common.NewLifeGetter(
			st, common.AuthAny(
				common.AuthFuncForTagKind(names.ApplicationTagKind),
				common.AuthFuncForTagKind(names.UnitTagKind),
			),
		),
		resources:          resources,
		state:              st,
		storage:            sb,
		devices:            db,
		storagePoolManager: storagePoolManager,
		registry:           registry,
		clock:              clock,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3698
|
WatchApplicationsScale
|
train
|
// WatchApplicationsScale starts a NotifyWatcher for the desired scale
// of each requested application.
func (f *Facade) WatchApplicationsScale(args params.Entities) (params.NotifyWatchResults, error) {
	out := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		watcherId, err := f.watchApplicationScale(arg.Tag)
		if err != nil {
			out.Results[i].Error = common.ServerError(err)
			continue
		}
		out.Results[i].NotifyWatcherId = watcherId
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q3699
|
WatchPodSpec
|
train
|
// WatchPodSpec starts a NotifyWatcher for changes to the pod spec of
// each requested entity.
func (f *Facade) WatchPodSpec(args params.Entities) (params.NotifyWatchResults, error) {
	model, err := f.state.Model()
	if err != nil {
		return params.NotifyWatchResults{}, errors.Trace(err)
	}
	out := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		watcherId, err := f.watchPodSpec(model, arg.Tag)
		if err != nil {
			out.Results[i].Error = common.ServerError(err)
			continue
		}
		out.Results[i].NotifyWatcherId = watcherId
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.