_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q3800
|
InstanceStatus
|
train
|
// InstanceStatus returns the provider instance status recorded for the
// machine, as a (status, info) pair.
func (m *Machine) InstanceStatus() (status.Status, string, error) {
	args := params.Entities{Entities: []params.Entity{
		{Tag: m.tag.String()},
	}}
	var results params.StatusResults
	if err := m.st.facade.FacadeCall("InstanceStatus", args, &results); err != nil {
		return "", "", err
	}
	if n := len(results.Results); n != 1 {
		return "", "", fmt.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return "", "", res.Error
	}
	// TODO(perrito666) add status validation.
	return status.Status(res.Status), res.Info, nil
}
|
go
|
{
"resource": ""
}
|
q3801
|
SetModificationStatus
|
train
|
// SetModificationStatus records a modification status for the machine.
func (m *Machine) SetModificationStatus(status status.Status, info string, data map[string]interface{}) error {
	arg := params.EntityStatusArgs{
		Tag:    m.tag.String(),
		Status: status.String(),
		Info:   info,
		Data:   data,
	}
	args := params.SetStatus{Entities: []params.EntityStatusArgs{arg}}
	var result params.ErrorResults
	if err := m.st.facade.FacadeCall("SetModificationStatus", args, &result); err != nil {
		return err
	}
	return result.OneError()
}
|
go
|
{
"resource": ""
}
|
q3802
|
AvailabilityZone
|
train
|
// AvailabilityZone returns the availability zone reported for the
// machine's instance.
func (m *Machine) AvailabilityZone() (string, error) {
	args := params.Entities{
		Entities: []params.Entity{{Tag: m.tag.String()}},
	}
	var results params.StringResults
	if err := m.st.facade.FacadeCall("AvailabilityZone", args, &results); err != nil {
		return "", err
	}
	if n := len(results.Results); n != 1 {
		return "", fmt.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return "", res.Error
	}
	return res.Result, nil
}
|
go
|
{
"resource": ""
}
|
q3803
|
SetInstanceInfo
|
train
|
// SetInstanceInfo records the results of provisioning the machine in a
// single facade call: its instance identity, hardware characteristics,
// storage volumes/attachments, network configuration, and charm
// profiles.
func (m *Machine) SetInstanceInfo(
	id instance.Id, displayName string, nonce string, characteristics *instance.HardwareCharacteristics,
	networkConfig []params.NetworkConfig, volumes []params.Volume,
	volumeAttachments map[string]params.VolumeAttachmentInfo, charmProfiles []string,
) error {
	var result params.ErrorResults
	args := params.InstancesInfo{
		Machines: []params.InstanceInfo{{
			Tag:               m.tag.String(),
			InstanceId:        id,
			DisplayName:       displayName,
			Nonce:             nonce,
			Characteristics:   characteristics,
			Volumes:           volumes,
			VolumeAttachments: volumeAttachments,
			NetworkConfig:     networkConfig,
			CharmProfiles:     charmProfiles,
		}},
	}
	err := m.st.facade.FacadeCall("SetInstanceInfo", args, &result)
	if err != nil {
		return err
	}
	// A single entity was sent, so OneError surfaces its error, if any.
	return result.OneError()
}
|
go
|
{
"resource": ""
}
|
q3804
|
WatchContainers
|
train
|
// WatchContainers returns a StringsWatcher notifying of lifecycle
// changes to containers of the given type on this machine. The type
// must be non-empty and one of the known container types.
func (m *Machine) WatchContainers(ctype instance.ContainerType) (watcher.StringsWatcher, error) {
	if ctype == "" {
		return nil, fmt.Errorf("container type must be specified")
	}
	known := false
	for _, candidate := range instance.ContainerTypes {
		if candidate == ctype {
			known = true
			break
		}
	}
	if !known {
		return nil, fmt.Errorf("unsupported container type %q", ctype)
	}
	args := params.WatchContainers{
		Params: []params.WatchContainer{
			{MachineTag: m.tag.String(), ContainerType: string(ctype)},
		},
	}
	var results params.StringsWatchResults
	if err := m.st.facade.FacadeCall("WatchContainers", args, &results); err != nil {
		return nil, err
	}
	if n := len(results.Results); n != 1 {
		return nil, fmt.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return nil, res.Error
	}
	return apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), res), nil
}
|
go
|
{
"resource": ""
}
|
q3805
|
WatchAllContainers
|
train
|
// WatchAllContainers returns a StringsWatcher notifying of lifecycle
// changes to containers on this machine. Unlike WatchContainers, no
// container-type filter is set in the request.
func (m *Machine) WatchAllContainers() (watcher.StringsWatcher, error) {
	args := params.WatchContainers{
		Params: []params.WatchContainer{
			{MachineTag: m.tag.String()},
		},
	}
	var results params.StringsWatchResults
	if err := m.st.facade.FacadeCall("WatchContainers", args, &results); err != nil {
		return nil, err
	}
	if n := len(results.Results); n != 1 {
		return nil, fmt.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return nil, res.Error
	}
	return apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), res), nil
}
|
go
|
{
"resource": ""
}
|
q3806
|
SetSupportedContainers
|
train
|
// SetSupportedContainers records the container types this machine is
// able to host.
func (m *Machine) SetSupportedContainers(containerTypes ...instance.ContainerType) error {
	args := params.MachineContainersParams{
		Params: []params.MachineContainers{
			{MachineTag: m.tag.String(), ContainerTypes: containerTypes},
		},
	}
	var results params.ErrorResults
	if err := m.st.facade.FacadeCall("SetSupportedContainers", args, &results); err != nil {
		return err
	}
	if n := len(results.Results); n != 1 {
		return fmt.Errorf("expected 1 result, got %d", n)
	}
	if apiErr := results.Results[0].Error; apiErr != nil {
		return apiErr
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3807
|
SupportedContainers
|
train
|
// SupportedContainers returns the container types the machine supports
// and whether that set has been determined yet.
func (m *Machine) SupportedContainers() ([]instance.ContainerType, bool, error) {
	args := params.Entities{
		Entities: []params.Entity{
			{Tag: m.tag.String()},
		},
	}
	var results params.MachineContainerResults
	if err := m.st.facade.FacadeCall("SupportedContainers", args, &results); err != nil {
		return nil, false, err
	}
	if n := len(results.Results); n != 1 {
		return nil, false, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return nil, false, res.Error
	}
	return res.ContainerTypes, res.Determined, nil
}
|
go
|
{
"resource": ""
}
|
q3808
|
SetCharmProfiles
|
train
|
// SetCharmProfiles records the charm profiles in use on the machine.
func (m *Machine) SetCharmProfiles(profiles []string) error {
	args := params.SetProfileArgs{
		Args: []params.SetProfileArg{
			{
				Entity:   params.Entity{Tag: m.tag.String()},
				Profiles: profiles,
			},
		},
	}
	var results params.ErrorResults
	if err := m.st.facade.FacadeCall("SetCharmProfiles", args, &results); err != nil {
		return err
	}
	if n := len(results.Results); n != 1 {
		return fmt.Errorf("expected 1 result, got %d", n)
	}
	if res := results.Results[0]; res.Error != nil {
		return res.Error
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3809
|
Init
|
train
|
// Init parses the positional arguments as term IDs, each of which must
// carry an explicit (non-zero) revision, and stores them on the
// command for later use.
func (c *agreeCommand) Init(args []string) error {
	if len(args) < 1 {
		return errors.New("missing arguments")
	}
	for _, t := range args {
		termId, err := charm.ParseTerm(t)
		if err != nil {
			return errors.Annotate(err, "invalid term format")
		}
		// Revision zero means the user did not specify one.
		if termId.Revision == 0 {
			return errors.Errorf("must specify a valid term revision %q", t)
		}
		c.terms = append(c.terms, term{owner: termId.Owner, name: termId.Name, revision: termId.Revision})
		c.termIds = append(c.termIds, t)
	}
	// NOTE(review): this check looks unreachable — with len(args) >= 1
	// the loop above either appends a term for every argument or
	// returns early; confirm before removing.
	if len(c.terms) == 0 {
		return errors.New("must specify a valid term revision")
	}
	return c.CommandBase.Init([]string{})
}
|
go
|
{
"resource": ""
}
|
q3810
|
LeadershipClaimer
|
train
|
// LeadershipClaimer returns a leadership.Claimer whose underlying
// lease claimer is resolved lazily from the state's lease manager.
func (st *State) LeadershipClaimer() leadership.Claimer {
	newClaimer := func() (lease.Claimer, error) {
		manager := st.workers.leadershipManager()
		return manager.Claimer(applicationLeadershipNamespace, st.modelUUID())
	}
	return leadershipClaimer{lazyLeaseClaimer{newClaimer}}
}
|
go
|
{
"resource": ""
}
|
q3811
|
LeadershipChecker
|
train
|
// LeadershipChecker returns a leadership.Checker whose underlying
// lease checker is resolved lazily from the state's lease manager.
func (st *State) LeadershipChecker() leadership.Checker {
	newChecker := func() (lease.Checker, error) {
		manager := st.workers.leadershipManager()
		return manager.Checker(applicationLeadershipNamespace, st.modelUUID())
	}
	return leadershipChecker{lazyLeaseChecker{newChecker}}
}
|
go
|
{
"resource": ""
}
|
q3812
|
buildTxnWithLeadership
|
train
|
// buildTxnWithLeadership wraps a transaction source so every built
// transaction is prefixed with assertions that the given leadership
// token still holds.
func buildTxnWithLeadership(buildTxn jujutxn.TransactionSource, token leadership.Token) jujutxn.TransactionSource {
	return func(attempt int) ([]txn.Op, error) {
		var assertOps []txn.Op
		if err := token.Check(attempt, &assertOps); err != nil {
			return nil, errors.Annotatef(err, "prerequisites failed")
		}
		ops, err := buildTxn(attempt)
		switch {
		case err == jujutxn.ErrNoOperations:
			return nil, jujutxn.ErrNoOperations
		case err != nil:
			return nil, errors.Trace(err)
		}
		return append(assertOps, ops...), nil
	}
}
|
go
|
{
"resource": ""
}
|
q3813
|
InitDbLogs
|
train
|
// InitDbLogs ensures that each index in logIndexes exists on the
// model's log collection, creating any that are missing.
func InitDbLogs(session *mgo.Session, modelUUID string) error {
	coll := session.DB(logsDB).C(logCollectionName(modelUUID))
	for _, key := range logIndexes {
		if err := coll.EnsureIndex(mgo.Index{Key: key}); err != nil {
			return errors.Annotate(err, "cannot create index for logs collection")
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3814
|
NewLastSentLogTracker
|
train
|
// NewLastSentLogTracker returns a tracker for the last log record
// forwarded to the given sink for the given model. It copies the mongo
// session, so the caller is responsible for closing the tracker.
func NewLastSentLogTracker(st ModelSessioner, modelUUID, sink string) *LastSentLogTracker {
	tracker := &LastSentLogTracker{
		id:      fmt.Sprintf("%s#%s", modelUUID, sink),
		model:   modelUUID,
		sink:    sink,
		session: st.MongoSession().Copy(),
	}
	return tracker
}
|
go
|
{
"resource": ""
}
|
q3815
|
Set
|
train
|
// Set upserts the marker (record ID and timestamp) of the last log
// entry forwarded for this tracker's model/sink pair.
func (logger *LastSentLogTracker) Set(recID, recTimestamp int64) error {
	collection := logger.session.DB(logsDB).C(forwardedC)
	_, err := collection.UpsertId(
		logger.id,
		lastSentDoc{
			ID:              logger.id,
			ModelUUID:       logger.model,
			Sink:            logger.sink,
			RecordID:        recID,
			RecordTimestamp: recTimestamp,
		},
	)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3816
|
Get
|
train
|
// Get returns the record ID and timestamp of the last forwarded log
// entry, or ErrNeverForwarded when no marker has been stored yet.
func (logger *LastSentLogTracker) Get() (int64, int64, error) {
	coll := logger.session.DB(logsDB).C(forwardedC)
	var doc lastSentDoc
	switch err := coll.FindId(logger.id).One(&doc); err {
	case nil:
		return doc.RecordID, doc.RecordTimestamp, nil
	case mgo.ErrNotFound:
		return 0, 0, errors.Trace(ErrNeverForwarded)
	default:
		return 0, 0, errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q3817
|
Log
|
train
|
// Log validates every record up front and then bulk-inserts them into
// the logs collection; validating first avoids a partial bulk write
// caused by a malformed record.
func (logger *DbLogger) Log(records []LogRecord) error {
	for _, r := range records {
		if err := validateInputLogRecord(r); err != nil {
			return errors.Annotate(err, "validating input log record")
		}
	}
	bulk := logger.logsColl.Bulk()
	for _, r := range records {
		// An unset (zero) version is stored as an empty string.
		var versionString string
		if r.Version != version.Zero {
			versionString = r.Version.String()
		}
		bulk.Insert(&logDoc{
			// TODO(axw) Use a controller-global int
			// sequence for Id, so we can order by
			// insertion.
			Id:       bson.NewObjectId(),
			Time:     r.Time.UnixNano(),
			Entity:   r.Entity.String(),
			Version:  versionString,
			Module:   r.Module,
			Location: r.Location,
			Level:    int(r.Level),
			Message:  r.Message,
		})
	}
	_, err := bulk.Run()
	return errors.Annotatef(err, "inserting %d log record(s)", len(records))
}
|
go
|
{
"resource": ""
}
|
q3818
|
Close
|
train
|
// Close releases the mongo session backing the logger's collection.
// It is a no-op if the logger has no collection.
func (logger *DbLogger) Close() {
	if logger.logsColl != nil {
		logger.logsColl.Database.Session.Close()
	}
}
|
go
|
{
"resource": ""
}
|
q3819
|
NewLogTailer
|
train
|
// NewLogTailer starts a tailer streaming log records for the state's
// model. The copied session and the output channel are both closed
// when the tailer's loop exits; call Stop to shut it down.
func NewLogTailer(st LogTailerState, params LogTailerParams) (LogTailer, error) {
	session := st.MongoSession().Copy()
	t := &logTailer{
		modelUUID:       st.ModelUUID(),
		session:         session,
		logsColl:        session.DB(logsDB).C(logCollectionName(st.ModelUUID())).With(session),
		params:          params,
		logCh:           make(chan *LogRecord),
		recentIds:       newRecentIdTracker(maxRecentLogIds),
		maxInitialLines: maxInitialLines,
	}
	t.tomb.Go(func() error {
		// Cleanup runs when the loop ends, regardless of why.
		defer close(t.logCh)
		defer session.Close()
		err := t.loop()
		return errors.Cause(err)
	})
	return t, nil
}
|
go
|
{
"resource": ""
}
|
q3820
|
Stop
|
train
|
// Stop kills the tailer and waits for its loop to finish, returning
// whatever error the loop exited with.
func (t *logTailer) Stop() error {
	t.tomb.Kill(nil)
	return t.tomb.Wait()
}
|
go
|
{
"resource": ""
}
|
q3821
|
initLogsSession
|
train
|
// initLogsSession returns a mongo session together with the model's
// log collection; the caller owns (and must close) the session.
func initLogsSession(st ModelSessioner) (*mgo.Session, *mgo.Collection) {
	session, db := initLogsSessionDB(st)
	return session, db.C(logCollectionName(st.ModelUUID()))
}
|
go
|
{
"resource": ""
}
|
q3822
|
getCollectionTotalMB
|
train
|
// getCollectionTotalMB sums the sizes, in MB, of the given collections.
func getCollectionTotalMB(colls map[string]*mgo.Collection) (int, error) {
	var total int
	for _, coll := range colls {
		mb, err := getCollectionMB(coll)
		if err != nil {
			return 0, errors.Trace(err)
		}
		total += mb
	}
	return total, nil
}
|
go
|
{
"resource": ""
}
|
q3823
|
getLogCollections
|
train
|
// getLogCollections returns the per-model log collections in db, keyed
// by the model UUID embedded in each collection name's suffix.
func getLogCollections(db *mgo.Database) (map[string]*mgo.Collection, error) {
	names, err := db.CollectionNames()
	if err != nil {
		return nil, errors.Trace(err)
	}
	colls := make(map[string]*mgo.Collection)
	for _, name := range names {
		if strings.HasPrefix(name, logsCPrefix) {
			modelUUID := strings.TrimPrefix(name, logsCPrefix)
			colls[modelUUID] = db.C(name)
		}
	}
	return colls, nil
}
|
go
|
{
"resource": ""
}
|
q3824
|
findModelWithMostLogs
|
train
|
// findModelWithMostLogs returns the model UUID whose log collection
// holds the most rows, along with that row count.
func findModelWithMostLogs(colls map[string]*mgo.Collection) (string, int, error) {
	var bestUUID string
	var bestCount int
	for modelUUID, coll := range colls {
		n, err := getRowCountForCollection(coll)
		if err != nil {
			return "", -1, errors.Trace(err)
		}
		if n > bestCount {
			bestUUID, bestCount = modelUUID, n
		}
	}
	return bestUUID, bestCount, nil
}
|
go
|
{
"resource": ""
}
|
q3825
|
getRowCountForCollection
|
train
|
// getRowCountForCollection returns the number of documents in coll.
func getRowCountForCollection(coll *mgo.Collection) (int, error) {
	n, err := coll.Count()
	if err != nil {
		return -1, errors.Annotate(err, "failed to get log count")
	}
	return n, nil
}
|
go
|
{
"resource": ""
}
|
q3826
|
SetUpGroups
|
train
|
// SetUpGroups returns the names of the security groups an instance
// should be launched with: the shared juju group (opening the API
// port), plus a per-machine or global group depending on the firewall
// mode, and optionally the tenant's "default" group.
func (c *legacyNovaFirewaller) SetUpGroups(ctx context.ProviderCallContext, controllerUUID, machineId string, apiPort int) ([]string, error) {
	jujuGroup, err := c.setUpGlobalGroup(ctx, c.jujuGroupName(controllerUUID), apiPort)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var machineGroup nova.SecurityGroup
	switch c.environ.Config().FirewallMode() {
	case config.FwInstance:
		machineGroup, err = c.ensureGroup(ctx, c.machineGroupName(controllerUUID, machineId), nil)
	case config.FwGlobal:
		machineGroup, err = c.ensureGroup(ctx, c.globalGroupName(controllerUUID), nil)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	groupNames := []string{jujuGroup.Name, machineGroup.Name}
	// Optionally attach the tenant's pre-existing "default" group too.
	if c.environ.ecfg().useDefaultSecurityGroup() {
		groupNames = append(groupNames, "default")
	}
	return groupNames, nil
}
|
go
|
{
"resource": ""
}
|
q3827
|
ensureGroup
|
train
|
// ensureGroup returns the named security group, creating it with the
// given rules if it does not already exist. A creation race (duplicate
// value error) is resolved by re-reading the existing group.
func (c *legacyNovaFirewaller) ensureGroup(ctx context.ProviderCallContext, name string, rules []nova.RuleInfo) (nova.SecurityGroup, error) {
	novaClient := c.environ.nova()
	// First attempt to look up an existing group by name.
	group, err := novaClient.SecurityGroupByName(name)
	if err == nil {
		// NOTE(review): err is nil on this path, so this credential
		// check looks like a no-op — confirm whether it was intended
		// for the error path instead.
		common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
		// Group exists, so assume it is correctly set up and return it.
		// TODO(jam): 2013-09-18 http://pad.lv/121795
		// We really should verify the group is set up correctly,
		// because deleting and re-creating environments can get us bad
		// groups (especially if they were set up under Python)
		return *group, nil
	}
	// Doesn't exist, so try and create it.
	group, err = novaClient.CreateSecurityGroup(name, "juju group")
	if err != nil {
		if !gooseerrors.IsDuplicateValue(err) {
			return legacyZeroGroup, err
		} else {
			// We just tried to create a duplicate group, so load the existing group.
			group, err = novaClient.SecurityGroupByName(name)
			if err != nil {
				common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
				return legacyZeroGroup, err
			}
			return *group, nil
		}
	}
	// The new group is created so now add the rules.
	group.Rules = make([]nova.SecurityGroupRule, len(rules))
	for i, rule := range rules {
		rule.ParentGroupId = group.Id
		if rule.Cidr == "" {
			// http://pad.lv/1226996 Rules that don't have a CIDR
			// are meant to apply only to this group. If you don't
			// supply CIDR or GroupId then openstack assumes you
			// mean CIDR=0.0.0.0/0
			rule.GroupId = &group.Id
		}
		groupRule, err := novaClient.CreateSecurityGroupRule(rule)
		if err != nil && !gooseerrors.IsDuplicateValue(err) {
			common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
			return legacyZeroGroup, err
		}
		group.Rules[i] = *groupRule
	}
	return *group, nil
}
|
go
|
{
"resource": ""
}
|
q3828
|
DeleteAllModelGroups
|
train
|
// DeleteAllModelGroups removes every security group whose name matches
// this model's juju group name pattern.
func (c *legacyNovaFirewaller) DeleteAllModelGroups(ctx context.ProviderCallContext) error {
	return deleteSecurityGroupsMatchingName(ctx, c.deleteSecurityGroups, c.jujuGroupRegexp())
}
|
go
|
{
"resource": ""
}
|
q3829
|
DeleteGroups
|
train
|
// DeleteGroups removes the security groups with the given names.
func (c *legacyNovaFirewaller) DeleteGroups(ctx context.ProviderCallContext, names ...string) error {
	return deleteSecurityGroupsOneOfNames(ctx, c.deleteSecurityGroups, names...)
}
|
go
|
{
"resource": ""
}
|
q3830
|
legacyRuleMatchesPortRange
|
train
|
// legacyRuleMatchesPortRange reports whether the security group rule
// covers exactly the protocol and port range of the ingress rule.
// Rules with any nil field cannot match.
func legacyRuleMatchesPortRange(rule nova.SecurityGroupRule, portRange network.IngressRule) bool {
	if rule.IPProtocol == nil || rule.FromPort == nil || rule.ToPort == nil {
		return false
	}
	sameProto := *rule.IPProtocol == portRange.Protocol
	samePorts := *rule.FromPort == portRange.FromPort && *rule.ToPort == portRange.ToPort
	return sameProto && samePorts
}
|
go
|
{
"resource": ""
}
|
q3831
|
List
|
train
|
// List returns metadata for all stored backups.
func (a *API) List(args params.BackupsListArgs) (params.BackupsListResult, error) {
	var result params.BackupsListResult
	backups, closer := newBackups(a.backend)
	defer closer.Close()
	metaList, err := backups.List()
	if err != nil {
		return result, errors.Trace(err)
	}
	items := make([]params.BackupsMetadataResult, len(metaList))
	for i, meta := range metaList {
		items[i] = CreateResult(meta, "")
	}
	result.List = items
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3832
|
AuthorisedKeys
|
train
|
// AuthorisedKeys returns the authorised SSH keys recorded for the
// machine with the given tag.
func (st *State) AuthorisedKeys(tag names.MachineTag) ([]string, error) {
	args := params.Entities{
		Entities: []params.Entity{{Tag: tag.String()}},
	}
	var results params.StringsResults
	if err := st.facade.FacadeCall("AuthorisedKeys", args, &results); err != nil {
		// TODO: Not directly tested
		return nil, err
	}
	if n := len(results.Results); n != 1 {
		// TODO: Not directly tested
		return nil, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return nil, res.Error
	}
	return res.Result, nil
}
|
go
|
{
"resource": ""
}
|
q3833
|
WatchAuthorisedKeys
|
train
|
// WatchAuthorisedKeys returns a notify watcher that fires when the
// authorised SSH keys for the given machine change.
func (st *State) WatchAuthorisedKeys(tag names.MachineTag) (watcher.NotifyWatcher, error) {
	args := params.Entities{
		Entities: []params.Entity{{Tag: tag.String()}},
	}
	var results params.NotifyWatchResults
	if err := st.facade.FacadeCall("WatchAuthorisedKeys", args, &results); err != nil {
		// TODO: Not directly tested
		return nil, err
	}
	if n := len(results.Results); n != 1 {
		// TODO: Not directly tested
		return nil, errors.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		// TODO: Not directly tested
		return nil, res.Error
	}
	return apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), res), nil
}
|
go
|
{
"resource": ""
}
|
q3834
|
precheckInstance
|
train
|
// precheckInstance asks the configured policy's prechecker whether an
// instance with the given series, constraints, placement, and volume
// attachments can be provisioned. A nil policy, or a prechecker that
// is not implemented, means no checking is performed.
func (st *State) precheckInstance(
	series string,
	cons constraints.Value,
	placement string,
	volumeAttachments []storage.VolumeAttachmentParams,
) error {
	if st.policy == nil {
		return nil
	}
	prechecker, err := st.policy.Prechecker()
	if errors.IsNotImplemented(err) {
		return nil
	} else if err != nil {
		return err
	}
	// A nil prechecker with no error is a broken policy contract.
	if prechecker == nil {
		return errors.New("policy returned nil prechecker without an error")
	}
	return prechecker.PrecheckInstance(
		CallContext(st),
		environs.PrecheckInstanceParams{
			Series:            series,
			Constraints:       cons,
			Placement:         placement,
			VolumeAttachments: volumeAttachments,
		})
}
|
go
|
{
"resource": ""
}
|
q3835
|
ResolveConstraints
|
train
|
// ResolveConstraints merges the given constraints with the model's
// constraints via the state's validator.
func (st *State) ResolveConstraints(cons constraints.Value) (constraints.Value, error) {
	var zero constraints.Value
	v, err := st.constraintsValidator()
	if err != nil {
		return zero, err
	}
	modelCons, err := st.ModelConstraints()
	if err != nil {
		return zero, err
	}
	return v.Merge(modelCons, cons)
}
|
go
|
{
"resource": ""
}
|
q3836
|
validateConstraints
|
train
|
// validateConstraints runs cons through the state's constraints
// validator and returns the validator's findings.
func (st *State) validateConstraints(cons constraints.Value) ([]string, error) {
	v, err := st.constraintsValidator()
	if err != nil {
		return nil, err
	}
	return v.Validate(cons)
}
|
go
|
{
"resource": ""
}
|
q3837
|
validate
|
train
|
// validate runs the policy's config validator over cfg, returning the
// validated config. A nil policy, or a validator that is not
// implemented, accepts cfg unchanged.
func (st *State) validate(cfg, old *config.Config) (valid *config.Config, err error) {
	if st.policy == nil {
		return cfg, nil
	}
	configValidator, err := st.policy.ConfigValidator()
	switch {
	case errors.IsNotImplemented(err):
		return cfg, nil
	case err != nil:
		return nil, err
	case configValidator == nil:
		return nil, errors.New("policy returned nil configValidator without an error")
	}
	return configValidator.Validate(cfg, old)
}
|
go
|
{
"resource": ""
}
|
q3838
|
Id
|
train
|
// Id returns the machine's ID from the cached details, read under the
// machine's mutex.
func (m *Machine) Id() string {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.details.Id
}
|
go
|
{
"resource": ""
}
|
q3839
|
InstanceId
|
train
|
// InstanceId returns the cached instance ID for the machine, or a
// NotProvisioned error when no instance ID has been recorded.
func (m *Machine) InstanceId() (instance.Id, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	id := m.details.InstanceId
	if id == "" {
		return "", errors.NotProvisionedf("machine %v", m.details.Id)
	}
	return instance.Id(id), nil
}
|
go
|
{
"resource": ""
}
|
q3840
|
CharmProfiles
|
train
|
// CharmProfiles returns the cached charm profile names for the
// machine, read under the machine's mutex.
func (m *Machine) CharmProfiles() []string {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.details.CharmProfiles
}
|
go
|
{
"resource": ""
}
|
q3841
|
Units
|
train
|
// Units returns the units assigned to this machine, including
// subordinate units whose principal unit is on this machine.
func (m *Machine) Units() ([]*Unit, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	result := make([]*Unit, 0)
	for unitName, unit := range m.model.units {
		if unit.details.MachineId == m.details.Id {
			result = append(result, unit)
		}
		// Subordinates are located via their principal's machine.
		if unit.details.Subordinate {
			principalUnit, found := m.model.units[unit.details.Principal]
			if !found {
				return result, errors.NotFoundf("principal unit %q for subordinate %s", unit.details.Principal, unitName)
			}
			// NOTE(review): if a subordinate ever carried this
			// machine's ID itself it would be appended twice here —
			// presumably subordinates have an empty MachineId; confirm.
			if principalUnit.details.MachineId == m.details.Id {
				result = append(result, unit)
			}
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3842
|
newInstanceType
|
train
|
// newInstanceType converts an Azure VM size into a Juju instance type,
// assigning a synthetic relative cost based on the size's position in
// a hard-coded ordering (cheapest first); unknown sizes get the
// highest cost.
func newInstanceType(size compute.VirtualMachineSize) instances.InstanceType {
	// We're not doing real costs for now; just made-up, relative
	// costs, to ensure we choose the right VMs given matching
	// constraints. This was based on the pricing for West US,
	// and assumes that all regions have the same relative costs.
	//
	// DS is the same price as D, but is targeted at Premium Storage.
	// Likewise for GS and G. We put the premium storage variants
	// directly after their non-premium counterparts.
	machineSizeCost := []string{
		"Standard_A0",
		"Standard_A1",
		"Standard_D1",
		"Standard_DS1",
		"Standard_D1_v2",
		"Standard_A2",
		"Standard_D2",
		"Standard_DS2",
		"Standard_D2_v2",
		"Standard_D11",
		"Standard_DS11",
		"Standard_D11_v2",
		"Standard_A3",
		"Standard_D3",
		"Standard_DS3",
		"Standard_D3_v2",
		"Standard_D12",
		"Standard_DS12",
		"Standard_D12_v2",
		"Standard_A5", // Yes, A5 is cheaper than A4.
		"Standard_A4",
		"Standard_A6",
		"Standard_G1",
		"Standard_GS1",
		"Standard_D4",
		"Standard_DS4",
		"Standard_D4_v2",
		"Standard_D13",
		"Standard_DS13",
		"Standard_D13_v2",
		"Standard_A7",
		"Standard_A10",
		"Standard_G2",
		"Standard_GS2",
		"Standard_D5_v2",
		"Standard_D14",
		"Standard_DS14",
		"Standard_D14_v2",
		"Standard_A8",
		"Standard_A11",
		"Standard_G3",
		"Standard_GS3",
		"Standard_A9",
		"Standard_G4",
		"Standard_GS4",
		"Standard_GS5",
		"Standard_G5",
		// Basic instances are less capable than standard
		// ones, so we don't want to be providing them as
		// a default. This is achieved by costing them at
		// a higher price, even though they are cheaper
		// in reality.
		"Basic_A0",
		"Basic_A1",
		"Basic_A2",
		"Basic_A3",
		"Basic_A4",
	}
	// Anything not in the list is more expensive that is in the list.
	cost := len(machineSizeCost)
	sizeName := to.String(size.Name)
	for i, name := range machineSizeCost {
		if sizeName == name {
			cost = i
			break
		}
	}
	if cost == len(machineSizeCost) {
		logger.Debugf("found unknown VM size %q", sizeName)
	}
	vtype := "Hyper-V"
	return instances.InstanceType{
		Id:       sizeName,
		Name:     sizeName,
		Arches:   []string{arch.AMD64},
		CpuCores: uint64(to.Int32(size.NumberOfCores)),
		Mem:      uint64(to.Int32(size.MemoryInMB)),
		// NOTE(axw) size.OsDiskSizeInMB is the *maximum*
		// OS-disk size. When we create a VM, we can create
		// one that is smaller.
		RootDisk: mbToMib(uint64(to.Int32(size.OsDiskSizeInMB))),
		Cost:     uint64(cost),
		VirtType: &vtype,
		// tags are not currently supported by azure
	}
}
|
go
|
{
"resource": ""
}
|
q3843
|
defaultToBaselineSpec
|
train
|
// defaultToBaselineSpec returns the constraint with a defaultMem
// memory constraint filled in when neither an instance type nor a
// memory value is specified.
func defaultToBaselineSpec(constraint constraints.Value) constraints.Value {
	result := constraint
	if result.HasInstanceType() || result.Mem != nil {
		return result
	}
	mem := uint64(defaultMem)
	result.Mem = &mem
	return result
}
|
go
|
{
"resource": ""
}
|
q3844
|
NewFirewallerFacade
|
train
|
// NewFirewallerFacade returns a FirewallerAPI backed by the firewaller
// API client.
func NewFirewallerFacade(apiCaller base.APICaller) (FirewallerAPI, error) {
	client, err := firewaller.NewClient(apiCaller)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return client, nil
}
|
go
|
{
"resource": ""
}
|
q3845
|
NewWorker
|
train
|
// NewWorker returns a firewaller worker built from the given config.
func NewWorker(cfg Config) (worker.Worker, error) {
	fw, err := NewFirewaller(cfg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return fw, nil
}
|
go
|
{
"resource": ""
}
|
q3846
|
crossmodelFirewallerFacadeFunc
|
train
|
// crossmodelFirewallerFacadeFunc returns a factory that dials the
// external controller as the anonymous user and wraps the cross-model
// relations client together with its connection so both are closed as
// one.
func crossmodelFirewallerFacadeFunc(
	connectionFunc apicaller.NewExternalControllerConnectionFunc,
) newCrossModelFacadeFunc {
	return func(apiInfo *api.Info) (CrossModelFirewallerFacadeCloser, error) {
		// Cross-model calls authenticate as the anonymous user.
		apiInfo.Tag = names.NewUserTag(api.AnonymousUsername)
		conn, err := connectionFunc(apiInfo)
		if err != nil {
			return nil, errors.Trace(err)
		}
		facade := crossmodelrelations.NewClient(conn)
		return &crossModelFirewallerFacadeCloser{facade, conn}, nil
	}
}
|
go
|
{
"resource": ""
}
|
q3847
|
NewWorker
|
train
|
// NewWorker validates the config, constructs the updater, and starts
// the updater worker's main loop.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Annotate(err, "validating config")
	}
	updater, err := config.NewUpdater()
	if err != nil {
		return nil, errors.Annotate(err, "getting new updater")
	}
	w := &updaterWorker{config: config, updater: updater}
	w.tomb.Go(w.loop)
	return w, nil
}
|
go
|
{
"resource": ""
}
|
q3848
|
SetStatus
|
train
|
// SetStatus updates the relation's status in state.
func (r *Relation) SetStatus(status relation.Status) error {
	return r.st.setRelationStatus(r.id, status)
}
|
go
|
{
"resource": ""
}
|
q3849
|
Endpoint
|
train
|
// Endpoint returns the endpoint of the relation for the authenticated
// unit's application.
func (r *Relation) Endpoint() (*Endpoint, error) {
	// NOTE: This differs from state.Relation.Endpoint(), because when
	// talking to the API, there's already an authenticated entity - the
	// unit, and we can find out its application name.
	result, err := r.st.relation(r.tag, r.st.unitTag)
	if err != nil {
		return nil, err
	}
	return &Endpoint{r.toCharmRelation(result.Endpoint.Relation)}, nil
}
|
go
|
{
"resource": ""
}
|
q3850
|
NewListCommand
|
train
|
// NewListCommand returns a list command, wrapped for model access,
// built from the supplied dependencies.
func NewListCommand(deps ListDeps) modelcmd.ModelCommand {
	return modelcmd.Wrap(&ListCommand{deps: deps})
}
|
go
|
{
"resource": ""
}
|
q3851
|
NewMachinerAPI
|
train
|
// NewMachinerAPI creates a Machiner API facade. Only machine agents
// may use it; both read and modify access are restricted to the
// authenticated entity itself.
func NewMachinerAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*MachinerAPI, error) {
	if !authorizer.AuthMachineAgent() {
		return nil, common.ErrPerm
	}
	// Both capabilities reduce to "is this the entity itself?".
	ownerOnly := func() (common.AuthFunc, error) {
		return authorizer.AuthOwner, nil
	}
	getCanModify := ownerOnly
	getCanRead := ownerOnly
	return &MachinerAPI{
		LifeGetter:         common.NewLifeGetter(st, getCanRead),
		StatusSetter:       common.NewStatusSetter(st, getCanModify),
		DeadEnsurer:        common.NewDeadEnsurer(st, getCanModify),
		AgentEntityWatcher: common.NewAgentEntityWatcher(st, resources, getCanRead),
		APIAddresser:       common.NewAPIAddresser(st, resources),
		NetworkConfigAPI:   networkingcommon.NewNetworkConfigAPI(st, state.CallContext(st), getCanModify),
		st:                 st,
		auth:               authorizer,
		getCanModify:       getCanModify,
		getCanRead:         getCanRead,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3852
|
Jobs
|
train
|
// Jobs returns, for each requested machine entity, the jobs assigned
// to it; per-entity failures are reported in the matching result slot.
func (api *MachinerAPI) Jobs(args params.Entities) (params.JobsResults, error) {
	result := params.JobsResults{
		Results: make([]params.JobsResult, len(args.Entities)),
	}
	canRead, err := api.getCanRead()
	if err != nil {
		return result, err
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		if !canRead(tag) {
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := api.getMachine(tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		machineJobs := machine.Jobs()
		jobs := make([]multiwatcher.MachineJob, len(machineJobs))
		// Use a distinct index name here to avoid shadowing i above.
		for j, job := range machineJobs {
			jobs[j] = job.ToParams()
		}
		result.Results[i].Jobs = jobs
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3853
|
GetCollection
|
train
|
// GetCollection returns the named collection from the state database
// along with a closer the caller must invoke when done.
func (m *environMongo) GetCollection(name string) (mongo.Collection, func()) {
	return m.state.db().GetCollection(name)
}
|
go
|
{
"resource": ""
}
|
q3854
|
RunTransaction
|
train
|
// RunTransaction executes the transaction produced by buildTxn against
// the state database.
func (m *environMongo) RunTransaction(buildTxn jujutxn.TransactionSource) error {
	return m.state.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3855
|
SetUpgradeMongoMode
|
train
|
// SetUpgradeMongoMode prepares the controller for a mongo upgrade to
// version v. It returns the HA master, the remaining HA members, and
// the non-master replicaset members, and — as a side effect — flags
// every controller machine to stop mongo until the given version.
func (st *State) SetUpgradeMongoMode(v mongo.Version) (UpgradeMongoParams, error) {
	currentInfo, err := st.ControllerInfo()
	if err != nil {
		return UpgradeMongoParams{}, errors.Annotate(err, "could not obtain current controller information")
	}
	result := UpgradeMongoParams{}
	machines := []*Machine{}
	for _, mID := range currentInfo.MachineIds {
		m, err := st.Machine(mID)
		if err != nil {
			return UpgradeMongoParams{}, errors.Annotate(err, "cannot change all the replicas")
		}
		isMaster, err := mongo.IsMaster(st.session, m)
		if err != nil {
			return UpgradeMongoParams{}, errors.Annotatef(err, "cannot determine if machine %q is master", mID)
		}
		paddr, err := m.PublicAddress()
		if err != nil {
			return UpgradeMongoParams{}, errors.Annotatef(err, "cannot obtain public address for machine: %v", m)
		}
		tag := m.Tag()
		mtag := tag.(names.MachineTag)
		member := HAMember{
			Tag:           mtag.Id(),
			PublicAddress: paddr,
			Series:        m.Series(),
		}
		// Split HA members into the master and the rest.
		if isMaster {
			result.Master = member
		} else {
			result.Members = append(result.Members, member)
		}
		machines = append(machines, m)
	}
	rsMembers, err := replicaset.CurrentMembers(st.session)
	if err != nil {
		return UpgradeMongoParams{}, errors.Annotate(err, "cannot obtain current replicaset members")
	}
	masterRs, err := replicaset.MasterHostPort(st.session)
	if err != nil {
		return UpgradeMongoParams{}, errors.Annotate(err, "cannot determine master on replicaset members")
	}
	// Collect only the non-master replicaset members.
	for _, m := range rsMembers {
		if m.Address != masterRs {
			result.RsMembers = append(result.RsMembers, m)
		}
	}
	// Flag each controller machine to stop mongo until version v.
	for _, m := range machines {
		if err := m.SetStopMongoUntilVersion(v); err != nil {
			return UpgradeMongoParams{}, errors.Annotate(err, "cannot trigger replica shutdown")
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3856
|
ResumeReplication
|
train
|
// ResumeReplication re-adds the given members to the replicaset.
func (st *State) ResumeReplication(members []replicaset.Member) error {
	return replicaset.Add(st.session, members...)
}
|
go
|
{
"resource": ""
}
|
q3857
|
NewCleanerAPI
|
train
|
// NewCleanerAPI creates a Cleaner API endpoint; only controller agents
// are permitted to use it.
func NewCleanerAPI(
	st *state.State,
	res facade.Resources,
	authorizer facade.Authorizer,
) (*CleanerAPI, error) {
	if !authorizer.AuthController() {
		return nil, common.ErrPerm
	}
	api := &CleanerAPI{
		st:        getState(st),
		resources: res,
	}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q3858
|
WatchCleanups
|
train
|
// WatchCleanups registers a cleanup watcher and returns its resource
// ID. The watcher's initial event is consumed here; if the channel is
// closed instead, the watcher's error is returned in the result.
func (api *CleanerAPI) WatchCleanups() (params.NotifyWatchResult, error) {
	watch := api.st.WatchCleanups()
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: api.resources.Register(watch),
		}, nil
	}
	return params.NotifyWatchResult{
		Error: common.ServerError(watcher.EnsureErr(watch)),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3859
|
ChangeAgentTools
|
train
|
// ChangeAgentTools installs the new agent tools named in the
// upgrade-ready error and logs the old and new versions.
func (e *UpgradeReadyError) ChangeAgentTools() error {
	agentTools, err := tools.ChangeAgentTools(e.DataDir, e.AgentName, e.NewTools)
	if err != nil {
		return err
	}
	logger.Infof("upgraded from %v to %v (%q)", e.OldTools, agentTools.Version, agentTools.URL)
	return nil
}
|
go
|
{
"resource": ""
}
|
q3860
|
IsAuthorisationFailure
|
train
|
// IsAuthorisationFailure reports whether err is an OCI service error
// whose code is one of the known authorisation failure codes.
func IsAuthorisationFailure(err error) bool {
	if err == nil {
		return false
	}
	svcErr, ok := err.(ociCommon.ServiceError)
	if !ok {
		// Fall back to the SDK's own detection; it isn't checked
		// first because it is hard to test.
		svcErr, ok = ociCommon.IsServiceError(err)
	}
	return ok && authErrorCodes.Contains(svcErr.GetCode())
}
|
go
|
{
"resource": ""
}
|
q3861
|
HandleCredentialError
|
train
|
// HandleCredentialError delegates to the common handler, using the OCI
// authorisation-failure predicate to decide whether err invalidates
// the call context's credential.
func HandleCredentialError(err error, ctx context.ProviderCallContext) {
	common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
}
|
go
|
{
"resource": ""
}
|
q3862
|
Manifold
|
train
|
// Manifold returns a dependency manifold that runs the log forwarding
// orchestrator, using the API caller resource named in config. Nil
// OpenLogStream/OpenLogForwarder hooks fall back to the production
// implementations.
func Manifold(config ManifoldConfig) dependency.Manifold {
	streamOpener := config.OpenLogStream
	if streamOpener == nil {
		streamOpener = func(caller base.APICaller, cfg params.LogStreamConfig, controllerUUID string) (LogStream, error) {
			return logstream.Open(caller, cfg, controllerUUID)
		}
	}
	forwarderOpener := config.OpenLogForwarder
	if forwarderOpener == nil {
		forwarderOpener = NewLogForwarder
	}
	start := func(context dependency.Context) (worker.Worker, error) {
		var apiCaller base.APICaller
		if err := context.Get(config.APICallerName, &apiCaller); err != nil {
			return nil, errors.Trace(err)
		}
		agentFacade, err := apiagent.NewState(apiCaller)
		if err != nil {
			return nil, errors.Trace(err)
		}
		controllerCfg, err := agentFacade.ControllerConfig()
		if err != nil {
			return nil, errors.Annotate(err, "cannot read controller config")
		}
		orchestrator, err := newOrchestratorForController(OrchestratorArgs{
			ControllerUUID:   controllerCfg.ControllerUUID(),
			LogForwardConfig: agentFacade,
			Caller:           apiCaller,
			Sinks:            config.Sinks,
			OpenLogStream:    streamOpener,
			OpenLogForwarder: forwarderOpener,
		})
		return orchestrator, errors.Annotate(err, "creating log forwarding orchestrator")
	}
	return dependency.Manifold{
		Inputs: []string{config.APICallerName},
		Start:  start,
	}
}
|
go
|
{
"resource": ""
}
|
q3863
|
Open
|
train
|
// Open implements environs.EnvironProvider. It builds the OCI API
// clients from the cloud credential attributes, applies the model
// config, and returns a ready-to-use Environ.
//
// Fix: the error returned by instance.NewNamespace was previously
// assigned but never checked, so a bad model UUID was silently ignored.
func (e *EnvironProvider) Open(params environs.OpenParams) (environs.Environ, error) {
	logger.Infof("opening model %q", params.Config.Name())
	if err := validateCloudSpec(params.Cloud); err != nil {
		return nil, errors.Trace(err)
	}
	creds := params.Cloud.Credential.Attributes()
	jujuConfig := common.JujuConfigProvider{
		Key:         []byte(creds["key"]),
		Fingerprint: creds["fingerprint"],
		Passphrase:  creds["pass-phrase"],
		Tenancy:     creds["tenancy"],
		User:        creds["user"],
		OCIRegion:   creds["region"],
	}
	provider, err := jujuConfig.Config()
	if err != nil {
		return nil, errors.Trace(err)
	}
	compute, err := ociCore.NewComputeClientWithConfigurationProvider(provider)
	if err != nil {
		return nil, errors.Trace(err)
	}
	networking, err := ociCore.NewVirtualNetworkClientWithConfigurationProvider(provider)
	if err != nil {
		return nil, errors.Trace(err)
	}
	storage, err := ociCore.NewBlockstorageClientWithConfigurationProvider(provider)
	if err != nil {
		return nil, errors.Trace(err)
	}
	identity, err := ociIdentity.NewIdentityClientWithConfigurationProvider(provider)
	if err != nil {
		return nil, errors.Trace(err)
	}
	env := &Environ{
		Compute:    compute,
		Networking: networking,
		Storage:    storage,
		Firewall:   networking,
		Identity:   identity,
		ociConfig:  provider,
		clock:      clock.WallClock,
		p:          e,
	}
	if err := env.SetConfig(params.Config); err != nil {
		return nil, errors.Trace(err)
	}
	env.namespace, err = instance.NewNamespace(env.Config().UUID())
	if err != nil {
		return nil, errors.Trace(err)
	}
	cfg := env.ecfg()
	if cfg.compartmentID() == nil {
		return nil, errors.New("compartment-id may not be empty")
	}
	// Enforce a minimum address-space size: the prefix length must be
	// in the range /8 to /16.
	addressSpace := cfg.addressSpace()
	if _, ipNET, err := net.ParseCIDR(*addressSpace); err == nil {
		size, _ := ipNET.Mask.Size()
		if size > 16 {
			return nil, errors.Errorf("configured subnet (%q) is not large enough. Please use a prefix length in the range /8 to /16. Current prefix length is /%d", *addressSpace, size)
		}
	} else {
		return nil, errors.Trace(err)
	}
	return env, nil
}
|
go
|
{
"resource": ""
}
|
q3864
|
Validate
|
train
|
// Validate checks that cfg is a valid configuration for this provider,
// validating any provider-specific (unknown) attributes against the
// declared fields and defaults, and returns the resulting config.
func (e EnvironProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) {
	if err := config.Validate(cfg, old); err != nil {
		return nil, err
	}
	attrs, err := cfg.ValidateUnknownAttrs(configFields, configDefaults)
	if err != nil {
		return nil, err
	}
	return cfg.Apply(attrs)
}
|
go
|
{
"resource": ""
}
|
q3865
|
NewMockLinkLayerDevice
|
train
|
// NewMockLinkLayerDevice creates a new mock instance bound to ctrl.
func NewMockLinkLayerDevice(ctrl *gomock.Controller) *MockLinkLayerDevice {
	m := &MockLinkLayerDevice{ctrl: ctrl}
	m.recorder = &MockLinkLayerDeviceMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3866
|
Addresses
|
train
|
// Addresses mocks the base method, dispatching through the controller.
func (m *MockLinkLayerDevice) Addresses() ([]*state.Address, error) {
	results := m.ctrl.Call(m, "Addresses")
	addrs, _ := results[0].([]*state.Address)
	err, _ := results[1].(error)
	return addrs, err
}
|
go
|
{
"resource": ""
}
|
q3867
|
Addresses
|
train
|
// Addresses registers an expected call of Addresses with the mock's
// controller.
func (mr *MockLinkLayerDeviceMockRecorder) Addresses() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addresses", reflect.TypeOf((*MockLinkLayerDevice)(nil).Addresses))
}
|
go
|
{
"resource": ""
}
|
q3868
|
EthernetDeviceForBridge
|
train
|
// EthernetDeviceForBridge mocks the base method, dispatching through
// the controller.
func (m *MockLinkLayerDevice) EthernetDeviceForBridge(arg0 string) (state.LinkLayerDeviceArgs, error) {
	results := m.ctrl.Call(m, "EthernetDeviceForBridge", arg0)
	args, _ := results[0].(state.LinkLayerDeviceArgs)
	err, _ := results[1].(error)
	return args, err
}
|
go
|
{
"resource": ""
}
|
q3869
|
EthernetDeviceForBridge
|
train
|
// EthernetDeviceForBridge registers an expected call of
// EthernetDeviceForBridge with the mock's controller.
func (mr *MockLinkLayerDeviceMockRecorder) EthernetDeviceForBridge(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthernetDeviceForBridge", reflect.TypeOf((*MockLinkLayerDevice)(nil).EthernetDeviceForBridge), arg0)
}
|
go
|
{
"resource": ""
}
|
q3870
|
IsUp
|
train
|
// IsUp mocks the base method, dispatching through the controller.
func (m *MockLinkLayerDevice) IsUp() bool {
	results := m.ctrl.Call(m, "IsUp")
	up, _ := results[0].(bool)
	return up
}
|
go
|
{
"resource": ""
}
|
q3871
|
MTU
|
train
|
// MTU mocks the base method, dispatching through the controller.
func (m *MockLinkLayerDevice) MTU() uint {
	results := m.ctrl.Call(m, "MTU")
	mtu, _ := results[0].(uint)
	return mtu
}
|
go
|
{
"resource": ""
}
|
q3872
|
ParentDevice
|
train
|
// ParentDevice mocks the base method, dispatching through the
// controller.
func (m *MockLinkLayerDevice) ParentDevice() (containerizer.LinkLayerDevice, error) {
	results := m.ctrl.Call(m, "ParentDevice")
	dev, _ := results[0].(containerizer.LinkLayerDevice)
	err, _ := results[1].(error)
	return dev, err
}
|
go
|
{
"resource": ""
}
|
q3873
|
NewMockUnit
|
train
|
// NewMockUnit creates a new mock instance bound to ctrl.
func NewMockUnit(ctrl *gomock.Controller) *MockUnit {
	m := &MockUnit{ctrl: ctrl}
	m.recorder = &MockUnitMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3874
|
NewMockApplication
|
train
|
// NewMockApplication creates a new mock instance bound to ctrl.
func NewMockApplication(ctrl *gomock.Controller) *MockApplication {
	m := &MockApplication{ctrl: ctrl}
	m.recorder = &MockApplicationMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3875
|
NewMockCharm
|
train
|
// NewMockCharm creates a new mock instance bound to ctrl.
func NewMockCharm(ctrl *gomock.Controller) *MockCharm {
	m := &MockCharm{ctrl: ctrl}
	m.recorder = &MockCharmMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3876
|
AddFlags
|
train
|
// AddFlags registers the --data-dir flag on the given flag set,
// defaulting to util.DataDir.
func (c *agentConf) AddFlags(f *gnuflag.FlagSet) {
	// TODO(dimitern) 2014-02-19 bug 1282025
	// We need to pass a config location here instead and
	// use it to locate the conf and the infer the data-dir
	// from there instead of passing it like that.
	f.StringVar(&c.dataDir, "data-dir", util.DataDir, "directory for juju data")
}
|
go
|
{
"resource": ""
}
|
q3877
|
CheckArgs
|
train
|
// CheckArgs verifies that --data-dir was supplied and that no
// positional arguments remain.
func (c *agentConf) CheckArgs(args []string) error {
	if c.dataDir != "" {
		return cmd.CheckEmpty(args)
	}
	return util.RequiredError("data-dir")
}
|
go
|
{
"resource": ""
}
|
q3878
|
ReadConfig
|
train
|
// ReadConfig parses the given entity tag, reads the corresponding
// agent configuration from the data directory, and caches it on the
// receiver under the mutex.
func (c *agentConf) ReadConfig(tag string) error {
	parsedTag, err := names.ParseTag(tag)
	if err != nil {
		return errors.Trace(err)
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	cfg, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, parsedTag))
	if err != nil {
		return errors.Trace(err)
	}
	c._config = cfg
	return nil
}
|
go
|
{
"resource": ""
}
|
q3879
|
ChangeConfig
|
train
|
// ChangeConfig applies the given mutator to the cached agent config
// and, if that succeeds, persists the result. Both steps run under
// the mutex.
func (c *agentConf) ChangeConfig(change agent.ConfigMutator) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err := change(c._config); err != nil {
		return errors.Trace(err)
	}
	// Annotate is a no-op on a nil error, so this covers success too.
	return errors.Annotate(c._config.Write(), "cannot write agent configuration")
}
|
go
|
{
"resource": ""
}
|
q3880
|
CurrentConfig
|
train
|
// CurrentConfig returns a clone of the cached agent config, taken
// under the mutex so callers get a consistent snapshot.
func (c *agentConf) CurrentConfig() agent.Config {
	c.mu.Lock()
	snapshot := c._config.Clone()
	c.mu.Unlock()
	return snapshot
}
|
go
|
{
"resource": ""
}
|
q3881
|
GetJujuVersion
|
train
|
// GetJujuVersion reads the agent configuration for machineAgent from
// dataDir and returns the version it has been upgraded to.
//
// Fix: the error annotation previously read "failed to read agent
// config file." — the trailing period and "failed to" prefix violate
// Go error-string conventions and are inconsistent with the sibling
// "cannot write agent configuration" message.
func GetJujuVersion(machineAgent string, dataDir string) (version.Number, error) {
	agentConf := NewAgentConf(dataDir)
	if err := agentConf.ReadConfig(machineAgent); err != nil {
		return version.Number{}, errors.Annotate(err, "cannot read agent config file")
	}
	config := agentConf.CurrentConfig()
	if config == nil {
		return version.Number{}, errors.Errorf("%s agent conf is not found", machineAgent)
	}
	return config.UpgradedToVersion(), nil
}
|
go
|
{
"resource": ""
}
|
q3882
|
NewMockRunner
|
train
|
// NewMockRunner creates a new mock instance bound to ctrl.
func NewMockRunner(ctrl *gomock.Controller) *MockRunner {
	m := &MockRunner{ctrl: ctrl}
	m.recorder = &MockRunnerMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3883
|
MaybePruneTransactions
|
train
|
// MaybePruneTransactions mocks the base method, dispatching through
// the controller.
func (m *MockRunner) MaybePruneTransactions(arg0 txn.PruneOptions) error {
	results := m.ctrl.Call(m, "MaybePruneTransactions", arg0)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3884
|
ResumeTransactions
|
train
|
// ResumeTransactions mocks the base method, dispatching through the
// controller.
func (m *MockRunner) ResumeTransactions() error {
	results := m.ctrl.Call(m, "ResumeTransactions")
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3885
|
ResumeTransactions
|
train
|
// ResumeTransactions registers an expected call of ResumeTransactions
// with the mock's controller.
func (mr *MockRunnerMockRecorder) ResumeTransactions() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeTransactions", reflect.TypeOf((*MockRunner)(nil).ResumeTransactions))
}
|
go
|
{
"resource": ""
}
|
q3886
|
Run
|
train
|
// Run mocks the base method, dispatching through the controller.
func (m *MockRunner) Run(arg0 txn.TransactionSource) error {
	results := m.ctrl.Call(m, "Run", arg0)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3887
|
Run
|
train
|
// Run registers an expected call of Run with the mock's controller.
func (mr *MockRunnerMockRecorder) Run(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockRunner)(nil).Run), arg0)
}
|
go
|
{
"resource": ""
}
|
q3888
|
NewMockEnviron
|
train
|
// NewMockEnviron creates a new mock instance bound to ctrl.
func NewMockEnviron(ctrl *gomock.Controller) *MockEnviron {
	m := &MockEnviron{ctrl: ctrl}
	m.recorder = &MockEnvironMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q3889
|
AllInstances
|
train
|
// AllInstances mocks the base method, dispatching through the
// controller.
func (m *MockEnviron) AllInstances(arg0 context.ProviderCallContext) ([]instances.Instance, error) {
	results := m.ctrl.Call(m, "AllInstances", arg0)
	insts, _ := results[0].([]instances.Instance)
	err, _ := results[1].(error)
	return insts, err
}
|
go
|
{
"resource": ""
}
|
q3890
|
Bootstrap
|
train
|
// Bootstrap mocks the base method, dispatching through the controller.
func (m *MockEnviron) Bootstrap(arg0 environs.BootstrapContext, arg1 context.ProviderCallContext, arg2 environs.BootstrapParams) (*environs.BootstrapResult, error) {
	results := m.ctrl.Call(m, "Bootstrap", arg0, arg1, arg2)
	res, _ := results[0].(*environs.BootstrapResult)
	err, _ := results[1].(error)
	return res, err
}
|
go
|
{
"resource": ""
}
|
q3891
|
ControllerInstances
|
train
|
// ControllerInstances mocks the base method, dispatching through the
// controller.
func (m *MockEnviron) ControllerInstances(arg0 context.ProviderCallContext, arg1 string) ([]instance.Id, error) {
	results := m.ctrl.Call(m, "ControllerInstances", arg0, arg1)
	ids, _ := results[0].([]instance.Id)
	err, _ := results[1].(error)
	return ids, err
}
|
go
|
{
"resource": ""
}
|
q3892
|
DestroyController
|
train
|
// DestroyController registers an expected call of DestroyController
// with the mock's controller.
func (mr *MockEnvironMockRecorder) DestroyController(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DestroyController", reflect.TypeOf((*MockEnviron)(nil).DestroyController), arg0, arg1)
}
|
go
|
{
"resource": ""
}
|
q3893
|
Provider
|
train
|
// Provider registers an expected call of Provider with the mock's
// controller.
func (mr *MockEnvironMockRecorder) Provider() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Provider", reflect.TypeOf((*MockEnviron)(nil).Provider))
}
|
go
|
{
"resource": ""
}
|
q3894
|
NewK8sBroker
|
train
|
// NewK8sBroker returns a kubernetes client for the specified k8s rest
// config and model config. controllerUUID may be empty (e.g. add-k8s
// without -c, when no controller exists yet).
func NewK8sBroker(
	controllerUUID string,
	k8sRestConfig *rest.Config,
	cfg *config.Config,
	newClient NewK8sClientFunc,
	newWatcher NewK8sWatcherFunc,
	clock jujuclock.Clock,
) (*kubernetesClient, error) {
	coreClient, extClient, err := newClient(k8sRestConfig)
	if err != nil {
		return nil, errors.Trace(err)
	}
	envCfg, err := providerInstance.newConfig(cfg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	modelUUID := envCfg.UUID()
	if modelUUID == "" {
		return nil, errors.NotValidf("modelUUID is required")
	}
	broker := &kubernetesClient{
		clock:                       clock,
		clientUnlocked:              coreClient,
		apiextensionsClientUnlocked: extClient,
		envCfgUnlocked:              envCfg.Config,
		namespace:                   envCfg.Name(),
		modelUUID:                   modelUUID,
		newWatcher:                  newWatcher,
		newClient:                   newClient,
		annotations: k8sannotations.New(nil).
			Add(annotationModelUUIDKey, modelUUID),
	}
	if controllerUUID != "" {
		// Tag the broker with the controller it belongs to, when known.
		broker.annotations.Add(annotationControllerUUIDKey, controllerUUID)
	}
	return broker, nil
}
|
go
|
{
"resource": ""
}
|
q3895
|
addAnnotations
|
train
|
// addAnnotations records the key/value pair on the broker's annotation
// set and returns the updated annotations.
func (k *kubernetesClient) addAnnotations(key, value string) k8sannotations.Annotation {
	return k.annotations.Add(key, value)
}
|
go
|
{
"resource": ""
}
|
q3896
|
Config
|
train
|
// Config returns the model configuration held by the broker, read
// under the lock.
func (k *kubernetesClient) Config() *config.Config {
	k.lock.Lock()
	defer k.lock.Unlock()
	return k.envCfgUnlocked
}
|
go
|
{
"resource": ""
}
|
q3897
|
PrepareForBootstrap
|
train
|
// PrepareForBootstrap derives the controller namespace from
// controllerName and verifies that bootstrapping can proceed: no
// namespace with that name may exist, no other controller may already
// be running in the cluster, and the operator storage class must
// validate. Returns an AlreadyExists error when a clash is found.
func (k *kubernetesClient) PrepareForBootstrap(ctx environs.BootstrapContext, controllerName string) error {
	alreadyExistErr := errors.NewAlreadyExists(nil,
		fmt.Sprintf(`a controller called %q already exists on this k8s cluster.
	Please bootstrap again and choose a different controller name.`, controllerName),
	)
	k.namespace = DecideControllerNamespace(controllerName)
	// ensure no existing namespace has the same name.
	_, err := k.getNamespaceByName(k.namespace)
	if err == nil {
		return alreadyExistErr
	}
	if !errors.IsNotFound(err) {
		return errors.Trace(err)
	}
	// Good, no existing namespace has the same name.
	// Now, try to find if there is any existing controller running in this cluster.
	// Note: we have to do this check before we are confident to support multi controllers running in same k8s cluster.
	_, err = k.listNamespacesByAnnotations(k.annotations)
	if err == nil {
		return alreadyExistErr
	}
	if !errors.IsNotFound(err) {
		return errors.Trace(err)
	}
	// All good, no existing controller found on the cluster.
	// The namespace will be set to controller-name in newcontrollerStack.
	// do validation on storage class.
	_, err = k.validateOperatorStorage()
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3898
|
Create
|
train
|
// Create creates the namespace backing this model on the cluster.
// Both parameters are unused; they exist to satisfy the interface.
func (k *kubernetesClient) Create(context.ProviderCallContext, environs.CreateParams) error {
	// must raise errors.AlreadyExistsf if it's already exist.
	return k.createNamespace(k.namespace)
}
|
go
|
{
"resource": ""
}
|
q3899
|
Bootstrap
|
train
|
// Bootstrap deploys a Juju controller into the cluster. It validates
// the operator storage class up front and returns a BootstrapResult
// whose CaasBootstrapFinalizer performs the actual deployment:
// finishing and verifying the controller pod config, checking the
// hosted model namespace does not already exist, switching the broker
// to the controller namespace, and deploying the controller stack.
// Setting a bootstrap series is not supported on kubernetes.
func (k *kubernetesClient) Bootstrap(
	ctx environs.BootstrapContext,
	callCtx context.ProviderCallContext,
	args environs.BootstrapParams,
) (*environs.BootstrapResult, error) {
	if args.BootstrapSeries != "" {
		return nil, errors.NotSupportedf("set series for bootstrapping to kubernetes")
	}
	storageClass, err := k.validateOperatorStorage()
	if err != nil {
		return nil, errors.Trace(err)
	}
	finalizer := func(ctx environs.BootstrapContext, pcfg *podcfg.ControllerPodConfig, opts environs.BootstrapDialOpts) (err error) {
		if err = podcfg.FinishControllerPodConfig(pcfg, k.Config()); err != nil {
			return errors.Trace(err)
		}
		if err = pcfg.VerifyConfig(); err != nil {
			return errors.Trace(err)
		}
		logger.Debugf("controller pod config: \n%+v", pcfg)
		// validate hosted model name if we need to create it.
		if hostedModelName, has := pcfg.GetHostedModel(); has {
			_, err := k.getNamespaceByName(hostedModelName)
			if err == nil {
				return errors.NewAlreadyExists(nil,
					fmt.Sprintf(`
	namespace %q already exists in the cluster,
	please choose a different hosted model name then try again.`, hostedModelName),
				)
			}
			if !errors.IsNotFound(err) {
				return errors.Trace(err)
			}
			// hosted model is all good.
		}
		// we use controller name to name controller namespace in bootstrap time.
		setControllerNamespace := func(controllerName string, broker *kubernetesClient) error {
			nsName := DecideControllerNamespace(controllerName)
			_, err := broker.GetNamespace(nsName)
			if errors.IsNotFound(err) {
				// all good.
				broker.SetNamespace(nsName)
				// ensure controller specific annotations.
				_ = broker.addAnnotations(annotationControllerIsControllerKey, "true")
				return nil
			}
			if err == nil {
				// this should never happen because we avoid it in broker.PrepareForBootstrap before reaching here.
				return errors.NotValidf("existing namespace %q found", broker.namespace)
			}
			return errors.Trace(err)
		}
		if err := setControllerNamespace(pcfg.ControllerName, k); err != nil {
			return errors.Trace(err)
		}
		// create configmap, secret, volume, statefulset, etc resources for controller stack.
		controllerStack, err := newcontrollerStack(ctx, JujuControllerStackName, storageClass, k, pcfg)
		if err != nil {
			return errors.Trace(err)
		}
		return errors.Annotate(
			controllerStack.Deploy(),
			"creating controller stack for controller",
		)
	}
	return &environs.BootstrapResult{
		// TODO(bootstrap): review this default arch and series(required for determining DataDir etc.) later.
		Arch:                   arch.AMD64,
		Series:                 jujuversion.SupportedLTS(),
		CaasBootstrapFinalizer: finalizer,
	}, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.