_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5200
|
NewCertificate
|
train
|
// NewCertificate wraps the given PEM-encoded certificate and private
// key in a Certificate value.
func NewCertificate(certPEM, keyPEM []byte) *Certificate {
	cert := Certificate{}
	cert.CertPEM = certPEM
	cert.KeyPEM = keyPEM
	return &cert
}
|
go
|
{
"resource": ""
}
|
q5201
|
Validate
|
train
|
// Validate returns a not-valid error when either the certificate PEM
// or the key PEM is absent; otherwise it returns nil.
func (c *Certificate) Validate() error {
	switch {
	case len(c.CertPEM) == 0:
		return errors.NotValidf("missing cert PEM")
	case len(c.KeyPEM) == 0:
		return errors.NotValidf("missing key PEM")
	default:
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5202
|
WriteCertPEM
|
train
|
// WriteCertPEM copies the certificate PEM bytes to out.
func (c *Certificate) WriteCertPEM(out io.Writer) error {
	if _, err := out.Write(c.CertPEM); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5203
|
WriteKeyPEM
|
train
|
// WriteKeyPEM copies the private key PEM bytes to out.
func (c *Certificate) WriteKeyPEM(out io.Writer) error {
	if _, err := out.Write(c.KeyPEM); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5204
|
Fingerprint
|
train
|
// Fingerprint returns the hex-encoded SHA-256 digest of the parsed
// certificate's raw DER bytes.
func (c *Certificate) Fingerprint() (string, error) {
	parsed, err := c.X509()
	if err != nil {
		return "", errors.Trace(err)
	}
	sum := sha256.Sum256(parsed.Raw)
	return fmt.Sprintf("%x", sum), nil
}
|
go
|
{
"resource": ""
}
|
q5205
|
X509
|
train
|
// X509 decodes the stored certificate PEM and parses it into an
// x509.Certificate.
func (c *Certificate) X509() (*x509.Certificate, error) {
	block, _ := pem.Decode(c.CertPEM)
	if block == nil {
		return nil, errors.Errorf("invalid cert PEM (%d bytes)", len(c.CertPEM))
	}
	parsed, err := x509.ParseCertificate(block.Bytes)
	if err == nil {
		return parsed, nil
	}
	return nil, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5206
|
AsCreateRequest
|
train
|
// AsCreateRequest converts the certificate into an LXD client-certificate
// creation request, base64-encoding the DER bytes from the PEM block.
func (c *Certificate) AsCreateRequest() (api.CertificatesPost, error) {
	block, _ := pem.Decode(c.CertPEM)
	if block == nil {
		return api.CertificatesPost{}, errors.New("failed to decode certificate PEM")
	}
	req := api.CertificatesPost{
		Certificate: base64.StdEncoding.EncodeToString(block.Bytes),
	}
	req.CertificatePut.Name = c.Name
	req.CertificatePut.Type = "client"
	return req, nil
}
|
go
|
{
"resource": ""
}
|
q5207
|
stateStepsFor253
|
train
|
// stateStepsFor253 returns the database upgrade steps for Juju 2.5.3.
func stateStepsFor253() []Step {
	step := &upgradeStep{
		description: "update inherited controller config global key",
		targets:     []Target{DatabaseMaster},
		run: func(context Context) error {
			return context.State().UpdateInheritedControllerConfig()
		},
	}
	return []Step{step}
}
|
go
|
{
"resource": ""
}
|
q5208
|
SetPodSpec
|
train
|
// SetPodSpec records the pod spec for the application identified by appTag.
// It inserts a new pod-spec document when none exists and updates the
// existing one otherwise, all inside a single transaction guarded on the
// application remaining alive. Setting an identical spec is a no-op.
func (m *CAASModel) SetPodSpec(appTag names.ApplicationTag, spec string) error {
	buildTxn := func(attempt int) ([]txn.Op, error) {
		var prereqOps []txn.Op
		app, err := m.State().Application(appTag.Id())
		if err != nil {
			return nil, errors.Trace(err)
		}
		if app.Life() != Alive {
			return nil, errors.Errorf("application %s not alive", app.String())
		}
		// Make the whole transaction contingent on the application
		// still being alive when it commits.
		prereqOps = append(prereqOps, txn.Op{
			C:      applicationsC,
			Id:     app.doc.DocID,
			Assert: isAliveDoc,
		})
		op := txn.Op{
			C:  podSpecsC,
			Id: applicationGlobalKey(appTag.Id()),
		}
		existing, err := m.PodSpec(appTag)
		if err == nil {
			if existing == spec {
				// Identical spec already stored: skip the transaction.
				return nil, jujutxn.ErrNoOperations
			}
			op.Assert = txn.DocExists
			op.Update = bson.D{{"$set", bson.D{{"spec", spec}}}}
		} else if errors.IsNotFound(err) {
			// First spec for this application: insert a fresh document.
			op.Assert = txn.DocMissing
			op.Insert = containerSpecDoc{Spec: spec}
		} else {
			return nil, err
		}
		return append(prereqOps, op), nil
	}
	return m.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q5209
|
PodSpec
|
train
|
// PodSpec returns the stored pod spec for the given application, or a
// not-found error when no spec has been set.
func (m *CAASModel) PodSpec(appTag names.ApplicationTag) (string, error) {
	coll, cleanup := m.mb.db().GetCollection(podSpecsC)
	defer cleanup()
	var doc containerSpecDoc
	err := coll.FindId(applicationGlobalKey(appTag.Id())).One(&doc)
	switch err {
	case nil:
		return doc.Spec, nil
	case mgo.ErrNotFound:
		return "", errors.NotFoundf(
			"pod spec for %s",
			names.ReadableString(appTag),
		)
	default:
		return "", errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q5210
|
addNetworkDevice
|
train
|
// addNetworkDevice appends a vmxnet3 NIC to spec, connected to the given
// network. When the network has an entry in dvportgroupConfig it is a
// distributed virtual portgroup and the device is backed by the portgroup's
// switch; otherwise a plain network backing is used. A non-empty mac must
// pass VerifyMAC. The created device is returned.
func (c *Client) addNetworkDevice(
	ctx context.Context,
	spec *types.VirtualMachineConfigSpec,
	network *mo.Network,
	mac string,
	dvportgroupConfig map[types.ManagedObjectReference]types.DVPortgroupConfigInfo,
) (*types.VirtualVmxnet3, error) {
	var networkBacking types.BaseVirtualDeviceBackingInfo
	// NOTE: previously the looked-up element shadowed the map itself
	// (dvportgroupConfig, ok := dvportgroupConfig[...]); use a distinct
	// name so both stay clearly in scope.
	if dvportgroup, ok := dvportgroupConfig[network.Reference()]; ok {
		// It's a distributed virtual portgroup, so retrieve the details of
		// the distributed virtual switch, and return a backing info for
		// connecting the VM to the portgroup.
		var dvs mo.DistributedVirtualSwitch
		if err := c.client.RetrieveOne(
			ctx, *dvportgroup.DistributedVirtualSwitch, nil, &dvs,
		); err != nil {
			return nil, errors.Annotate(err, "retrieving distributed vSwitch details")
		}
		networkBacking = &types.VirtualEthernetCardDistributedVirtualPortBackingInfo{
			Port: types.DistributedVirtualSwitchPortConnection{
				SwitchUuid:   dvs.Uuid,
				PortgroupKey: dvportgroup.Key,
			},
		}
	} else {
		// It's not a distributed virtual portgroup, so return
		// a backing info for a plain old network interface.
		networkBacking = &types.VirtualEthernetCardNetworkBackingInfo{
			VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{
				DeviceName: network.Name,
			},
		}
	}
	var networkDevice types.VirtualVmxnet3
	wakeOnLan := true
	networkDevice.WakeOnLanEnabled = &wakeOnLan
	networkDevice.Backing = networkBacking
	if mac != "" {
		if !VerifyMAC(mac) {
			return nil, fmt.Errorf("Invalid MAC address: %q", mac)
		}
		networkDevice.AddressType = "Manual"
		networkDevice.MacAddress = mac
	}
	networkDevice.Connectable = &types.VirtualDeviceConnectInfo{
		StartConnected:    true,
		AllowGuestControl: true,
	}
	spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    &networkDevice,
	})
	return &networkDevice, nil
}
|
go
|
{
"resource": ""
}
|
q5211
|
VerifyMAC
|
train
|
// VerifyMAC reports whether mac is a valid manually-assigned VMware MAC
// address: six colon-separated hex octets in the VMware OUI 00:50:56,
// with the fourth octet no greater than 0x3f.
func VerifyMAC(mac string) bool {
	parts := strings.Split(mac, ":")
	if len(parts) != 6 {
		return false
	}
	// The first three octets must be the VMware OUI.
	if parts[0] != "00" || parts[1] != "50" || parts[2] != "56" {
		return false
	}
	for i := 3; i < 6; i++ {
		v, err := strconv.ParseUint(parts[i], 16, 8)
		if err != nil {
			return false
		}
		// 4th byte must be <= 0x3f for the manual allocation range.
		if i == 3 && v > 0x3f {
			return false
		}
	}
	return true
}
|
go
|
{
"resource": ""
}
|
q5212
|
computeResourceNetworks
|
train
|
// computeResourceNetworks returns the networks reachable from a compute
// resource, folding opaque networks and distributed virtual portgroups into
// the result. For portgroups it also returns a map from network reference
// to portgroup config, which callers use to pick the right device backing;
// the map is nil when no portgroups are present.
func (c *Client) computeResourceNetworks(
	ctx context.Context,
	computeResource *mo.ComputeResource,
) ([]mo.Network, map[types.ManagedObjectReference]types.DVPortgroupConfigInfo, error) {
	// Bucket references by concrete type so each kind is retrieved in
	// one bulk call.
	refsByType := make(map[string][]types.ManagedObjectReference)
	for _, network := range computeResource.Network {
		refsByType[network.Type] = append(refsByType[network.Type], network.Reference())
	}
	var networks []mo.Network
	if refs := refsByType["Network"]; len(refs) > 0 {
		if err := c.client.Retrieve(ctx, refs, nil, &networks); err != nil {
			return nil, nil, errors.Annotate(err, "retrieving network details")
		}
	}
	var opaqueNetworks []mo.OpaqueNetwork
	if refs := refsByType["OpaqueNetwork"]; len(refs) > 0 {
		if err := c.client.Retrieve(ctx, refs, nil, &opaqueNetworks); err != nil {
			return nil, nil, errors.Annotate(err, "retrieving opaque network details")
		}
		for _, on := range opaqueNetworks {
			networks = append(networks, on.Network)
		}
	}
	var dvportgroups []mo.DistributedVirtualPortgroup
	var dvportgroupConfig map[types.ManagedObjectReference]types.DVPortgroupConfigInfo
	if refs := refsByType["DistributedVirtualPortgroup"]; len(refs) > 0 {
		if err := c.client.Retrieve(ctx, refs, nil, &dvportgroups); err != nil {
			return nil, nil, errors.Annotate(err, "retrieving distributed virtual portgroup details")
		}
		dvportgroupConfig = make(map[types.ManagedObjectReference]types.DVPortgroupConfigInfo)
		// Portgroup networks are placed ahead of the networks gathered above.
		allnetworks := make([]mo.Network, len(dvportgroups)+len(networks))
		for i, d := range dvportgroups {
			allnetworks[i] = d.Network
			dvportgroupConfig[allnetworks[i].Reference()] = d.Config
		}
		copy(allnetworks[len(dvportgroups):], networks)
		networks = allnetworks
	}
	return networks, dvportgroupConfig, nil
}
|
go
|
{
"resource": ""
}
|
q5213
|
Init
|
train
|
// Init records the machine ids supplied as positional arguments.
func (c *showMachineCommand) Init(args []string) error {
	ids := args
	c.machineIds = ids
	return nil
}
|
go
|
{
"resource": ""
}
|
q5214
|
DiscoverService
|
train
|
// DiscoverService detects the local init system for the host series and
// returns a Service of the matching implementation for name/conf.
func DiscoverService(name string, conf common.Conf) (Service, error) {
	hostSeries := series.MustHostSeries()
	initName, err := discoverInitSystem(hostSeries)
	if err != nil {
		return nil, errors.Trace(err)
	}
	svc, err := newService(name, conf, initName, hostSeries)
	if err == nil {
		return svc, nil
	}
	return nil, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5215
|
VersionInitSystem
|
train
|
// VersionInitSystem returns the name of the init system used by the
// given series.
func VersionInitSystem(series string) (string, error) {
	initName, err := versionInitSystem(series)
	if err == nil {
		logger.Debugf("discovered init system %q from series %q", initName, series)
		return initName, nil
	}
	return "", errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5216
|
DiscoverInitSystemScript
|
train
|
func DiscoverInitSystemScript() string {
renderer := shell.BashRenderer{}
tests := []string{
discoverSystemd,
discoverUpstart,
"exit 1",
}
data := renderer.RenderScript(tests)
return string(data)
}
|
go
|
{
"resource": ""
}
|
q5217
|
newShellSelectCommand
|
train
|
// newShellSelectCommand builds a shell "case" statement dispatching on the
// value of the environment variable envVarName. For each known linux init
// system, handler returns the command for that branch and whether to
// include it; defaultCase becomes the body of the fallback (*) branch.
// Returns "" when handler produced no branches at all.
func newShellSelectCommand(envVarName, defaultCase string, handler func(string) (string, bool)) string {
	var cases []string
	const shellCaseStatement = `
case "$%s" in
%s
*)
%s
;;
esac`
	for _, initSystem := range linuxInitSystems {
		cmd, ok := handler(initSystem)
		if !ok {
			continue
		}
		cases = append(cases, initSystem+")", " "+cmd, " ;;")
	}
	if len(cases) == 0 {
		return ""
	}
	// [1:] strips the leading newline kept in the raw literal for readability.
	return fmt.Sprintf(shellCaseStatement[1:], envVarName, strings.Join(cases, "\n"), defaultCase)
}
|
go
|
{
"resource": ""
}
|
q5218
|
ContainerManagerConfig
|
train
|
// ContainerManagerConfig mocks the method of the same name on State.
func (m *MockState) ContainerManagerConfig(arg0 params.ContainerManagerConfigParams) (params.ContainerManagerConfig, error) {
	results := m.ctrl.Call(m, "ContainerManagerConfig", arg0)
	cfg, _ := results[0].(params.ContainerManagerConfig)
	err, _ := results[1].(error)
	return cfg, err
}
|
go
|
{
"resource": ""
}
|
q5219
|
HostChangesForContainer
|
train
|
// HostChangesForContainer registers an expected call to HostChangesForContainer.
func (mr *MockStateMockRecorder) HostChangesForContainer(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockState)(nil).HostChangesForContainer)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostChangesForContainer", method, arg0)
}
|
go
|
{
"resource": ""
}
|
q5220
|
Machines
|
train
|
// Machines mocks the variadic Machines method on State.
func (m *MockState) Machines(arg0 ...names_v2.MachineTag) ([]provisioner.MachineResult, error) {
	// Re-box each tag so gomock receives a flat []interface{}.
	varargs := []interface{}{}
	for _, tag := range arg0 {
		varargs = append(varargs, tag)
	}
	results := m.ctrl.Call(m, "Machines", varargs...)
	machines, _ := results[0].([]provisioner.MachineResult)
	err, _ := results[1].(error)
	return machines, err
}
|
go
|
{
"resource": ""
}
|
q5221
|
ResolveFilename
|
train
|
// ResolveFilename returns the download target filename, deriving one
// from the backup ID when the user did not supply a filename.
func (c *downloadCommand) ResolveFilename() string {
	if c.Filename != "" {
		return c.Filename
	}
	return backups.FilenamePrefix + c.ID + ".tar.gz"
}
|
go
|
{
"resource": ""
}
|
q5222
|
UnknownOrigin
|
train
|
// UnknownOrigin returns an Origin with every field set to its
// "unknown" placeholder value.
func UnknownOrigin() Origin {
	origin := Origin{}
	origin.Model = UnknownString
	origin.Machine = UnknownString
	origin.Hostname = UnknownString
	origin.Version = UnknownVersion
	return origin
}
|
go
|
{
"resource": ""
}
|
q5223
|
NewMetadata
|
train
|
// NewMetadata returns a fresh backup Metadata stamped with the current
// UTC time and the running Juju version.
func NewMetadata() *Metadata {
	meta := &Metadata{
		FileMetadata: filestorage.NewMetadata(),
		Origin: Origin{
			Version: jujuversion.Current,
		},
	}
	// TODO(fwereade): 2016-03-17 lp:1558657
	meta.Started = time.Now().UTC()
	return meta
}
|
go
|
{
"resource": ""
}
|
q5224
|
NewMetadataState
|
train
|
// NewMetadataState composes backup metadata for the current controller:
// origin fields come from the model, the local hostname, and the supplied
// machine and series; the CA cert and private key come from controller
// config and state serving info respectively.
func NewMetadataState(db DB, machine, series string) (*Metadata, error) {
	// hostname could be derived from the model...
	hostname, err := os.Hostname()
	if err != nil {
		// If os.Hostname() is not working, something is woefully wrong.
		// Run for the hills.
		return nil, errors.Annotate(err, "could not get hostname (system unstable?)")
	}
	meta := NewMetadata()
	meta.Origin.Model = db.ModelTag().Id()
	meta.Origin.Machine = machine
	meta.Origin.Hostname = hostname
	meta.Origin.Series = series
	si, err := db.StateServingInfo()
	if err != nil {
		return nil, errors.Annotate(err, "could not get server secrets")
	}
	controllerCfg, err := db.ControllerConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not get controller config")
	}
	// Absence of a CA cert is tolerated here and leaves the field empty.
	meta.CACert, _ = controllerCfg.CACert()
	meta.CAPrivateKey = si.CAPrivateKey
	return meta, nil
}
|
go
|
{
"resource": ""
}
|
q5225
|
MarkComplete
|
train
|
// MarkComplete records the archive's size, checksum and finish time.
// It fails when size is zero or checksum is empty.
func (m *Metadata) MarkComplete(size int64, checksum string) error {
	switch {
	case size == 0:
		return errors.New("missing size")
	case checksum == "":
		return errors.New("missing checksum")
	}
	// TODO(fwereade): 2016-03-17 lp:1558657
	finished := time.Now().UTC()
	if err := m.SetFileInfo(size, checksum, checksumFormat); err != nil {
		return errors.Annotate(err, "unexpected failure")
	}
	m.Finished = &finished
	return nil
}
|
go
|
{
"resource": ""
}
|
q5226
|
NewMetadataJSONReader
|
train
|
// NewMetadataJSONReader decodes a flat JSON metadata document from in and
// rebuilds the corresponding Metadata. Zero-valued Stored/Finished
// timestamps are treated as "not set".
func NewMetadataJSONReader(in io.Reader) (*Metadata, error) {
	var flat flatMetadata
	if err := json.NewDecoder(in).Decode(&flat); err != nil {
		return nil, errors.Trace(err)
	}
	meta := NewMetadata()
	meta.SetID(flat.ID)
	err := meta.SetFileInfo(flat.Size, flat.Checksum, flat.ChecksumFormat)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if !flat.Stored.IsZero() {
		meta.SetStored(&flat.Stored)
	}
	meta.Started = flat.Started
	if !flat.Finished.IsZero() {
		meta.Finished = &flat.Finished
	}
	meta.Notes = flat.Notes
	// Note the flat form still calls the model field "Environment".
	meta.Origin = Origin{
		Model:    flat.Environment,
		Machine:  flat.Machine,
		Hostname: flat.Hostname,
		Version:  flat.Version,
		Series:   flat.Series,
	}
	// TODO(wallyworld) - put these in a separate file.
	meta.CACert = flat.CACert
	meta.CAPrivateKey = flat.CAPrivateKey
	return meta, nil
}
|
go
|
{
"resource": ""
}
|
q5227
|
BuildMetadata
|
train
|
func BuildMetadata(file *os.File) (*Metadata, error) {
// Extract the file size.
fi, err := file.Stat()
if err != nil {
return nil, errors.Trace(err)
}
size := fi.Size()
// Extract the timestamp.
timestamp := fileTimestamp(fi)
// Get the checksum.
hasher := sha1.New()
_, err = io.Copy(hasher, file)
if err != nil {
return nil, errors.Trace(err)
}
rawsum := hasher.Sum(nil)
checksum := base64.StdEncoding.EncodeToString(rawsum)
// Build the metadata.
meta := NewMetadata()
meta.Started = time.Time{}
meta.Origin = UnknownOrigin()
err = meta.MarkComplete(size, checksum)
if err != nil {
return nil, errors.Trace(err)
}
meta.Finished = ×tamp
return meta, nil
}
|
go
|
{
"resource": ""
}
|
q5228
|
ControllerInfo
|
train
|
// ControllerInfo returns the stored external controller details as a
// crossmodel.ControllerInfo value.
func (rc *externalController) ControllerInfo() crossmodel.ControllerInfo {
	info := crossmodel.ControllerInfo{
		ControllerTag: names.NewControllerTag(rc.doc.Id),
	}
	info.Alias = rc.doc.Alias
	info.Addrs = rc.doc.Addrs
	info.CACert = rc.doc.CACert
	return info
}
|
go
|
{
"resource": ""
}
|
q5229
|
Save
|
train
|
// Save creates or updates the document for the given external controller,
// merging any supplied model UUIDs into the existing model list on update.
// The transaction also asserts that the local model is still active.
func (ec *externalControllers) Save(controller crossmodel.ControllerInfo, modelUUIDs ...string) (ExternalController, error) {
	if err := controller.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	doc := externalControllerDoc{
		Id:     controller.ControllerTag.Id(),
		Alias:  controller.Alias,
		Addrs:  controller.Addrs,
		CACert: controller.CACert,
	}
	buildTxn := func(int) ([]txn.Op, error) {
		model, err := ec.st.Model()
		if err != nil {
			return nil, errors.Annotate(err, "failed to load model")
		}
		if err := checkModelActive(ec.st); err != nil {
			return nil, errors.Trace(err)
		}
		// A not-found error selects the insert branch below; anything
		// else aborts.
		existing, err := ec.controller(controller.ControllerTag.Id())
		if err != nil && !errors.IsNotFound(err) {
			return nil, errors.Trace(err)
		}
		var ops []txn.Op
		if err == nil {
			// Update: union the new model UUIDs with those already stored.
			models := set.NewStrings(existing.Models...)
			models = models.Union(set.NewStrings(modelUUIDs...))
			ops = []txn.Op{{
				C:      externalControllersC,
				Id:     existing.Id,
				Assert: txn.DocExists,
				Update: bson.D{
					{"$set",
						bson.D{{"addresses", doc.Addrs},
							{"alias", doc.Alias},
							{"cacert", doc.CACert},
							{"models", models.Values()}},
					},
				},
			}, model.assertActiveOp()}
		} else {
			// Insert: record exactly the supplied model UUIDs.
			doc.Models = modelUUIDs
			ops = []txn.Op{{
				C:      externalControllersC,
				Id:     doc.Id,
				Assert: txn.DocMissing,
				Insert: doc,
			}, model.assertActiveOp()}
		}
		return ops, nil
	}
	if err := ec.st.db().Run(buildTxn); err != nil {
		return nil, errors.Annotate(err, "failed to create external controllers")
	}
	return &externalController{
		doc: doc,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5230
|
Remove
|
train
|
// Remove deletes the external controller document with the given UUID.
func (ec *externalControllers) Remove(controllerUUID string) error {
	op := txn.Op{
		C:      externalControllersC,
		Id:     controllerUUID,
		Remove: true,
	}
	err := ec.st.db().RunTransaction([]txn.Op{op})
	return errors.Annotate(err, "failed to remove external controller")
}
|
go
|
{
"resource": ""
}
|
q5231
|
Controller
|
train
|
// Controller returns the external controller with the given UUID.
func (ec *externalControllers) Controller(controllerUUID string) (ExternalController, error) {
	doc, err := ec.controller(controllerUUID)
	if err == nil {
		return &externalController{doc: *doc}, nil
	}
	return nil, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q5232
|
ControllerForModel
|
train
|
// ControllerForModel returns the single external controller hosting the
// given model. It is an error for zero or multiple controllers to match.
func (ec *externalControllers) ControllerForModel(modelUUID string) (ExternalController, error) {
	coll, closer := ec.st.db().GetCollection(externalControllersC)
	defer closer()
	var docs []externalControllerDoc
	query := bson.M{"models": bson.M{"$in": []string{modelUUID}}}
	if err := coll.Find(query).All(&docs); err != nil {
		return nil, errors.Trace(err)
	}
	if len(docs) == 0 {
		return nil, errors.NotFoundf("external controller with model %v", modelUUID)
	}
	if len(docs) > 1 {
		return nil, errors.Errorf("expected 1 controller with model %v, got %d", modelUUID, len(docs))
	}
	return &externalController{doc: docs[0]}, nil
}
|
go
|
{
"resource": ""
}
|
q5233
|
WatchController
|
train
|
// WatchController returns a watcher notifying on changes to the external
// controller document with the given UUID.
func (ec *externalControllers) WatchController(controllerUUID string) NotifyWatcher {
	w := newEntityWatcher(ec.st, externalControllersC, controllerUUID)
	return w
}
|
go
|
{
"resource": ""
}
|
q5234
|
NewMacaroonCache
|
train
|
// NewMacaroonCache returns a MacaroonCache whose entries expire according
// to the supplied clock. A background worker periodically purges expired
// entries; it is stopped via a finalizer once the cache wrapper becomes
// unreachable.
func NewMacaroonCache(clock clock.Clock) *MacaroonCache {
	c := &cacheInternal{clock: clock, macaroons: make(map[string]*macaroonEntry)}
	cache := &MacaroonCache{c}
	// The interval to run the expiry worker is somewhat arbitrary.
	// Expired macaroons will be re-issued as needed; we just want to ensure
	// that those which fall out of use are eventually cleaned up.
	c.runExpiryWorker(10 * time.Minute)
	// The finalizer is attached to the outer wrapper, not the internal
	// struct the worker references, so the worker only stops once callers
	// drop the cache.
	runtime.SetFinalizer(cache, stopMacaroonCacheExpiryWorker)
	return cache
}
|
go
|
{
"resource": ""
}
|
q5235
|
Upsert
|
train
|
// Upsert stores ms under token, recording the macaroons' expiry time
// when one can be determined.
func (c *cacheInternal) Upsert(token string, ms macaroon.Slice) {
	c.Lock()
	defer c.Unlock()
	entry := &macaroonEntry{ms: ms}
	if expiryTime, ok := checkers.MacaroonsExpiryTime(ms); ok {
		entry.expiryTime = &expiryTime
	}
	c.macaroons[token] = entry
}
|
go
|
{
"resource": ""
}
|
q5236
|
Get
|
train
|
// Get returns the cached macaroons for k. Expired entries are evicted
// and reported as a miss.
func (c *cacheInternal) Get(k string) (macaroon.Slice, bool) {
	c.Lock()
	defer c.Unlock()
	if entry, ok := c.macaroons[k]; ok {
		if !entry.expired(c.clock) {
			return entry.ms, true
		}
		delete(c.macaroons, k)
	}
	return nil, false
}
|
go
|
{
"resource": ""
}
|
q5237
|
NewImageMetadataAPI
|
train
|
// NewImageMetadataAPI opens an API root and returns an image metadata
// manager client on top of it.
func (c *cloudImageMetadataCommandBase) NewImageMetadataAPI() (*imagemetadatamanager.Client, error) {
	root, err := c.NewAPIRoot()
	if err == nil {
		return imagemetadatamanager.NewClient(root), nil
	}
	return nil, err
}
|
go
|
{
"resource": ""
}
|
q5238
|
NewMockFacade
|
train
|
// NewMockFacade creates a new mock Facade bound to ctrl.
func NewMockFacade(ctrl *gomock.Controller) *MockFacade {
	m := &MockFacade{ctrl: ctrl}
	m.recorder = &MockFacadeMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q5239
|
FinishUpgradeSeries
|
train
|
// FinishUpgradeSeries registers an expected call to FinishUpgradeSeries.
func (mr *MockFacadeMockRecorder) FinishUpgradeSeries(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockFacade)(nil).FinishUpgradeSeries)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpgradeSeries", method, arg0)
}
|
go
|
{
"resource": ""
}
|
q5240
|
MachineStatus
|
train
|
// MachineStatus mocks the MachineStatus method on Facade.
func (m *MockFacade) MachineStatus() (model.UpgradeSeriesStatus, error) {
	results := m.ctrl.Call(m, "MachineStatus")
	status, _ := results[0].(model.UpgradeSeriesStatus)
	err, _ := results[1].(error)
	return status, err
}
|
go
|
{
"resource": ""
}
|
q5241
|
SetMachineStatus
|
train
|
// SetMachineStatus mocks the SetMachineStatus method on Facade.
func (m *MockFacade) SetMachineStatus(arg0 model.UpgradeSeriesStatus, arg1 string) error {
	results := m.ctrl.Call(m, "SetMachineStatus", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q5242
|
StartUnitCompletion
|
train
|
// StartUnitCompletion mocks the StartUnitCompletion method on Facade.
func (m *MockFacade) StartUnitCompletion(arg0 string) error {
	results := m.ctrl.Call(m, "StartUnitCompletion", arg0)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q5243
|
TargetSeries
|
train
|
// TargetSeries mocks the TargetSeries method on Facade.
func (m *MockFacade) TargetSeries() (string, error) {
	results := m.ctrl.Call(m, "TargetSeries")
	series, _ := results[0].(string)
	err, _ := results[1].(error)
	return series, err
}
|
go
|
{
"resource": ""
}
|
q5244
|
UnitsCompleted
|
train
|
// UnitsCompleted mocks the UnitsCompleted method on Facade.
func (m *MockFacade) UnitsCompleted() ([]names_v2.UnitTag, error) {
	results := m.ctrl.Call(m, "UnitsCompleted")
	tags, _ := results[0].([]names_v2.UnitTag)
	err, _ := results[1].(error)
	return tags, err
}
|
go
|
{
"resource": ""
}
|
q5245
|
UnpinMachineApplications
|
train
|
// UnpinMachineApplications registers an expected call to UnpinMachineApplications.
func (mr *MockFacadeMockRecorder) UnpinMachineApplications() *gomock.Call {
	method := reflect.TypeOf((*MockFacade)(nil).UnpinMachineApplications)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpinMachineApplications", method)
}
|
go
|
{
"resource": ""
}
|
q5246
|
Infof
|
train
|
// Infof mocks the variadic Infof method on Logger.
func (m *MockLogger) Infof(arg0 string, arg1 ...interface{}) {
	// Flatten the format string and its arguments into one slice for gomock.
	varargs := []interface{}{arg0}
	for _, extra := range arg1 {
		varargs = append(varargs, extra)
	}
	m.ctrl.Call(m, "Infof", varargs...)
}
|
go
|
{
"resource": ""
}
|
q5247
|
Infof
|
train
|
// Infof registers an expected call to Infof.
func (mr *MockLoggerMockRecorder) Infof(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
	varargs := append([]interface{}{arg0}, arg1...)
	method := reflect.TypeOf((*MockLogger)(nil).Infof)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Infof", method, varargs...)
}
|
go
|
{
"resource": ""
}
|
q5248
|
NewMockAgentService
|
train
|
// NewMockAgentService creates a new mock AgentService bound to ctrl.
func NewMockAgentService(ctrl *gomock.Controller) *MockAgentService {
	m := &MockAgentService{ctrl: ctrl}
	m.recorder = &MockAgentServiceMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q5249
|
Running
|
train
|
// Running mocks the Running method on AgentService.
func (m *MockAgentService) Running() (bool, error) {
	results := m.ctrl.Call(m, "Running")
	running, _ := results[0].(bool)
	err, _ := results[1].(error)
	return running, err
}
|
go
|
{
"resource": ""
}
|
q5250
|
Running
|
train
|
// Running registers an expected call to Running.
func (mr *MockAgentServiceMockRecorder) Running() *gomock.Call {
	method := reflect.TypeOf((*MockAgentService)(nil).Running)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Running", method)
}
|
go
|
{
"resource": ""
}
|
q5251
|
NewMockServiceAccess
|
train
|
// NewMockServiceAccess creates a new mock ServiceAccess bound to ctrl.
func NewMockServiceAccess(ctrl *gomock.Controller) *MockServiceAccess {
	m := &MockServiceAccess{ctrl: ctrl}
	m.recorder = &MockServiceAccessMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q5252
|
DiscoverService
|
train
|
// DiscoverService mocks the DiscoverService method on ServiceAccess.
func (m *MockServiceAccess) DiscoverService(arg0 string) (upgradeseries.AgentService, error) {
	results := m.ctrl.Call(m, "DiscoverService", arg0)
	svc, _ := results[0].(upgradeseries.AgentService)
	err, _ := results[1].(error)
	return svc, err
}
|
go
|
{
"resource": ""
}
|
q5253
|
DiscoverService
|
train
|
// DiscoverService registers an expected call to DiscoverService.
func (mr *MockServiceAccessMockRecorder) DiscoverService(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockServiceAccess)(nil).DiscoverService)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiscoverService", method, arg0)
}
|
go
|
{
"resource": ""
}
|
q5254
|
ListServices
|
train
|
// ListServices mocks the ListServices method on ServiceAccess.
func (m *MockServiceAccess) ListServices() ([]string, error) {
	results := m.ctrl.Call(m, "ListServices")
	services, _ := results[0].([]string)
	err, _ := results[1].(error)
	return services, err
}
|
go
|
{
"resource": ""
}
|
q5255
|
ListServices
|
train
|
// ListServices registers an expected call to ListServices.
func (mr *MockServiceAccessMockRecorder) ListServices() *gomock.Call {
	method := reflect.TypeOf((*MockServiceAccess)(nil).ListServices)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServices", method)
}
|
go
|
{
"resource": ""
}
|
q5256
|
NewMockUpgrader
|
train
|
// NewMockUpgrader creates a new mock Upgrader bound to ctrl.
func NewMockUpgrader(ctrl *gomock.Controller) *MockUpgrader {
	m := &MockUpgrader{ctrl: ctrl}
	m.recorder = &MockUpgraderMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q5257
|
PerformUpgrade
|
train
|
// PerformUpgrade mocks the PerformUpgrade method on Upgrader.
func (m *MockUpgrader) PerformUpgrade() error {
	results := m.ctrl.Call(m, "PerformUpgrade")
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q5258
|
PerformUpgrade
|
train
|
// PerformUpgrade registers an expected call to PerformUpgrade.
func (mr *MockUpgraderMockRecorder) PerformUpgrade() *gomock.Call {
	method := reflect.TypeOf((*MockUpgrader)(nil).PerformUpgrade)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PerformUpgrade", method)
}
|
go
|
{
"resource": ""
}
|
q5259
|
get
|
train
|
// get returns the named resource and, when its content is cached, a reader
// for that content. When only the content is missing it falls back to the
// resource metadata alone (nil reader). Without an EntityCache nothing can
// be looked up, so a not-found error is returned.
func (cfo cacheForOperations) get(name string) (resource.Resource, io.ReadCloser, error) {
	if cfo.EntityCache == nil {
		return resource.Resource{}, nil, errors.NotFoundf("resource %q", name)
	}
	res, reader, err := cfo.OpenResource(name)
	if errors.IsNotFound(err) {
		// Content miss: fall back to metadata only, clearing the reader.
		reader = nil
		res, err = cfo.GetResource(name)
	}
	if err != nil {
		return resource.Resource{}, nil, errors.Trace(err)
	}
	return res, reader, nil
}
|
go
|
{
"resource": ""
}
|
q5260
|
set
|
train
|
// set stores the resource info and content in the cache and returns the
// (possibly updated) resource together with a fresh reader for its content.
// Without an EntityCache it is a no-op that echoes back its inputs.
//
// NOTE(review): reader is closed unconditionally on the caching path;
// callers appear expected to pass a non-nil reader — confirm, as a nil
// reader would panic here.
func (cfo cacheForOperations) set(chRes charmresource.Resource, reader io.ReadCloser) (resource.Resource, io.ReadCloser, error) {
	if cfo.EntityCache == nil {
		res := resource.Resource{
			Resource: chRes,
		}
		return res, reader, nil // a no-op
	}
	defer reader.Close()
	res, err := cfo.SetResource(chRes, reader)
	if err != nil {
		return resource.Resource{}, nil, errors.Trace(err)
	}
	// Make sure to use the potentially updated resource details.
	res, reader, err = cfo.OpenResource(res.Name)
	if err != nil {
		return resource.Resource{}, nil, errors.Trace(err)
	}
	return res, reader, nil
}
|
go
|
{
"resource": ""
}
|
q5261
|
Archive
|
train
|
// Archive writes a gzip-compressed tar stream of the regular files in dir
// to w. File modes are normalised to 0755 (executable) or 0644, ignoring
// the local umask.
//
// The return value is named so that the deferred closeErrorCheck calls can
// surface tar/gzip Close (flush) failures; previously those errors were
// silently dropped and a truncated archive could be reported as success.
func Archive(w io.Writer, dir string) (err error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	gzw := gzip.NewWriter(w)
	defer closeErrorCheck(&err, gzw)
	tarw := tar.NewWriter(gzw)
	defer closeErrorCheck(&err, tarw)
	for _, ent := range entries {
		h := tarHeader(ent)
		logger.Debugf("adding entry: %#v", h)
		// ignore local umask
		if isExecutable(ent) {
			h.Mode = 0755
		} else {
			h.Mode = 0644
		}
		if err := tarw.WriteHeader(h); err != nil {
			return err
		}
		fileName := filepath.Join(dir, ent.Name())
		if err := copyFile(tarw, fileName); err != nil {
			return err
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5262
|
archiveAndSHA256
|
train
|
// archiveAndSHA256 streams the archive of dir to w while hashing it,
// returning the hex-encoded SHA-256 of the archive bytes.
func archiveAndSHA256(w io.Writer, dir string) (sha256hash string, err error) {
	digest := sha256.New()
	if err = Archive(io.MultiWriter(digest, w), dir); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", digest.Sum(nil)), nil
}
|
go
|
{
"resource": ""
}
|
q5263
|
copyFile
|
train
|
func copyFile(w io.Writer, file string) error {
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(w, f)
return err
}
|
go
|
{
"resource": ""
}
|
q5264
|
tarHeader
|
train
|
func tarHeader(i os.FileInfo) *tar.Header {
return &tar.Header{
Typeflag: tar.TypeReg,
Name: i.Name(),
Size: i.Size(),
Mode: int64(i.Mode() & 0777),
ModTime: i.ModTime(),
AccessTime: i.ModTime(),
ChangeTime: i.ModTime(),
Uname: "ubuntu",
Gname: "ubuntu",
}
}
|
go
|
{
"resource": ""
}
|
q5265
|
closeErrorCheck
|
train
|
func closeErrorCheck(errp *error, c io.Closer) {
err := c.Close()
if *errp == nil {
*errp = err
}
}
|
go
|
{
"resource": ""
}
|
q5266
|
ExistingJujudLocation
|
train
|
// ExistingJujudLocation returns the directory containing the currently
// running juju executable.
func ExistingJujudLocation() (string, error) {
	jujuLocation, err := findExecutable(os.Args[0])
	if err != nil {
		logger.Infof("%v", err)
		return "", err
	}
	return filepath.Dir(jujuLocation), nil
}
|
go
|
{
"resource": ""
}
|
q5267
|
bundleTools
|
train
|
// bundleTools packages the local jujud into a gzipped tar stream written to
// w, returning the tools version, whether the binary is an official build,
// and the SHA-256 of the stream. For unofficial builds a non-nil
// forceVersion is recorded in a FORCE-VERSION file inside the archive.
func bundleTools(build bool, w io.Writer, forceVersion *version.Number) (_ version.Binary, official bool, sha256hash string, _ error) {
	dir, err := ioutil.TempDir("", "juju-tools")
	if err != nil {
		return version.Binary{}, false, "", err
	}
	defer os.RemoveAll(dir)
	if err := packageLocalTools(dir, build); err != nil {
		return version.Binary{}, false, "", err
	}
	tvers, official, err := JujudVersion(dir)
	if err != nil {
		return version.Binary{}, false, "", errors.Trace(err)
	}
	if official {
		logger.Debugf("using official version %s", tvers)
	} else if forceVersion != nil {
		// Unofficial build: pin the reported version via FORCE-VERSION.
		logger.Debugf("forcing version to %s", forceVersion)
		if err := ioutil.WriteFile(filepath.Join(dir, "FORCE-VERSION"), []byte(forceVersion.String()), 0666); err != nil {
			return version.Binary{}, false, "", err
		}
	}
	sha256hash, err = archiveAndSHA256(w, dir)
	if err != nil {
		return version.Binary{}, false, "", err
	}
	return tvers, official, sha256hash, err
}
|
go
|
{
"resource": ""
}
|
q5268
|
JujudVersion
|
train
|
// JujudVersion returns the version of the jujud binary in dir, and whether
// that version came from metadata produced by an official build. When no
// such metadata matches, the version is obtained by running the binary
// itself.
func JujudVersion(dir string) (version.Binary, bool, error) {
	tvers, err := getVersionFromFile(dir)
	// Only a clean metadata read counts as "official".
	official := err == nil
	if err != nil && !errors.IsNotFound(err) && !isNoMatchingToolsChecksum(err) {
		return version.Binary{}, false, errors.Trace(err)
	}
	if errors.IsNotFound(err) || isNoMatchingToolsChecksum(err) {
		// No signature file found.
		// Extract the version number that the jujud binary was built with.
		// This is used to check compatibility with the version of the client
		// being used to bootstrap.
		tvers, err = getVersionFromJujud(dir)
		if err != nil {
			return version.Binary{}, false, errors.Trace(err)
		}
	}
	return tvers, official, nil
}
|
go
|
{
"resource": ""
}
|
q5269
|
AgentConf
|
train
|
// AgentConf returns the service configuration for a juju agent, adjusting
// the description for unit agents and the open-file limit for machine
// agents.
func AgentConf(info AgentInfo, renderer shell.Renderer) common.Conf {
	conf := common.Conf{
		Desc:          fmt.Sprintf("juju agent for %s", info.name),
		ExecStart:     info.cmd(renderer),
		Logfile:       info.logFile(renderer),
		Env:           osenv.FeatureFlags(),
		Timeout:       agentServiceTimeout,
		ServiceBinary: info.jujud(renderer),
		ServiceArgs:   info.execArgs(renderer),
	}
	if info.Kind == AgentKindMachine {
		conf.Limit = map[string]string{"nofile": "64000"}
	} else if info.Kind == AgentKindUnit {
		conf.Desc = "juju unit agent for " + info.ID
	}
	return conf
}
|
go
|
{
"resource": ""
}
|
q5270
|
ShutdownAfterConf
|
train
|
// ShutdownAfterConf returns the shutdown-job service configuration
// ordered after the named service; the name must not be empty.
func ShutdownAfterConf(serviceName string) (common.Conf, error) {
	if serviceName == "" {
		return common.Conf{}, errors.New(`missing "after" service name`)
	}
	conf := shutdownAfterConf(serviceName, "juju shutdown job")
	return conf, nil
}
|
go
|
{
"resource": ""
}
|
q5271
|
instInfo
|
train
|
// instInfo returns the addresses and status of the given instance,
// failing with not-found when inst is nil.
func (a *aggregator) instInfo(id instance.Id, inst instances.Instance) (instanceInfo, error) {
	if inst == nil {
		return instanceInfo{}, errors.NotFoundf("instance %v", id)
	}
	addresses, err := inst.Addresses(a.callContext)
	if err != nil {
		return instanceInfo{}, err
	}
	info := instanceInfo{
		addresses,
		inst.Status(a.callContext),
	}
	return info, nil
}
|
go
|
{
"resource": ""
}
|
q5272
|
Reset
|
train
|
// Reset changes the timer to expire after duration d, reporting whether
// the underlying timer had been active.
func (t *Timer) Reset(d time.Duration) bool {
	active := t.timer.Reset(d)
	return active
}
|
go
|
{
"resource": ""
}
|
q5273
|
InitiateMongoServer
|
train
|
// InitiateMongoServer sets up the initial replica set on the local mongo
// instance, retrying while mongo is still initialising. It is a no-op when
// more than one member address is present, since the replica set must then
// already be initiated.
func InitiateMongoServer(p InitiateMongoParams) error {
	// Security fix: do not log p.Password, nor p.DialInfo (which embeds the
	// password) — credentials must never end up in the logs.
	logger.Debugf("Initiating mongo replicaset; addrs %v; memberHostport %q; user %q", p.DialInfo.Addrs, p.MemberHostPort, p.User)
	defer logger.Infof("finished InitiateMongoServer")
	if len(p.DialInfo.Addrs) > 1 {
		logger.Infof("more than one member; replica set must be already initiated")
		return nil
	}
	p.DialInfo.Direct = true
	// Initiate may fail while mongo is initialising, so we retry until
	// we successfully populate the replicaset config.
	var err error
	for attempt := initiateAttemptStrategy.Start(); attempt.Next(); {
		err = attemptInitiateMongoServer(p.DialInfo, p.MemberHostPort)
		if err == nil {
			logger.Infof("replica set initiated")
			return err
		}
		if attempt.HasNext() {
			logger.Debugf("replica set initiation failed, will retry: %v", err)
		}
	}
	return errors.Annotatef(err, "cannot initiate replica set")
}
|
go
|
{
"resource": ""
}
|
q5274
|
attemptInitiateMongoServer
|
train
|
// attemptInitiateMongoServer dials mongo once and tries to initiate the
// replica set with the given member address.
func attemptInitiateMongoServer(dialInfo *mgo.DialInfo, memberHostPort string) error {
	session, err := mgo.DialWithInfo(dialInfo)
	if err != nil {
		return errors.Annotatef(err, "cannot dial mongo to initiate replicaset")
	}
	defer session.Close()
	session.SetSocketTimeout(mongo.SocketTimeout)
	// Tag the bootstrap machine so the peergrouper can identify it later.
	tags := map[string]string{
		jujuMachineKey: agent.BootstrapMachineId,
	}
	return replicaset.Initiate(session, memberHostPort, mongo.ReplicaSetName, tags)
}
|
go
|
{
"resource": ""
}
|
q5275
|
resetReplicaSet
|
train
|
// resetReplicaSet re-initiates the replica set using the credentials
// carried in dialInfo.
func resetReplicaSet(dialInfo *mgo.DialInfo, memberHostPort string) error {
	params := peergrouper.InitiateMongoParams{}
	params.DialInfo = dialInfo
	params.MemberHostPort = memberHostPort
	params.User = dialInfo.Username
	params.Password = dialInfo.Password
	return peergrouper.InitiateMongoServer(params)
}
|
go
|
{
"resource": ""
}
|
q5276
|
tagUserCredentials
|
train
|
// tagUserCredentials returns the agent's tag string as the username,
// together with the password taken from the agent config's API info.
// It fails when no API info is available, since the controller password
// cannot be recovered in that case.
func tagUserCredentials(conf agent.Config) (string, string, error) {
	// TODO(perrito) we might need an accessor for the actual state password
	// just in case it ever changes from the same as api password.
	apiInfo, ok := conf.APIInfo()
	if !ok {
		// There seems to be no way to reach this inconsistence other than making a
		// backup on a machine where these fields are corrupted and even so I find
		// no reasonable way to reach this state, yet since APIInfo has it as a
		// possibility I prefer to handle it, we cannot recover from this since
		// it would mean that the agent.conf is corrupted.
		return "", "", errors.New("cannot obtain password to access the controller")
	}
	return conf.Tag().String(), apiInfo.Password, nil
}
|
go
|
{
"resource": ""
}
|
q5277
|
newDialInfo
|
train
|
// newDialInfo returns mgo.DialInfo for connecting directly to the mongo
// instance at privateAddr, using the state serving info held in the
// agent config. If the config still carries the old ("admin") password
// that is used; otherwise the tag user credentials are used.
func newDialInfo(privateAddr string, conf agent.Config) (*mgo.DialInfo, error) {
	dialOpts := mongo.DialOpts{Direct: true}
	ssi, ok := conf.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot get state serving info to dial")
	}
	info := mongo.Info{
		Addrs:  []string{net.JoinHostPort(privateAddr, strconv.Itoa(ssi.StatePort))},
		CACert: conf.CACert(),
	}
	dialInfo, err := mongo.DialInfo(info, dialOpts)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce a dial info")
	}
	// Reuse the already-fetched password instead of calling
	// conf.OldPassword() a second time.
	if oldPassword := conf.OldPassword(); oldPassword != "" {
		dialInfo.Username = "admin"
		dialInfo.Password = oldPassword
	} else {
		dialInfo.Username, dialInfo.Password, err = tagUserCredentials(conf)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return dialInfo, nil
}
|
go
|
{
"resource": ""
}
|
q5278
|
updateMachineAddresses
|
train
|
// updateMachineAddresses sets the machine's provider addresses to the
// given public and private address strings, deriving each address type
// from its value.
func updateMachineAddresses(machine *state.Machine, privateAddress, publicAddress string) error {
	addresses := []network.Address{
		{
			Value: publicAddress,
			Type:  network.DeriveAddressType(publicAddress),
		},
		{
			Value: privateAddress,
			Type:  network.DeriveAddressType(privateAddress),
		},
	}
	return errors.Trace(machine.SetProviderAddresses(addresses...))
}
|
go
|
{
"resource": ""
}
|
q5279
|
connectToDB
|
train
|
// connectToDB dials mongo using info and then repeatedly attempts to
// open a state pool for the given controller and model tags. Opening
// is retried because mongo may still be initialising on the restored
// controller even after the dial itself succeeds.
//
// NOTE(review): the session is closed via defer when this function
// returns, including on the success path where the pool was opened
// from it — confirm OpenStatePool copies the session.
func connectToDB(controllerTag names.ControllerTag, modelTag names.ModelTag, info *mongo.MongoInfo) (*state.StatePool, error) {
	// We need to retry here to allow mongo to come up on the restored controller.
	// The connection might succeed due to the mongo dial retries but there may still
	// be a problem issuing database commands.
	var (
		pool *state.StatePool
		err  error
	)
	const (
		newStateConnDelay       = 15 * time.Second
		newStateConnMinAttempts = 8
	)
	// TODO(katco): 2016-08-09: lp:1611427
	attempt := utils.AttemptStrategy{Delay: newStateConnDelay, Min: newStateConnMinAttempts}
	session, err := mongo.DialWithInfo(*info, mongoDefaultDialOpts())
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer session.Close()
	for a := attempt.Start(); a.Next(); {
		pool, err = state.OpenStatePool(state.OpenParams{
			Clock:              clock.WallClock,
			ControllerTag:      controllerTag,
			ControllerModelTag: modelTag,
			MongoSession:       session,
			NewPolicy:          environsGetNewPolicyFunc(),
		})
		if err == nil {
			return pool, nil
		}
		logger.Errorf("cannot open state, retrying: %v", err)
	}
	return nil, errors.Annotate(err, "cannot open state")
}
|
go
|
{
"resource": ""
}
|
q5280
|
updateAllMachines
|
train
|
// updateAllMachines concurrently runs the agent-address update script
// on every machine in the list, skipping manager machines and dead
// machines, and waits for all updates to finish. Per-machine failures
// are logged but not returned (see the TODO-style comment below).
func updateAllMachines(privateAddress, publicAddress string, machines []machineModel) error {
	var machineUpdating sync.WaitGroup
	for _, item := range machines {
		machine := item.machine
		// A newly resumed controller requires no updating, and more
		// than one controller is not yet supported by this code.
		if machine.IsManager() || machine.Life() == state.Dead {
			continue
		}
		machineUpdating.Add(1)
		go func(machine *state.Machine, model *state.Model) {
			defer machineUpdating.Done()
			logger.Debugf("updating addresses for machine %s in model %s/%s", machine.Tag().Id(), model.Owner().Id(), model.Name())
			// TODO: thumper 2016-09-20
			// runMachineUpdate only handles linux machines, what about windows?
			err := runMachineUpdate(machine, setAgentAddressScript(privateAddress, publicAddress))
			if err != nil {
				logger.Errorf("failed updating machine: %v", err)
			}
		}(machine, item.model)
	}
	machineUpdating.Wait()
	// We should return errors encapsulated in a digest.
	return nil
}
|
go
|
{
"resource": ""
}
|
q5281
|
setAgentAddressScript
|
train
|
// setAgentAddressScript renders the package-level agent-address
// template with the given private (state) and public addresses and
// returns the resulting script text. A template failure is treated as
// a programming error and panics.
func setAgentAddressScript(stateAddr, statePubAddr string) string {
	data := struct {
		Address    string
		PubAddress string
	}{
		Address:    stateAddr,
		PubAddress: statePubAddr,
	}
	var rendered bytes.Buffer
	if err := agentAddressAndRelationsTemplate.Execute(&rendered, data); err != nil {
		panic(errors.Annotate(err, "template error"))
	}
	return rendered.String()
}
|
go
|
{
"resource": ""
}
|
q5282
|
runMachineUpdate
|
train
|
// runMachineUpdate looks up the machine's public address and executes
// sshArg on it via SSH.
func runMachineUpdate(machine *state.Machine, sshArg string) error {
	addr, err := machine.PublicAddress()
	switch {
	case err == nil:
		return runViaSSH(addr.Value, sshArg)
	case network.IsNoAddressError(err):
		return errors.Annotatef(err, "no appropriate public address found")
	default:
		return errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q5283
|
runViaSSH
|
train
|
// runViaSSH runs script as root (via sudo -n bash -c) on the ubuntu
// account at addr, authenticating with the controller's system
// identity key. Host key checking is disabled (see comment below).
// On failure the returned error is annotated with the command's
// stderr output.
func runViaSSH(addr string, script string) error {
	sshOptions := ssh.Options{}
	sshOptions.SetIdentities("/var/lib/juju/system-identity")
	// Disable host key checking. We're not pushing across anything
	// sensitive, and there's no guarantee that the machine would
	// have published up-to-date host key information.
	sshOptions.SetStrictHostKeyChecking(ssh.StrictHostChecksNo)
	sshOptions.SetKnownHostsFile(os.DevNull)
	userAddr := "ubuntu@" + addr
	userCmd := sshCommand(userAddr, []string{"sudo", "-n", "bash", "-c " + utils.ShQuote(script)}, &sshOptions)
	var stdoutBuf bytes.Buffer
	var stderrBuf bytes.Buffer
	userCmd.Stdout = &stdoutBuf
	userCmd.Stderr = &stderrBuf
	logger.Debugf("updating %s, script:\n%s", addr, script)
	if err := userCmd.Run(); err != nil {
		return errors.Annotatef(err, "ssh command failed: %q", stderrBuf.String())
	}
	logger.Debugf("result %s\nstdout: \n%s\nstderr: %s", addr, stdoutBuf.String(), stderrBuf.String())
	return nil
}
|
go
|
{
"resource": ""
}
|
q5284
|
ParsePlacement
|
train
|
// ParsePlacement parses a placement directive of the form
// "<scope>:<value>", a bare machine id, or a bare container type,
// returning the corresponding Placement. An empty directive yields a
// nil Placement and no error.
func ParsePlacement(directive string) (*Placement, error) {
	if directive == "" {
		return nil, nil
	}
	if parts := strings.SplitN(directive, ":", 2); len(parts) == 2 {
		scope, value := parts[0], parts[1]
		if scope == "" {
			return nil, ErrPlacementScopeMissing
		}
		// Sanity check: machine/container scopes require a machine ID as the value.
		if (scope == MachineScope || isContainerType(scope)) && !names.IsValidMachine(value) {
			return nil, fmt.Errorf("invalid value %q for %q scope: expected machine-id", value, scope)
		}
		return &Placement{Scope: scope, Directive: value}, nil
	}
	if names.IsValidMachine(directive) {
		return &Placement{Scope: MachineScope, Directive: directive}, nil
	}
	if isContainerType(directive) {
		return &Placement{Scope: directive}, nil
	}
	return nil, ErrPlacementScopeMissing
}
|
go
|
{
"resource": ""
}
|
q5285
|
MustParsePlacement
|
train
|
// MustParsePlacement parses the directive and panics on failure; use
// it only with directives known to be valid.
func MustParsePlacement(directive string) *Placement {
	placement, err := ParsePlacement(directive)
	if err == nil {
		return placement
	}
	panic(err)
}
|
go
|
{
"resource": ""
}
|
q5286
|
SetMinUnits
|
train
|
// SetMinUnits changes the minimum number of units for the application,
// rejecting negative values. The write is performed via setMinUnitsOps
// inside a transaction that refreshes and retries on races; on success
// the in-memory doc is updated too (see the deferred closure below).
func (a *Application) SetMinUnits(minUnits int) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set minimum units for application %q", a)
	defer func() {
		if err == nil {
			a.doc.MinUnits = minUnits
		}
	}()
	if minUnits < 0 {
		return errors.New("cannot set a negative minimum number of units")
	}
	app := &Application{st: a.st, doc: a.doc}
	// Removing the document never fails. Racing clients trying to create the
	// document generate one failure, but the second attempt should succeed.
	// If one client tries to update the document, and a racing client removes
	// it, the former should be able to re-create the document in the second
	// attempt. If the referred-to application advanced its life cycle to a not
	// alive state, an error is returned after the first failing attempt.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			if err := app.Refresh(); err != nil {
				return nil, err
			}
		}
		if app.doc.Life != Alive {
			return nil, errors.New("application is no longer alive")
		}
		if minUnits == app.doc.MinUnits {
			return nil, jujutxn.ErrNoOperations
		}
		return setMinUnitsOps(app, minUnits), nil
	}
	return a.st.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q5287
|
doesMinUnitsExist
|
train
|
// doesMinUnitsExist reports whether a minUnits document exists for the
// unit's application.
func doesMinUnitsExist(unit *Unit) (bool, error) {
	minUnits, closer := unit.st.db().GetCollection(minUnitsC)
	defer closer()
	var result bson.D
	switch err := minUnits.FindId(unit.ApplicationName()).Select(bson.M{"_id": 1}).One(&result); err {
	case nil:
		return true, nil
	case mgo.ErrNotFound:
		return false, nil
	default:
		return false, errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q5288
|
minUnitsRemoveOp
|
train
|
// minUnitsRemoveOp returns the transaction operation that removes the
// minUnits document for the given application.
func minUnitsRemoveOp(st *State, applicationname string) txn.Op {
	return txn.Op{
		C:      minUnitsC,
		Id:     st.docID(applicationname),
		Remove: true,
	}
}
|
go
|
{
"resource": ""
}
|
q5289
|
EnsureMinUnits
|
train
|
// EnsureMinUnits adds new units to the application until it has at
// least MinUnits alive units, assigning each added unit with the
// AssignNew policy. The loop refreshes the application and retries
// whenever the underlying add-unit transaction is aborted by a race.
// It returns nil immediately when MinUnits is unset (zero) or already
// satisfied, and errors if the application is not alive.
func (a *Application) EnsureMinUnits() (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot ensure minimum units for application %q", a)
	app := &Application{st: a.st, doc: a.doc}
	for {
		// Ensure the application is alive.
		if app.doc.Life != Alive {
			return errors.New("application is not alive")
		}
		// Exit without errors if the MinUnits for the application is not set.
		if app.doc.MinUnits == 0 {
			return nil
		}
		// Retrieve the number of alive units for the application.
		aliveUnits, err := aliveUnitsCount(app)
		if err != nil {
			return err
		}
		// Calculate the number of required units to be added.
		missing := app.doc.MinUnits - aliveUnits
		if missing <= 0 {
			return nil
		}
		name, ops, err := ensureMinUnitsOps(app)
		if err != nil {
			return err
		}
		// Add missing unit.
		switch err := a.st.db().RunTransaction(ops); err {
		case nil:
			// Assign the new unit.
			unit, err := a.st.Unit(name)
			if err != nil {
				return err
			}
			if err := app.st.AssignUnit(unit, AssignNew); err != nil {
				return err
			}
			// No need to proceed and refresh the application if this was the
			// last/only missing unit.
			if missing == 1 {
				return nil
			}
		case txn.ErrAborted:
			// Refresh the application and restart the loop.
		default:
			return err
		}
		if err := app.Refresh(); err != nil {
			return err
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5290
|
aliveUnitsCount
|
train
|
// aliveUnitsCount returns the number of alive units belonging to the
// given application.
func aliveUnitsCount(app *Application) (int, error) {
	units, closer := app.st.db().GetCollection(unitsC)
	defer closer()
	return units.Find(bson.D{{"application", app.doc.Name}, {"life", Alive}}).Count()
}
|
go
|
{
"resource": ""
}
|
q5291
|
ensureMinUnitsOps
|
train
|
// ensureMinUnitsOps returns the name of a new unit for app together
// with the transaction operations required to add it, asserting that
// the application document's txn-revno is unchanged since it was read.
func ensureMinUnitsOps(app *Application) (string, []txn.Op, error) {
	asserts := bson.D{{"txn-revno", app.doc.TxnRevno}}
	return app.addUnitOps("", AddUnitParams{}, asserts)
}
|
go
|
{
"resource": ""
}
|
q5292
|
AutoConfigureContainerNetworking
|
train
|
// AutoConfigureContainerNetworking chooses the model's
// container-networking-method unless the user has already set one:
// "provider" when the environ supports container addresses, "fan" when
// a fan was discovered, and "local" otherwise. The chosen value (and
// any attributes added by fan discovery) are written to model config.
func (m *Model) AutoConfigureContainerNetworking(environ environs.BootstrapEnviron) error {
	updateAttrs := make(map[string]interface{})
	modelConfig, err := m.ModelConfig()
	if err != nil {
		return err
	}
	fanConfigured, err := m.discoverFan(environ, modelConfig, updateAttrs)
	if err != nil {
		return err
	}
	switch {
	case modelConfig.ContainerNetworkingMethod() != "":
		// The user has already decided; leave the setting alone.
	case environs.SupportsContainerAddresses(CallContext(m.st), environ):
		updateAttrs["container-networking-method"] = "provider"
	case fanConfigured:
		updateAttrs["container-networking-method"] = "fan"
	default:
		updateAttrs["container-networking-method"] = "local"
	}
	return m.UpdateModelConfig(updateAttrs, nil)
}
|
go
|
{
"resource": ""
}
|
q5293
|
NewFacade
|
train
|
// NewFacade returns a singular-claimer API facade backed by the given
// backend and lease claimer. Only controller agents are permitted to
// use it; other callers get ErrPerm.
func NewFacade(backend Backend, claimer lease.Claimer, auth facade.Authorizer) (*Facade, error) {
	if !auth.AuthController() {
		return nil, common.ErrPerm
	}
	return &Facade{
		auth:            auth,
		modelTag:        backend.ModelTag(),
		controllerTag:   backend.ControllerTag(),
		singularClaimer: claimer,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5294
|
addPackageSourceCmds
|
train
|
// addPackageSourceCmds returns the shell commands that install the
// given package source: the key file is written first (when a key is
// supplied), followed by the rendered .repo file under the yum
// sources directory.
func addPackageSourceCmds(cfg CloudConfig, src packaging.PackageSource) []string {
	cmds := []string{}
	// if keyfile is required, add it first
	if src.Key != "" {
		keyFilePath := config.YumKeyfileDir + src.KeyFileName()
		cmds = append(cmds, addFileCmds(keyFilePath, []byte(src.Key), 0644, false)...)
	}
	repoPath := filepath.Join(config.YumSourcesDir, src.Name+".repo")
	// NOTE(review): the RenderSource error is silently discarded, which
	// would write an empty repo file on failure — confirm whether the
	// render can actually fail for the sources passed here.
	sourceFile, _ := cfg.getPackagingConfigurer().RenderSource(src)
	data := []byte(sourceFile)
	cmds = append(cmds, addFileCmds(repoPath, data, 0644, false)...)
	return cmds
}
|
go
|
{
"resource": ""
}
|
q5295
|
removeStringFromSlice
|
train
|
// removeStringFromSlice returns slice with every occurrence of val
// removed. The result reuses slice's backing array, so the input is
// modified in place (as the previous append-based version also did).
//
// The previous implementation mutated the slice while ranging over it,
// which skipped the element following each removal and therefore left
// adjacent duplicates of val in place; filtering into the same backing
// array fixes that.
func removeStringFromSlice(slice []string, val string) []string {
	filtered := slice[:0]
	for _, str := range slice {
		if str != val {
			filtered = append(filtered, str)
		}
	}
	return filtered
}
|
go
|
{
"resource": ""
}
|
q5296
|
machineName
|
train
|
// machineName returns the status's display name when set, falling back
// to the instance ID otherwise.
func (s machineStatus) machineName() string {
	if name := s.DisplayName; name != "" {
		return name
	}
	return string(s.InstanceId)
}
|
go
|
{
"resource": ""
}
|
q5297
|
NewDestroyCommand
|
train
|
// NewDestroyCommand returns the destroy command, using the wall clock
// and allowing the current model to be cleared, wrapped so that the
// default model and the model flags are skipped.
func NewDestroyCommand() cmd.Command {
	destroyCmd := &destroyCommand{
		clock: jujuclock.WallClock,
	}
	destroyCmd.CanClearCurrentModel = true
	return modelcmd.Wrap(
		destroyCmd,
		modelcmd.WrapSkipDefaultModel,
		modelcmd.WrapSkipModelFlags,
	)
}
|
go
|
{
"resource": ""
}
|
q5298
|
DestroyContainer
|
train
|
// DestroyContainer removes the container with the given instance ID
// from the manager's server.
func (m *containerManager) DestroyContainer(id instance.Id) error {
	return errors.Trace(m.server.RemoveContainer(string(id)))
}
|
go
|
{
"resource": ""
}
|
q5299
|
CreateContainer
|
train
|
// CreateContainer builds a container spec from the instance config and
// creates the container on the server, reporting progress and failures
// through the callback. On success it returns the new instance
// together with hardware characteristics carrying the manager's
// availability zone.
func (m *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	cons constraints.Value,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
	callback environs.StatusCallbackFunc,
) (instances.Instance, *instance.HardwareCharacteristics, error) {
	callback(status.Provisioning, "Creating container spec", nil)
	spec, err := m.getContainerSpec(instanceConfig, cons, series, networkConfig, storageConfig, callback)
	if err != nil {
		callback(status.ProvisioningError, fmt.Sprintf("Creating container spec: %v", err), nil)
		return nil, nil, errors.Trace(err)
	}
	callback(status.Provisioning, "Creating container", nil)
	c, err := m.server.CreateContainerFromSpec(spec)
	if err != nil {
		callback(status.ProvisioningError, fmt.Sprintf("Creating container: %v", err), nil)
		return nil, nil, errors.Trace(err)
	}
	callback(status.Running, "Container started", nil)
	return &lxdInstance{c.Name, m.server.ContainerServer},
		&instance.HardwareCharacteristics{AvailabilityZone: &m.availabilityZone}, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.