_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q3400
|
PlanInfo
|
train
|
// PlanInfo returns the plan info recorded for this volume attachment
// plan, or a NotProvisioned error if none has been set yet.
func (v *volumeAttachmentPlan) PlanInfo() (VolumeAttachmentPlanInfo, error) {
	if info := v.doc.PlanInfo; info != nil {
		return *info, nil
	}
	return VolumeAttachmentPlanInfo{}, errors.NotProvisionedf("volume attachment plan %q on %q", v.doc.Volume, v.doc.Machine)
}
|
go
|
{
"resource": ""
}
|
q3401
|
Volume
|
train
|
// Volume returns the tag of the volume that this attachment refers to.
func (v *volumeAttachment) Volume() names.VolumeTag {
	tag := names.NewVolumeTag(v.doc.Volume)
	return tag
}
|
go
|
{
"resource": ""
}
|
q3402
|
Info
|
train
|
// Info returns the provisioning info recorded for this volume
// attachment, or a NotProvisioned error if none has been set yet.
func (v *volumeAttachment) Info() (VolumeAttachmentInfo, error) {
	if info := v.doc.Info; info != nil {
		return *info, nil
	}
	host := storageAttachmentHost(v.doc.Host)
	return VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment %q on %q", v.doc.Volume, names.ReadableString(host))
}
|
go
|
{
"resource": ""
}
|
q3403
|
Params
|
train
|
// Params returns the pending attachment parameters, with a boolean
// reporting whether any parameters are recorded.
func (v *volumeAttachment) Params() (VolumeAttachmentParams, bool) {
	if params := v.doc.Params; params != nil {
		return *params, true
	}
	return VolumeAttachmentParams{}, false
}
|
go
|
{
"resource": ""
}
|
q3404
|
Volume
|
train
|
// Volume returns the volume with the given tag.
func (sb *storageBackend) Volume(tag names.VolumeTag) (Volume, error) {
	return getVolumeByTag(sb.mb, tag)
}
|
go
|
{
"resource": ""
}
|
q3405
|
StorageInstanceVolume
|
train
|
// StorageInstanceVolume returns the volume backing the storage
// instance with the given tag.
func (sb *storageBackend) StorageInstanceVolume(tag names.StorageTag) (Volume, error) {
	return sb.storageInstanceVolume(tag)
}
|
go
|
{
"resource": ""
}
|
q3406
|
VolumeAttachment
|
train
|
// VolumeAttachment returns the attachment of the given volume to the
// given host (machine or unit), or a NotFound error if there is none.
func (sb *storageBackend) VolumeAttachment(host names.Tag, volume names.VolumeTag) (VolumeAttachment, error) {
	coll, closer := sb.mb.db().GetCollection(volumeAttachmentsC)
	defer closer()
	var att volumeAttachment
	switch err := coll.FindId(volumeAttachmentId(host.Id(), volume.Id())).One(&att.doc); err {
	case nil:
		return &att, nil
	case mgo.ErrNotFound:
		return nil, errors.NotFoundf("volume %q on %q", volume.Id(), names.ReadableString(host))
	default:
		return nil, errors.Annotatef(err, "getting volume %q on %q", volume.Id(), names.ReadableString(host))
	}
}
|
go
|
{
"resource": ""
}
|
q3407
|
MachineVolumeAttachments
|
train
|
// MachineVolumeAttachments returns all volume attachments whose host is
// the given machine.
func (sb *storageBackend) MachineVolumeAttachments(machine names.MachineTag) ([]VolumeAttachment, error) {
	query := bson.D{{"hostid", machine.Id()}}
	result, err := sb.volumeAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "getting volume attachments for machine %q", machine.Id())
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3408
|
UnitVolumeAttachments
|
train
|
// UnitVolumeAttachments returns all volume attachments whose host is
// the given unit.
func (sb *storageBackend) UnitVolumeAttachments(unit names.UnitTag) ([]VolumeAttachment, error) {
	query := bson.D{{"hostid", unit.Id()}}
	result, err := sb.volumeAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "getting volume attachments for unit %q", unit.Id())
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3409
|
VolumeAttachments
|
train
|
// VolumeAttachments returns all attachments of the given volume,
// across all hosts.
func (sb *storageBackend) VolumeAttachments(volume names.VolumeTag) ([]VolumeAttachment, error) {
	query := bson.D{{"volumeid", volume.Id()}}
	result, err := sb.volumeAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "getting volume attachments for volume %q", volume.Id())
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3410
|
VolumeAttachmentPlans
|
train
|
// VolumeAttachmentPlans returns all attachment plans recorded for the
// given volume.
func (sb *storageBackend) VolumeAttachmentPlans(volume names.VolumeTag) ([]VolumeAttachmentPlan, error) {
	query := bson.D{{"volumeid", volume.Id()}}
	result, err := sb.volumeAttachmentPlans(query)
	if err != nil {
		return nil, errors.Annotatef(err, "getting volume attachment plans for volume %q", volume.Id())
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3411
|
removeMachineVolumesOps
|
train
|
// removeMachineVolumesOps returns transaction operations that remove
// the machine's volume attachments, the machine-bound volumes, and the
// storage instances assigned to those volumes. Intended to run as part
// of removing the machine itself.
func (sb *storageBackend) removeMachineVolumesOps(m *Machine) ([]txn.Op, error) {
	// A machine cannot transition to Dead if it has any detachable storage
	// attached, so any attachments are for machine-bound storage.
	//
	// Even if a volume is "non-detachable", there still exist volume
	// attachments, and they may be removed independently of the volume.
	// For example, the user may request that the volume be destroyed.
	// This will cause the volume to become Dying, and the attachment to
	// be Dying, then Dead, and finally removed. Only once the attachment
	// is removed will the volume transition to Dead and then be removed.
	// Therefore, there may be volumes that are bound, but not attached,
	// to the machine.
	machineVolumes, err := sb.volumes(bson.D{{"hostid", m.Id()}})
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops := make([]txn.Op, 0, 2*len(machineVolumes)+len(m.doc.Volumes))
	// Remove every attachment document recorded against the machine.
	for _, volumeId := range m.doc.Volumes {
		ops = append(ops, txn.Op{
			C:      volumeAttachmentsC,
			Id:     volumeAttachmentId(m.Id(), volumeId),
			Assert: txn.DocExists,
			Remove: true,
		})
	}
	for _, v := range machineVolumes {
		if v.doc.StorageId != "" {
			// The volume is assigned to a storage instance;
			// make sure we also remove the storage instance.
			// There should be no storage attachments remaining,
			// as the units must have been removed before the
			// machine can be; and the storage attachments must
			// have been removed before the unit can be.
			ops = append(ops,
				txn.Op{
					C:      storageInstancesC,
					Id:     v.doc.StorageId,
					Assert: txn.DocExists,
					Remove: true,
				},
			)
		}
		// Remove the volume document itself (and related docs).
		ops = append(ops, sb.removeVolumeOps(v.VolumeTag())...)
	}
	return ops, nil
}
|
go
|
{
"resource": ""
}
|
q3412
|
isDetachableVolumeTag
|
train
|
// isDetachableVolumeTag reports whether the volume identified by the
// given tag is detachable from its host.
func isDetachableVolumeTag(db Database, tag names.VolumeTag) (bool, error) {
	doc, err := getVolumeDocByTag(db, tag)
	if err != nil {
		return false, errors.Trace(err)
	}
	detachable := detachableVolumeDoc(&doc)
	return detachable, nil
}
|
go
|
{
"resource": ""
}
|
q3413
|
isDetachableVolumePool
|
train
|
// isDetachableVolumePool reports whether volumes created from the given
// pool can be detached from the machine they are attached to.
func isDetachableVolumePool(im *storageBackend, pool string) (bool, error) {
	_, provider, _, err := poolStorageProvider(im, pool)
	if err != nil {
		return false, errors.Trace(err)
	}
	switch {
	case provider.Scope() == storage.ScopeMachine:
		// Any storage created by a machine cannot be detached from
		// the machine, and must be destroyed along with it.
		return false, nil
	case provider.Dynamic():
		// NOTE(axw) In theory, we don't know ahead of time
		// whether the storage will be Persistent, as the model
		// allows for a dynamic storage provider to create non-
		// persistent storage. None of the storage providers do
		// this, so we assume it will be persistent for now.
		//
		// TODO(axw) get rid of the Persistent field from Volume
		// and Filesystem. We only need to care whether the
		// storage is dynamic and model-scoped.
		return true, nil
	default:
		// Volume is static, so it will be tied to the machine.
		return false, nil
	}
}
|
go
|
{
"resource": ""
}
|
q3414
|
DetachVolume
|
train
|
// DetachVolume marks the volume attachment identified by the specified
// host and volume tags as Dying, if it is Alive. DetachVolume returns
// an errContainsFilesystem error if the volume is backing an attached
// filesystem, which must be detached first.
func (sb *storageBackend) DetachVolume(host names.Tag, volume names.VolumeTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "detaching volume %s from %s", volume.Id(), names.ReadableString(host))
	// If the volume is backing a filesystem, the volume cannot be detached
	// until the filesystem has been detached.
	if _, err := sb.volumeFilesystemAttachment(host, volume); err == nil {
		return &errContainsFilesystem{errors.New("volume contains attached filesystem")}
	} else if !errors.IsNotFound(err) {
		return errors.Trace(err)
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		va, err := sb.VolumeAttachment(host, volume)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if va.Life() != Alive {
			// Already Dying/Dead; nothing to do.
			return nil, jujutxn.ErrNoOperations
		}
		detachable, err := isDetachableVolumeTag(sb.mb.db(), volume)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !detachable {
			return nil, errors.New("volume is not detachable")
		}
		// When an attachment plan exists, the storage attachment must
		// be detached via the plan's lifecycle instead.
		plans, err := sb.machineVolumeAttachmentPlans(host, volume)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if len(plans) > 0 {
			return detachStorageAttachmentOps(host, volume), nil
		}
		return detachVolumeOps(host, volume), nil
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3415
|
RemoveVolumeAttachment
|
train
|
// RemoveVolumeAttachment removes the Dying attachment of the given
// volume to the given host from state.
func (sb *storageBackend) RemoveVolumeAttachment(host names.Tag, volume names.VolumeTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "removing attachment of volume %s from %s", volume.Id(), names.ReadableString(host))
	buildTxn := func(attempt int) ([]txn.Op, error) {
		attachment, err := sb.VolumeAttachment(host, volume)
		switch {
		case errors.IsNotFound(err) && attempt > 0:
			// We only ignore IsNotFound on attempts after the
			// first, since we expect the volume attachment to
			// be there initially.
			return nil, jujutxn.ErrNoOperations
		case err != nil:
			return nil, errors.Trace(err)
		case attachment.Life() != Dying:
			return nil, errors.New("volume attachment is not dying")
		}
		vol, err := getVolumeByTag(sb.mb, volume)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return removeVolumeAttachmentOps(host, vol), nil
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3416
|
DestroyVolume
|
train
|
// DestroyVolume marks the volume as Dying. It refuses to destroy a
// volume that contains a filesystem, or one still assigned to a
// storage instance (the storage instance must be destroyed instead).
func (sb *storageBackend) DestroyVolume(tag names.VolumeTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "destroying volume %s", tag.Id())
	// A volume backing a filesystem must have the filesystem destroyed
	// first; NotFound here means there is no such filesystem.
	if _, err := sb.VolumeFilesystem(tag); err == nil {
		return &errContainsFilesystem{errors.New("volume contains filesystem")}
	} else if !errors.IsNotFound(err) {
		return errors.Trace(err)
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		volume, err := getVolumeByTag(sb.mb, tag)
		if errors.IsNotFound(err) && attempt > 0 {
			// On the first attempt, we expect it to exist.
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		if volume.doc.StorageId != "" {
			return nil, errors.Errorf(
				"volume is assigned to %s",
				names.ReadableString(names.NewStorageTag(volume.doc.StorageId)),
			)
		}
		if volume.Life() != Alive {
			// Already Dying/Dead; nothing to do.
			return nil, jujutxn.ErrNoOperations
		}
		// Assert in the transaction that no storage assignment appears
		// between our read above and the transaction applying.
		hasNoStorageAssignment := bson.D{{"$or", []bson.D{
			{{"storageid", ""}},
			{{"storageid", bson.D{{"$exists", false}}}},
		}}}
		return destroyVolumeOps(sb, volume, false, hasNoStorageAssignment)
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3417
|
RemoveVolume
|
train
|
// RemoveVolume removes the given volume from state, provided it is
// already Dead.
func (sb *storageBackend) RemoveVolume(tag names.VolumeTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "removing volume %s", tag.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		volume, err := sb.Volume(tag)
		switch {
		case errors.IsNotFound(err):
			return nil, jujutxn.ErrNoOperations
		case err != nil:
			return nil, errors.Trace(err)
		case volume.Life() != Dead:
			return nil, errors.New("volume is not dead")
		}
		return sb.removeVolumeOps(tag), nil
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3418
|
addVolumeOps
|
train
|
func (sb *storageBackend) addVolumeOps(params VolumeParams, hostId string) ([]txn.Op, names.VolumeTag, error) {
params, err := sb.volumeParamsWithDefaults(params)
if err != nil {
return nil, names.VolumeTag{}, errors.Trace(err)
}
detachable, err := isDetachableVolumePool(sb, params.Pool)
if err != nil {
return nil, names.VolumeTag{}, errors.Trace(err)
}
origHostId := hostId
hostId, err = sb.validateVolumeParams(params, hostId)
if err != nil {
return nil, names.VolumeTag{}, errors.Annotate(err, "validating volume params")
}
name, err := newVolumeName(sb.mb, hostId)
if err != nil {
return nil, names.VolumeTag{}, errors.Annotate(err, "cannot generate volume name")
}
statusDoc := statusDoc{
Status: status.Pending,
Updated: sb.mb.clock().Now().UnixNano(),
}
doc := volumeDoc{
Name: name,
StorageId: params.storage.Id(),
}
if params.volumeInfo != nil {
// We're importing an already provisioned volume into the
// model. Set provisioned info rather than params, and set
// the status to "detached".
statusDoc.Status = status.Detached
doc.Info = params.volumeInfo
} else {
// Every new volume is created with one attachment.
doc.Params = ¶ms
doc.AttachmentCount = 1
}
if !detachable {
doc.HostId = origHostId
}
return sb.newVolumeOps(doc, statusDoc), names.NewVolumeTag(name), nil
}
|
go
|
{
"resource": ""
}
|
q3419
|
validateVolumeParams
|
train
|
// validateVolumeParams checks the volume parameters against the pool
// and returns the (possibly updated) machine ID to use.
func (sb *storageBackend) validateVolumeParams(params VolumeParams, machineId string) (maybeMachineId string, _ error) {
	err := validateStoragePool(sb, params.Pool, storage.StorageKindBlock, &machineId)
	if err != nil {
		return "", err
	}
	if params.Size == 0 {
		return "", errors.New("invalid size 0")
	}
	return machineId, nil
}
|
go
|
{
"resource": ""
}
|
q3420
|
ParseVolumeAttachmentId
|
train
|
// ParseVolumeAttachmentId parses a string in the form "host:volume",
// where host is a machine or unit ID, returning the corresponding
// host tag and volume tag.
func ParseVolumeAttachmentId(id string) (names.Tag, names.VolumeTag, error) {
	invalid := func() (names.Tag, names.VolumeTag, error) {
		return names.MachineTag{}, names.VolumeTag{}, errors.Errorf("invalid volume attachment ID %q", id)
	}
	fields := strings.SplitN(id, ":", 2)
	if len(fields) != 2 {
		return invalid()
	}
	hostId, volumeId := fields[0], fields[1]
	if !names.IsValidVolume(volumeId) {
		return invalid()
	}
	volumeTag := names.NewVolumeTag(volumeId)
	switch {
	case names.IsValidMachine(hostId):
		return names.NewMachineTag(hostId), volumeTag, nil
	case names.IsValidUnit(hostId):
		return names.NewUnitTag(hostId), volumeTag, nil
	default:
		return invalid()
	}
}
|
go
|
{
"resource": ""
}
|
q3421
|
createMachineVolumeAttachmentsOps
|
train
|
func createMachineVolumeAttachmentsOps(hostId string, attachments []volumeAttachmentTemplate) []txn.Op {
ops := make([]txn.Op, len(attachments))
for i, attachment := range attachments {
paramsCopy := attachment.params
ops[i] = txn.Op{
C: volumeAttachmentsC,
Id: volumeAttachmentId(hostId, attachment.tag.Id()),
Assert: txn.DocMissing,
Insert: &volumeAttachmentDoc{
Volume: attachment.tag.Id(),
Host: hostId,
Params: ¶msCopy,
},
}
if attachment.existing {
ops = append(ops, txn.Op{
C: volumesC,
Id: attachment.tag.Id(),
Assert: txn.DocExists,
Update: bson.D{{"$inc", bson.D{{"attachmentcount", 1}}}},
})
}
}
return ops
}
|
go
|
{
"resource": ""
}
|
q3422
|
setMachineVolumeAttachmentInfo
|
train
|
// setMachineVolumeAttachmentInfo records attachment info for each of
// the given volumes on the specified machine.
func setMachineVolumeAttachmentInfo(sb *storageBackend, machineId string, attachments map[names.VolumeTag]VolumeAttachmentInfo) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set volume attachment info for machine %s", machineId)
	machineTag := names.NewMachineTag(machineId)
	for volumeTag, info := range attachments {
		err := sb.setVolumeAttachmentInfo(machineTag, volumeTag, info)
		if err != nil {
			return errors.Annotatef(err, "setting attachment info for volume %s", volumeTag.Id())
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3423
|
SetVolumeAttachmentInfo
|
train
|
// SetVolumeAttachmentInfo records information about the attachment of
// the given volume to the given host. Both the volume and (for machine
// hosts) the machine must already be provisioned.
func (sb *storageBackend) SetVolumeAttachmentInfo(hostTag names.Tag, volumeTag names.VolumeTag, info VolumeAttachmentInfo) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set info for volume attachment %s:%s", volumeTag.Id(), hostTag.Id())
	vol, err := sb.Volume(volumeTag)
	if err != nil {
		return errors.Trace(err)
	}
	// The volume must be provisioned before attachment info can be set.
	// A volume cannot go from being provisioned to unprovisioned, so
	// there is no txn.Op asserting this below.
	if _, err := vol.Info(); err != nil {
		return errors.Trace(err)
	}
	// For machine hosts, the machine must be provisioned too.
	if _, isMachine := hostTag.(names.MachineTag); isMachine {
		machine, err := sb.machine(hostTag.Id())
		if err != nil {
			return errors.Trace(err)
		}
		if _, err := machine.InstanceId(); err != nil {
			return errors.Trace(err)
		}
	}
	return sb.setVolumeAttachmentInfo(hostTag, volumeTag, info)
}
|
go
|
{
"resource": ""
}
|
q3424
|
RemoveVolumeAttachmentPlan
|
train
|
// RemoveVolumeAttachmentPlan removes the Dying attachment plan of the
// given volume on the given host from state. If no plan exists, or the
// plan is not Dying, the call is a no-op.
func (sb *storageBackend) RemoveVolumeAttachmentPlan(hostTag names.Tag, volume names.VolumeTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "removing attachment plan of volume %s from machine %s", volume.Id(), hostTag.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		plans, err := sb.machineVolumeAttachmentPlans(hostTag, volume)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		// We should only ever have one plan per volume attachment.
		// NOTE: the original `plans != nil && len(plans) > 0` nil
		// check was redundant — len of a nil slice is 0 — and the
		// if/else made the no-op cases hard to read.
		if len(plans) == 0 || plans[0].Life() != Dying {
			return nil, jujutxn.ErrNoOperations
		}
		return removeVolumeAttachmentPlanOps(hostTag, volume), nil
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3425
|
removeVolumeAttachmentPlanOps
|
train
|
// removeVolumeAttachmentPlanOps returns transaction operations that
// remove the volume attachment plan document (asserting it is Dying)
// and detach the volume from the host.
func removeVolumeAttachmentPlanOps(hostTag names.Tag, volume names.VolumeTag) []txn.Op {
	ops := []txn.Op{{
		C:      volumeAttachmentPlanC,
		Id:     volumeAttachmentId(hostTag.Id(), volume.Id()),
		Assert: bson.D{{"life", Dying}},
		Remove: true,
	}}
	return append(ops, detachVolumeOps(hostTag, volume)...)
}
|
go
|
{
"resource": ""
}
|
q3426
|
setProvisionedVolumeInfo
|
train
|
// setProvisionedVolumeInfo records provisioning info for each of the
// given volumes.
func setProvisionedVolumeInfo(sb *storageBackend, volumes map[names.VolumeTag]VolumeInfo) error {
	for volumeTag, info := range volumes {
		err := sb.SetVolumeInfo(volumeTag, info)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3427
|
SetVolumeInfo
|
train
|
// SetVolumeInfo records provisioning information about the volume. On
// the first call the volume's pending params are unset (params and
// info are mutually exclusive); on later calls the new info must not
// change immutable properties of the existing info.
func (sb *storageBackend) SetVolumeInfo(tag names.VolumeTag, info VolumeInfo) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set info for volume %q", tag.Id())
	if info.VolumeId == "" {
		return errors.New("volume ID not set")
	}
	// TODO(axw) we should reject info without VolumeId set; can't do this
	// until the providers all set it correctly.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		v, err := sb.Volume(tag)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// If the volume has parameters, unset them when
		// we set info for the first time, ensuring that
		// params and info are mutually exclusive.
		var unsetParams bool
		var ops []txn.Op
		if params, ok := v.Params(); ok {
			// First provisioning: carry the pool over from params.
			info.Pool = params.Pool
			unsetParams = true
		} else {
			// Ensure immutable properties do not change.
			oldInfo, err := v.Info()
			if err != nil {
				return nil, err
			}
			if err := validateVolumeInfoChange(info, oldInfo); err != nil {
				return nil, err
			}
		}
		ops = append(ops, setVolumeInfoOps(tag, info, unsetParams)...)
		return ops, nil
	}
	return sb.mb.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q3428
|
AllVolumes
|
train
|
// AllVolumes returns every volume in the model.
func (sb *storageBackend) AllVolumes() ([]Volume, error) {
	docs, err := sb.volumes(nil)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get volumes")
	}
	return volumesToInterfaces(docs), nil
}
|
go
|
{
"resource": ""
}
|
q3429
|
watchMachinesLoop
|
train
|
// watchMachinesLoop is the central loop of the machine updater: it
// consumes machine ID changes from machinesWatcher, starts a goroutine
// per machine via startMachines, and reaps dead machine goroutines via
// the machineDead channel. It returns when the context is dying or the
// watcher closes.
func watchMachinesLoop(context updaterContext, machinesWatcher watcher.StringsWatcher) (err error) {
	p := &updater{
		context:     context,
		machines:    make(map[names.MachineTag]chan struct{}),
		machineDead: make(chan machine),
	}
	defer func() {
		// TODO(fwereade): is this a home-grown sync.WaitGroup or something?
		// strongly suspect these machine goroutines could be managed rather
		// less opaquely if we made them all workers.
		// Drain machineDead until every tracked machine goroutine has
		// reported in, so none is left blocked on send after we return.
		for len(p.machines) > 0 {
			delete(p.machines, (<-p.machineDead).Tag())
		}
	}()
	for {
		select {
		case <-p.context.dying():
			return p.context.errDying()
		case ids, ok := <-machinesWatcher.Changes():
			if !ok {
				// Channel closed: the watcher has stopped.
				return errors.New("machines watcher closed")
			}
			tags := make([]names.MachineTag, len(ids))
			for i := range ids {
				tags[i] = names.NewMachineTag(ids[i])
			}
			if err := p.startMachines(tags); err != nil {
				return err
			}
		case m := <-p.machineDead:
			// A machine goroutine exited; stop tracking it.
			delete(p.machines, m.Tag())
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3430
|
runMachine
|
train
|
// runMachine runs machineLoop for a single machine and, on exit,
// reports itself on the died channel. It kills the context on error.
func runMachine(context machineContext, m machine, changed <-chan struct{}, died chan<- machine, clock clock.Clock) {
	defer func() {
		// We can't just send on the died channel because the
		// central loop might be trying to write to us on the
		// changed channel.
		// The select drains `changed` while waiting so neither
		// side deadlocks.
		for {
			select {
			case died <- m:
				return
			case <-changed:
			}
		}
	}()
	if err := machineLoop(context, m, changed, clock); err != nil {
		context.kill(err)
	}
}
|
go
|
{
"resource": ""
}
|
q3431
|
pollInstanceInfo
|
train
|
// pollInstanceInfo polls the provider for the machine's instance status
// and addresses, and writes any changes back to state via the machine's
// setters. NOTE(review): instInfo (from context.instanceInfo) holds the
// freshly polled provider data, while m.InstanceStatus()/
// m.ProviderAddresses() presumably return what state currently records
// — so setting instInfo values below updates state to the polled
// values; confirm against the machine facade.
func pollInstanceInfo(context machineContext, m machine) (instInfo instanceInfo, err error) {
	instInfo = instanceInfo{}
	instId, err := m.InstanceId()
	// We can't ask the machine for its addresses if it isn't provisioned yet.
	if params.IsCodeNotProvisioned(err) {
		return instanceInfo{}, err
	}
	if err != nil {
		return instanceInfo{}, errors.Annotate(err, "cannot get machine's instance id")
	}
	// Poll the provider for current status and addresses.
	instInfo, err = context.instanceInfo(instId)
	if err != nil {
		// TODO (anastasiamac 2016-02-01) This does not look like it needs to be removed now.
		if params.IsCodeNotImplemented(err) {
			return instanceInfo{}, err
		}
		// Best-effort: log and carry on with whatever we have.
		logger.Warningf("cannot get instance info for instance %q: %v", instId, err)
		return instInfo, nil
	}
	if instStat, err := m.InstanceStatus(); err != nil {
		// This should never occur since the machine is provisioned.
		// But just in case, we reset polled status so we try again next time.
		logger.Warningf("cannot get current instance status for machine %v: %v", m.Id(), err)
		instInfo.status = instance.Status{status.Unknown, ""}
	} else {
		// TODO(perrito666) add status validation.
		// currentInstStatus is the status recorded in state; compare
		// with the freshly polled instInfo.status.
		currentInstStatus := instance.Status{
			Status:  status.Status(instStat.Status),
			Message: instStat.Info,
		}
		if instInfo.status != currentInstStatus {
			logger.Infof("machine %q instance status changed from %q to %q", m.Id(), currentInstStatus, instInfo.status)
			if err = m.SetInstanceStatus(instInfo.status.Status, instInfo.status.Message, nil); err != nil {
				logger.Errorf("cannot set instance status on %q: %v", m, err)
				return instanceInfo{}, err
			}
		}
	}
	// Don't bother updating addresses for dead machines.
	if m.Life() != params.Dead {
		// providerAddresses here are the addresses recorded in state.
		providerAddresses, err := m.ProviderAddresses()
		if err != nil {
			return instanceInfo{}, err
		}
		if !addressesEqual(providerAddresses, instInfo.addresses) {
			logger.Infof("machine %q has new addresses: %v", m.Id(), instInfo.addresses)
			if err := m.SetProviderAddresses(instInfo.addresses...); err != nil {
				logger.Errorf("cannot set addresses on %q: %v", m, err)
				return instanceInfo{}, err
			}
		}
	}
	return instInfo, nil
}
|
go
|
{
"resource": ""
}
|
q3432
|
addressesEqual
|
train
|
// addressesEqual reports whether the two address lists contain the same
// addresses, ignoring order.
func addressesEqual(a0, a1 []network.Address) bool {
	if len(a0) != len(a1) {
		logger.Tracef("address lists have different lengths %d != %d for %v != %v",
			len(a0), len(a1), a0, a1)
		return false
	}
	// Compare sorted copies so ordering differences don't matter.
	sortedCopy := func(in []network.Address) []network.Address {
		out := make([]network.Address, len(in))
		copy(out, in)
		network.SortAddresses(out)
		return out
	}
	s0, s1 := sortedCopy(a0), sortedCopy(a1)
	for i := range s0 {
		if s0[i] != s1[i] {
			logger.Tracef("address entry at offset %d has a different value for %v != %v",
				i, s0, s1)
			return false
		}
	}
	return true
}
|
go
|
{
"resource": ""
}
|
q3433
|
Count
|
train
|
// Count returns the number of documents in the collection that belong
// to this model.
func (c *modelStateCollection) Count() (int, error) {
	query := bson.D{{"model-uuid", c.modelUUID}}
	return c.WriteCollection.Find(query).Count()
}
|
go
|
{
"resource": ""
}
|
q3434
|
Insert
|
train
|
// Insert adds the given documents to the collection, stamping each one
// with this model's UUID before writing.
func (c *modelStateCollection) Insert(docs ...interface{}) error {
	munged := make([]interface{}, 0, len(docs))
	for _, doc := range docs {
		out, err := mungeDocForMultiModel(doc, c.modelUUID, modelUUIDRequired)
		if err != nil {
			return errors.Trace(err)
		}
		munged = append(munged, out)
	}
	return c.WriteCollection.Insert(munged...)
}
|
go
|
{
"resource": ""
}
|
q3435
|
UpdateId
|
train
|
// UpdateId updates the document with the given ID, prefixing string IDs
// with this model's UUID.
func (c *modelStateCollection) UpdateId(id interface{}, update interface{}) error {
	sid, isString := id.(string)
	if !isString {
		return c.WriteCollection.UpdateId(bson.D{{"_id", id}}, update)
	}
	return c.WriteCollection.UpdateId(ensureModelUUID(c.modelUUID, sid), update)
}
|
go
|
{
"resource": ""
}
|
q3436
|
getAllUnitNames
|
train
|
// getAllUnitNames returns unit tags for the given unit names plus all
// units of the given applications, deduplicated and sorted. Unit names
// ending in "leader" are resolved to the application's current leader
// unit.
func getAllUnitNames(st *state.State, units, services []string) (result []names.Tag, err error) {
	// leaders maps application name -> leader unit name, fetched
	// lazily on first use.
	var leaders map[string]string
	getLeader := func(appName string) (string, error) {
		if leaders == nil {
			var err error
			leaders, err = st.ApplicationLeaders()
			if err != nil {
				return "", err
			}
		}
		if leader, ok := leaders[appName]; ok {
			return leader, nil
		}
		return "", errors.Errorf("could not determine leader for %q", appName)
	}
	// Replace units matching $app/leader with the appropriate unit for
	// the leader.
	// NOTE(review): the suffix checked is "leader", not "/leader" —
	// any unit name ending in the bare word would match; confirm this
	// is intentional.
	unitsSet := set.NewStrings()
	for _, unit := range units {
		if !strings.HasSuffix(unit, "leader") {
			unitsSet.Add(unit)
			continue
		}
		app := strings.Split(unit, "/")[0]
		leaderUnit, err := getLeader(app)
		if err != nil {
			return nil, common.ServerError(err)
		}
		unitsSet.Add(leaderUnit)
	}
	// Expand each application into all of its units.
	for _, name := range services {
		service, err := st.Application(name)
		if err != nil {
			return nil, err
		}
		units, err := service.AllUnits()
		if err != nil {
			return nil, err
		}
		for _, unit := range units {
			unitsSet.Add(unit.Name())
		}
	}
	// Validate and convert to tags in deterministic (sorted) order.
	for _, unitName := range unitsSet.SortedValues() {
		if !names.IsValidUnit(unitName) {
			return nil, errors.Errorf("invalid unit name %q", unitName)
		}
		result = append(result, names.NewUnitTag(unitName))
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3437
|
Run
|
train
|
// Run queues run actions (commands) against the requested units and
// machines. The caller must have admin access and the model must allow
// changes.
func (a *ActionAPI) Run(run params.RunParams) (results params.ActionResults, err error) {
	if err := a.checkCanAdmin(); err != nil {
		return results, err
	}
	if err := a.check.ChangeAllowed(); err != nil {
		return results, errors.Trace(err)
	}
	units, err := getAllUnitNames(a.state, run.Units, run.Applications)
	if err != nil {
		return results, errors.Trace(err)
	}
	targets := make([]names.Tag, 0, len(units)+len(run.Machines))
	targets = append(targets, units...)
	for _, machineId := range run.Machines {
		if !names.IsValidMachine(machineId) {
			return results, errors.Errorf("invalid machine id %q", machineId)
		}
		targets = append(targets, names.NewMachineTag(machineId))
	}
	actionParams := a.createActionsParams(targets, run.Commands, run.Timeout)
	return queueActions(a, actionParams)
}
|
go
|
{
"resource": ""
}
|
q3438
|
RunOnAllMachines
|
train
|
// RunOnAllMachines queues run actions (commands) against every machine
// in the model. The caller must have admin access and the model must
// allow changes.
func (a *ActionAPI) RunOnAllMachines(run params.RunParams) (results params.ActionResults, err error) {
	if err := a.checkCanAdmin(); err != nil {
		return results, err
	}
	if err := a.check.ChangeAllowed(); err != nil {
		return results, errors.Trace(err)
	}
	machines, err := a.state.AllMachines()
	if err != nil {
		return results, err
	}
	targets := make([]names.Tag, 0, len(machines))
	for _, machine := range machines {
		targets = append(targets, machine.Tag())
	}
	actionParams := a.createActionsParams(targets, run.Commands, run.Timeout)
	return queueActions(a, actionParams)
}
|
go
|
{
"resource": ""
}
|
q3439
|
bounceErrChanged
|
train
|
// bounceErrChanged translates ErrChanged into dependency.ErrBounce so
// the dependency engine restarts the worker; other errors pass through.
func bounceErrChanged(err error) error {
	if errors.Cause(err) != ErrChanged {
		return err
	}
	return dependency.ErrBounce
}
|
go
|
{
"resource": ""
}
|
q3440
|
stateStepsFor237
|
train
|
// stateStepsFor237 returns the database upgrade steps for Juju 2.3.7.
func stateStepsFor237() []Step {
	step := &upgradeStep{
		description: "ensure container-image-stream isn't set in applications",
		targets:     []Target{DatabaseMaster},
		run: func(context Context) error {
			return context.State().RemoveContainerImageStreamFromNonModelSettings()
		},
	}
	return []Step{step}
}
|
go
|
{
"resource": ""
}
|
q3441
|
hookToolMain
|
train
|
// hookToolMain dispatches a hook tool invocation to the unit agent over
// its RPC socket and relays the response's stdout/stderr and exit code.
// The named result `code` defaults to 1 so every early error return
// reports failure.
func hookToolMain(commandName string, ctx *cmd.Context, args []string) (code int, err error) {
	code = 1
	// The agent identifies the hook context from this environment variable.
	contextId, err := getenv("JUJU_CONTEXT_ID")
	if err != nil {
		return
	}
	dir, err := getwd()
	if err != nil {
		return
	}
	req := jujuc.Request{
		ContextId:   contextId,
		Dir:         dir,
		CommandName: commandName,
		Args:        args[1:],
	}
	socketPath, err := getenv("JUJU_AGENT_SOCKET")
	if err != nil {
		return
	}
	client, err := sockets.Dial(socketPath)
	if err != nil {
		return
	}
	defer client.Close()
	var resp exec.ExecResponse
	err = client.Call("Jujuc.Main", req, &resp)
	// The server signals ErrNoStdin when the command needs stdin: read
	// it all, mark it present, and retry the call once.
	if err != nil && err.Error() == jujuc.ErrNoStdin.Error() {
		req.Stdin, err = ioutil.ReadAll(os.Stdin)
		if err != nil {
			err = errors.Annotate(err, "cannot read stdin")
			return
		}
		req.StdinSet = true
		err = client.Call("Jujuc.Main", req, &resp)
	}
	if err != nil {
		return
	}
	// Relay the remote command's output and exit code verbatim.
	os.Stdout.Write(resp.Stdout)
	os.Stderr.Write(resp.Stderr)
	return resp.Code, nil
}
|
go
|
{
"resource": ""
}
|
q3442
|
jujuDMain
|
train
|
// jujuDMain sets up logging and proxy configuration, registers the
// jujud agent subcommands (bootstrap, machine, unit, CAAS operator,
// check-connection), and runs the requested one, returning its exit
// code.
func jujuDMain(args []string, ctx *cmd.Context) (code int, err error) {
	// Assuming an average of 200 bytes per log message, use up to
	// 200MB for the log buffer.
	defer logger.Debugf("jujud complete, code %d, err %v", code, err)
	bufferedLogger, err := logsender.InstallBufferedLogWriter(1048576)
	if err != nil {
		return 1, errors.Trace(err)
	}
	// Set the default transport to use the in-process proxy
	// configuration.
	if err := proxy.DefaultConfig.Set(proxyutils.DetectProxies()); err != nil {
		return 1, errors.Trace(err)
	}
	if err := proxy.DefaultConfig.InstallInDefaultTransport(); err != nil {
		return 1, errors.Trace(err)
	}
	jujud := jujucmd.NewSuperCommand(cmd.SuperCommandParams{
		Name: "jujud",
		Doc:  jujudDoc,
	})
	// Route all log output through the custom jujud writer.
	jujud.Log.NewWriter = func(target io.Writer) loggo.Writer {
		return &jujudWriter{target: target}
	}
	jujud.Register(agentcmd.NewBootstrapCommand())
	// TODO(katco-): AgentConf type is doing too much. The
	// MachineAgent type has called out the separate concerns; the
	// AgentConf should be split up to follow suit.
	agentConf := agentcmd.NewAgentConf("")
	machineAgentFactory := agentcmd.MachineAgentFactoryFn(
		agentConf,
		bufferedLogger,
		agentcmd.DefaultIntrospectionSocketName,
		upgrades.PreUpgradeSteps,
		"",
	)
	jujud.Register(agentcmd.NewMachineAgentCmd(ctx, machineAgentFactory, agentConf, agentConf))
	unitAgent, err := agentcmd.NewUnitAgent(ctx, bufferedLogger)
	if err != nil {
		return -1, errors.Trace(err)
	}
	jujud.Register(unitAgent)
	caasOperatorAgent, err := agentcmd.NewCaasOperatorAgent(ctx, bufferedLogger)
	if err != nil {
		return -1, errors.Trace(err)
	}
	jujud.Register(caasOperatorAgent)
	jujud.Register(agentcmd.NewCheckConnectionCommand(agentConf, agentcmd.ConnectAsAgent))
	// args[0] is the program name; pass the rest to the supercommand.
	code = cmd.Main(jujud, ctx, args[1:])
	return code, nil
}
|
go
|
{
"resource": ""
}
|
q3443
|
saveApplicationWorker
|
train
|
// saveApplicationWorker records the worker for the named application,
// lazily creating the tracking map under the provisioner's lock.
func (p *provisioner) saveApplicationWorker(appName string, aw worker.Worker) {
	p.mu.Lock()
	defer p.mu.Unlock()
	workers := p.provisioners
	if workers == nil {
		workers = make(map[string]worker.Worker)
		p.provisioners = workers
	}
	workers[appName] = aw
}
|
go
|
{
"resource": ""
}
|
q3444
|
RunOnAllMachines
|
train
|
// RunOnAllMachines runs the given commands on every machine in the
// model via the RunOnAllMachines facade call.
func (c *Client) RunOnAllMachines(commands string, timeout time.Duration) ([]params.ActionResult, error) {
	args := params.RunParams{Commands: commands, Timeout: timeout}
	var results params.ActionResults
	err := c.facade.FacadeCall("RunOnAllMachines", args, &results)
	return results.Results, err
}
|
go
|
{
"resource": ""
}
|
q3445
|
Run
|
train
|
// Run executes the given run parameters via the Run facade call and
// returns the resulting action results.
func (c *Client) Run(run params.RunParams) ([]params.ActionResult, error) {
	var results params.ActionResults
	if err := c.facade.FacadeCall("Run", run, &results); err != nil {
		return results.Results, err
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3446
|
CloudAPIVersion
|
train
|
// CloudAPIVersion returns the cloud API version for CAAS models; IAAS
// models have no container broker, so the empty string is returned.
func (g EnvironConfigGetter) CloudAPIVersion(spec environs.CloudSpec) (string, error) {
	// Only CAAS models have an API version we care about right now.
	if g.Model.Type() == state.ModelTypeIAAS {
		return "", nil
	}
	modelCfg, err := g.ModelConfig()
	if err != nil {
		return "", errors.Trace(err)
	}
	controllerCfg, err := g.ControllerConfig()
	if err != nil {
		return "", errors.Trace(err)
	}
	brokerFunc := g.NewContainerBroker
	if brokerFunc == nil {
		brokerFunc = caas.New
	}
	openParams := environs.OpenParams{
		ControllerUUID: controllerCfg.ControllerUUID(),
		Cloud:          spec,
		Config:         modelCfg,
	}
	broker, err := brokerFunc(openParams)
	if err != nil {
		return "", errors.Trace(err)
	}
	return broker.APIVersion()
}
|
go
|
{
"resource": ""
}
|
q3447
|
GetNewEnvironFunc
|
train
|
// GetNewEnvironFunc adapts an environs.NewEnvironFunc into a function
// that builds an Environ from a *state.State.
func GetNewEnvironFunc(newEnviron environs.NewEnvironFunc) NewEnvironFunc {
	return func(st *state.State) (environs.Environ, error) {
		model, err := st.Model()
		if err != nil {
			return nil, errors.Trace(err)
		}
		getter := EnvironConfigGetter{State: st, Model: model}
		return environs.GetEnviron(getter, newEnviron)
	}
}
|
go
|
{
"resource": ""
}
|
q3448
|
GetNewCAASBrokerFunc
|
train
|
// GetNewCAASBrokerFunc adapts a caas.NewContainerBrokerFunc into a
// function that builds a CAAS broker from a *state.State.
func GetNewCAASBrokerFunc(newBroker caas.NewContainerBrokerFunc) NewCAASBrokerFunc {
	return func(st *state.State) (caas.Broker, error) {
		model, err := st.Model()
		if err != nil {
			return nil, errors.Trace(err)
		}
		getter := EnvironConfigGetter{State: st, Model: model}
		cloudSpec, err := getter.CloudSpec()
		if err != nil {
			return nil, errors.Trace(err)
		}
		modelCfg, err := getter.ModelConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		controllerCfg, err := getter.ControllerConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		openParams := environs.OpenParams{
			ControllerUUID: controllerCfg.ControllerUUID(),
			Cloud:          cloudSpec,
			Config:         modelCfg,
		}
		return newBroker(openParams)
	}
}
|
go
|
{
"resource": ""
}
|
q3449
|
setModelAccess
|
train
|
// setModelAccess stores the given access level for a user on a model,
// validating the level first. NotFound is returned when no existing
// permission document could be updated.
func (st *State) setModelAccess(access permission.Access, userGlobalKey, modelUUID string) error {
	if err := permission.ValidateModelAccess(access); err != nil {
		return errors.Trace(err)
	}
	ops := []txn.Op{updatePermissionOp(modelKey(modelUUID), userGlobalKey, access)}
	switch err := st.db().RunTransactionFor(modelUUID, ops); err {
	case txn.ErrAborted:
		return errors.NotFoundf("existing permissions")
	default:
		return errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q3450
|
LastModelConnection
|
train
|
// LastModelConnection returns the time (UTC) the given user last
// connected to the model. A NeverConnectedError is wrapped into the
// result when no connection was ever recorded.
func (m *Model) LastModelConnection(user names.UserTag) (time.Time, error) {
	coll, closer := m.st.db().GetRawCollection(modelUserLastConnectionC)
	defer closer()
	username := user.Id()
	query := coll.FindId(m.st.docID(username)).Select(bson.D{{"last-connection", 1}})
	var doc modelUserLastConnectionDoc
	if err := query.One(&doc); err != nil {
		if err == mgo.ErrNotFound {
			err = errors.Wrap(err, NeverConnectedError(username))
		}
		return time.Time{}, errors.Trace(err)
	}
	return doc.LastConnection.UTC(), nil
}
|
go
|
{
"resource": ""
}
|
q3451
|
IsNeverConnectedError
|
train
|
// IsNeverConnectedError reports whether err (or its cause) is a
// NeverConnectedError.
func IsNeverConnectedError(err error) bool {
	switch errors.Cause(err).(type) {
	case NeverConnectedError:
		return true
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3452
|
UpdateLastModelConnection
|
train
|
// UpdateLastModelConnection records the current time (truncated to the
// second) as the given user's most recent connection to the model.
func (m *Model) UpdateLastModelConnection(user names.UserTag) error {
	now := m.st.nowToTheSecond()
	return m.updateLastModelConnection(user, now)
}
|
go
|
{
"resource": ""
}
|
q3453
|
modelUser
|
train
|
// modelUser loads the user-access document for the given user on the
// given model. The DateCreated field is normalised back to UTC before
// returning.
func (st *State) modelUser(modelUUID string, user names.UserTag) (userAccessDoc, error) {
	coll, closer := st.db().GetCollectionFor(modelUUID, modelUsersC)
	defer closer()
	username := strings.ToLower(user.Id())
	var doc userAccessDoc
	switch err := coll.FindId(username).One(&doc); err {
	case nil:
		// Fall through to normalisation below.
	case mgo.ErrNotFound:
		return userAccessDoc{}, errors.NotFoundf("model user %q", username)
	default:
		return userAccessDoc{}, errors.Trace(err)
	}
	// DateCreated is inserted as UTC, but read out as local time. So we
	// convert it back to UTC here.
	doc.DateCreated = doc.DateCreated.UTC()
	return doc, nil
}
|
go
|
{
"resource": ""
}
|
q3454
|
removeModelUser
|
train
|
// removeModelUser deletes the model-user document for the given user on
// this state's model, returning NotFound when no such user exists.
func (st *State) removeModelUser(user names.UserTag) error {
	err := st.db().RunTransaction(removeModelUserOps(st.ModelUUID(), user))
	if err == txn.ErrAborted {
		return errors.NewNotFound(nil, fmt.Sprintf("model user %q does not exist", user.Id()))
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3455
|
isUserSuperuser
|
train
|
// isUserSuperuser reports whether the given user holds superuser access
// on this controller.
func (st *State) isUserSuperuser(user names.UserTag) (bool, error) {
	userAccess, err := st.UserAccess(user, st.controllerTag)
	if err != nil {
		// TODO(jam): 2017-11-27 We weren't suppressing NotFound here so that we would know when someone asked for
		// the list of models of a user that doesn't exist.
		// However, now we will not even check if its a known user if they aren't asking for all=true.
		return false, errors.Trace(err)
	}
	return userAccess.Access == permission.SuperuserAccess, nil
}
|
go
|
{
"resource": ""
}
|
q3456
|
modelQueryForUser
|
train
|
// modelQueryForUser returns a query over the models collection limited
// to what the given user may see, plus a closer the caller MUST invoke
// once finished with the query. Superusers see every model; other users
// only see models with a matching model-user record. Models that are
// mid-import (MigrationModeImporting) are excluded in both cases.
func (st *State) modelQueryForUser(user names.UserTag, isSuperuser bool) (mongo.Query, SessionCloser, error) {
	var modelQuery mongo.Query
	models, closer := st.db().GetCollection(modelsC)
	if isSuperuser {
		// Fast path, we just return all the models that aren't Importing
		modelQuery = models.Find(bson.M{"migration-mode": bson.M{"$ne": MigrationModeImporting}})
	} else {
		// Start by looking up model uuids that the user has access to, and then load only the records that are
		// included in that set
		var modelUUID struct {
			UUID string `bson:"object-uuid"`
		}
		modelUsers, userCloser := st.db().GetRawCollection(modelUsersC)
		defer userCloser()
		query := modelUsers.Find(bson.D{{"user", user.Id()}})
		query.Select(bson.M{"object-uuid": 1, "_id": 0})
		query.Batch(100)
		iter := query.Iter()
		var modelUUIDs []string
		for iter.Next(&modelUUID) {
			modelUUIDs = append(modelUUIDs, modelUUID.UUID)
		}
		if err := iter.Close(); err != nil {
			// On the error path the caller never receives the closer,
			// so release the models collection session here.
			closer()
			return nil, nil, errors.Trace(err)
		}
		modelQuery = models.Find(bson.M{
			"_id":            bson.M{"$in": modelUUIDs},
			"migration-mode": bson.M{"$ne": MigrationModeImporting},
		})
	}
	modelQuery.Sort("name", "owner")
	return modelQuery, closer, nil
}
|
go
|
{
"resource": ""
}
|
q3457
|
ModelBasicInfoForUser
|
train
|
// ModelBasicInfoForUser returns basic information (uuid, name, owner,
// type) about every model the given user can access, annotated with the
// user's last connection time to each model (zero when the user never
// connected).
func (st *State) ModelBasicInfoForUser(user names.UserTag) ([]ModelAccessInfo, error) {
	isSuperuser, err := st.isUserSuperuser(user)
	if err != nil {
		return nil, errors.Trace(err)
	}
	modelQuery, closer1, err := st.modelQueryForUser(user, isSuperuser)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer closer1()
	modelQuery.Select(bson.M{"_id": 1, "name": 1, "owner": 1, "type": 1})
	var accessInfo []ModelAccessInfo
	if err := modelQuery.All(&accessInfo); err != nil {
		return nil, errors.Trace(err)
	}
	// Now we need to find the last-connection time for each model for this user
	username := user.Id()
	// Last-connection docs are keyed "<model-uuid>:<username>".
	connDocIds := make([]string, len(accessInfo))
	for i, acc := range accessInfo {
		connDocIds[i] = acc.UUID + ":" + username
	}
	lastConnections, closer2 := st.db().GetRawCollection(modelUserLastConnectionC)
	defer closer2()
	query := lastConnections.Find(bson.M{"_id": bson.M{"$in": connDocIds}})
	query.Select(bson.M{"last-connection": 1, "_id": 0, "model-uuid": 1})
	query.Batch(100)
	iter := query.Iter()
	lastConns := make(map[string]time.Time, len(connDocIds))
	var connInfo modelUserLastConnectionDoc
	for iter.Next(&connInfo) {
		lastConns[connInfo.ModelUUID] = connInfo.LastConnection
	}
	if err := iter.Close(); err != nil {
		return nil, errors.Trace(err)
	}
	// Missing map entries yield the zero time.Time, meaning "never connected".
	for i := range accessInfo {
		uuid := accessInfo[i].UUID
		accessInfo[i].LastConnection = lastConns[uuid]
	}
	return accessInfo, nil
}
|
go
|
{
"resource": ""
}
|
q3458
|
IsControllerAdmin
|
train
|
// IsControllerAdmin reports whether the given user has superuser access
// to the controller. A missing permission record means "not admin"
// rather than an error.
func (st *State) IsControllerAdmin(user names.UserTag) (bool, error) {
	model, err := st.Model()
	if err != nil {
		return false, errors.Trace(err)
	}
	access, err := st.UserAccess(user, model.ControllerTag())
	switch {
	case errors.IsNotFound(err):
		return false, nil
	case err != nil:
		return false, errors.Trace(err)
	}
	return access.Access == permission.SuperuserAccess, nil
}
|
go
|
{
"resource": ""
}
|
q3459
|
SetAPIHostPorts
|
train
|
// SetAPIHostPorts records the API server addresses by mutating the
// agent configuration through ChangeConfig.
func (s APIHostPortsSetter) SetAPIHostPorts(servers [][]network.HostPort) error {
	update := func(c ConfigSetter) error {
		c.SetAPIHostPorts(servers)
		return nil
	}
	return s.ChangeConfig(update)
}
|
go
|
{
"resource": ""
}
|
q3460
|
Migrate
|
train
|
// Migrate copies every non-empty path from newPaths over the
// corresponding field of p, leaving unset fields untouched.
func (p *Paths) Migrate(newPaths Paths) {
	override := func(dst *string, src string) {
		if src != "" {
			*dst = src
		}
	}
	override(&p.DataDir, newPaths.DataDir)
	override(&p.LogDir, newPaths.LogDir)
	override(&p.MetricsSpoolDir, newPaths.MetricsSpoolDir)
	override(&p.ConfDir, newPaths.ConfDir)
}
|
go
|
{
"resource": ""
}
|
q3461
|
NewPathsWithDefaults
|
train
|
// NewPathsWithDefaults returns a copy of DefaultPaths with every
// non-empty field of p overriding the corresponding default.
func NewPathsWithDefaults(p Paths) Paths {
	paths := DefaultPaths
	// Migrate applies exactly the "override when non-empty" semantics
	// we want for each of the four path fields.
	paths.Migrate(p)
	return paths
}
|
go
|
{
"resource": ""
}
|
q3462
|
LogFilename
|
train
|
// LogFilename returns the path of the agent's log file, derived from
// the configured log directory and the agent's tag.
func LogFilename(c Config) string {
	name := c.Tag().String() + ".log"
	return filepath.Join(c.LogDir(), name)
}
|
go
|
{
"resource": ""
}
|
q3463
|
MachineLockLogFilename
|
train
|
// MachineLockLogFilename returns the path of the machine-lock log file
// inside the agent's log directory.
func MachineLockLogFilename(c Config) string {
	dir := c.LogDir()
	return filepath.Join(dir, machinelock.Filename)
}
|
go
|
{
"resource": ""
}
|
q3464
|
NewAgentConfig
|
train
|
// NewAgentConfig returns a new agent configuration built from the given
// parameters, validating every required field (data dir, tag kind,
// upgraded-to version, password, controller/model UUIDs, CA cert).
// The supplied password is stored only as the "old" password; the
// apicaller worker later generates and records a new secure one.
func NewAgentConfig(configParams AgentConfigParams) (ConfigSetterWriter, error) {
	if configParams.Paths.DataDir == "" {
		return nil, errors.Trace(requiredError("data directory"))
	}
	if configParams.Tag == nil {
		return nil, errors.Trace(requiredError("entity tag"))
	}
	switch configParams.Tag.(type) {
	case names.MachineTag, names.UnitTag, names.ApplicationTag:
		// These are the only three type of tags that can represent an agent
		// IAAS - machine and unit
		// CAAS - application
	default:
		return nil, errors.Errorf("entity tag must be MachineTag, UnitTag or ApplicationTag, got %T", configParams.Tag)
	}
	if configParams.UpgradedToVersion == version.Zero {
		return nil, errors.Trace(requiredError("upgradedToVersion"))
	}
	if configParams.Password == "" {
		return nil, errors.Trace(requiredError("password"))
	}
	if uuid := configParams.Controller.Id(); uuid == "" {
		return nil, errors.Trace(requiredError("controller"))
	} else if !names.IsValidController(uuid) {
		return nil, errors.Errorf("%q is not a valid controller uuid", uuid)
	}
	if uuid := configParams.Model.Id(); uuid == "" {
		return nil, errors.Trace(requiredError("model"))
	} else if !names.IsValidModel(uuid) {
		return nil, errors.Errorf("%q is not a valid model uuid", uuid)
	}
	if len(configParams.CACert) == 0 {
		return nil, errors.Trace(requiredError("CA certificate"))
	}
	// Note that the password parts of the state and api information are
	// blank. This is by design: we want to generate a secure password
	// for new agents. So, we create this config without a current password
	// which signals to apicaller worker that it should try to connect using old password.
	// When/if this connection is successful, apicaller worker will generate
	// a new secure password and update this agent's config.
	config := &configInternal{
		paths:              NewPathsWithDefaults(configParams.Paths),
		jobs:               configParams.Jobs,
		upgradedToVersion:  configParams.UpgradedToVersion,
		tag:                configParams.Tag,
		nonce:              configParams.Nonce,
		controller:         configParams.Controller,
		model:              configParams.Model,
		caCert:             configParams.CACert,
		oldPassword:        configParams.Password,
		values:             configParams.Values,
		mongoVersion:       configParams.MongoVersion.String(),
		mongoMemoryProfile: configParams.MongoMemoryProfile.String(),
	}
	// API details are only recorded when addresses were supplied.
	if len(configParams.APIAddresses) > 0 {
		config.apiDetails = &apiDetails{
			addresses: configParams.APIAddresses,
		}
	}
	if err := config.check(); err != nil {
		return nil, err
	}
	if config.values == nil {
		config.values = make(map[string]string)
	}
	config.configFilePath = ConfigPath(config.paths.DataDir, config.tag)
	return config, nil
}
|
go
|
{
"resource": ""
}
|
q3465
|
NewStateMachineConfig
|
train
|
// NewStateMachineConfig returns a configuration suitable for a machine
// running the controller, validating the state-serving details before
// delegating to NewAgentConfig.
func NewStateMachineConfig(configParams AgentConfigParams, serverInfo params.StateServingInfo) (ConfigSetterWriter, error) {
	// Validate the controller-specific serving details up front, in the
	// same order the errors were previously reported.
	required := []struct {
		ok   bool
		what string
	}{
		{serverInfo.Cert != "", "controller cert"},
		{serverInfo.PrivateKey != "", "controller key"},
		{serverInfo.CAPrivateKey != "", "ca cert key"},
		{serverInfo.StatePort != 0, "state port"},
		{serverInfo.APIPort != 0, "api port"},
	}
	for _, field := range required {
		if !field.ok {
			return nil, errors.Trace(requiredError(field.what))
		}
	}
	config, err := NewAgentConfig(configParams)
	if err != nil {
		return nil, err
	}
	config.SetStateServingInfo(serverInfo)
	return config, nil
}
|
go
|
{
"resource": ""
}
|
q3466
|
Dir
|
train
|
// Dir returns the agent-specific data directory for the given tag,
// rooted at the base agent directory under dataDir.
func Dir(dataDir string, tag names.Tag) string {
	// Note: must use path, not filepath, as this
	// function is used by the client on Windows.
	base := BaseDir(dataDir)
	return path.Join(base, tag.String())
}
|
go
|
{
"resource": ""
}
|
q3467
|
ReadConfig
|
train
|
// ReadConfig loads an agent configuration from the given file path,
// remembering the path so later writes go back to the same file.
func ReadConfig(configFilePath string) (ConfigSetterWriter, error) {
	raw, err := ioutil.ReadFile(configFilePath)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot read agent config %q", configFilePath)
	}
	format, config, err := parseConfigData(raw)
	if err != nil {
		return nil, err
	}
	logger.Debugf("read agent config, format %q", format.version())
	config.configFilePath = configFilePath
	return config, nil
}
|
go
|
{
"resource": ""
}
|
q3468
|
MongoVersion
|
train
|
// MongoVersion returns the configured mongo version, falling back to
// Mongo24 when the stored value cannot be parsed.
func (c *configInternal) MongoVersion() mongo.Version {
	if v, err := mongo.NewVersion(c.mongoVersion); err == nil {
		return v
	}
	return mongo.Mongo24
}
|
go
|
{
"resource": ""
}
|
q3469
|
MongoMemoryProfile
|
train
|
// MongoMemoryProfile returns the configured mongo memory profile,
// falling back to MemoryProfileLow when the stored value is invalid.
func (c *configInternal) MongoMemoryProfile() mongo.MemoryProfile {
	mprof := mongo.MemoryProfile(c.mongoMemoryProfile)
	if err := mprof.Validate(); err != nil {
		return mongo.MemoryProfileLow
	}
	// Return the value we already converted and validated rather than
	// converting c.mongoMemoryProfile a second time.
	return mprof
}
|
go
|
{
"resource": ""
}
|
q3470
|
SetMongoVersion
|
train
|
// SetMongoVersion records the given mongo version in the configuration.
func (c *configInternal) SetMongoVersion(v mongo.Version) {
	stored := v.String()
	c.mongoVersion = stored
}
|
go
|
{
"resource": ""
}
|
q3471
|
SetMongoMemoryProfile
|
train
|
// SetMongoMemoryProfile records the given mongo memory profile in the
// configuration.
func (c *configInternal) SetMongoMemoryProfile(v mongo.MemoryProfile) {
	stored := v.String()
	c.mongoMemoryProfile = stored
}
|
go
|
{
"resource": ""
}
|
q3472
|
WriteCommands
|
train
|
// WriteCommands returns shell commands that create the agent's config
// directory and write the rendered configuration with 0600 permissions.
func (c *configInternal) WriteCommands(renderer shell.Renderer) ([]string, error) {
	data, err := c.Render()
	if err != nil {
		return nil, errors.Trace(err)
	}
	filename := c.File(AgentConfigFilename)
	var commands []string
	commands = append(commands, renderer.MkdirAll(c.Dir())...)
	commands = append(commands, renderer.WriteFile(filename, data)...)
	commands = append(commands, renderer.Chmod(filename, 0600)...)
	return commands, nil
}
|
go
|
{
"resource": ""
}
|
q3473
|
APIInfo
|
train
|
// APIInfo returns the details the agent needs to connect to the API
// server, and whether such details are available (false when no API
// addresses were ever recorded). Controller agents are forced to the
// localhost address so they always talk to their own API server,
// preferring the dedicated controller API port when one is configured.
func (c *configInternal) APIInfo() (*api.Info, bool) {
	if c.apiDetails == nil || c.apiDetails.addresses == nil {
		return nil, false
	}
	servingInfo, isController := c.StateServingInfo()
	addrs := c.apiDetails.addresses
	// For controller we return only localhost - we should not connect
	// to other controllers if we can talk locally.
	if isController {
		port := servingInfo.APIPort
		// If the controller has been configured with a controller api port,
		// we return that instead of the normal api port.
		if servingInfo.ControllerAPIPort != 0 {
			port = servingInfo.ControllerAPIPort
		}
		// TODO(macgreagoir) IPv6. Ubuntu still always provides IPv4
		// loopback, and when/if this changes localhost should resolve
		// to IPv6 loopback in any case (lp:1644009). Review.
		localAPIAddr := net.JoinHostPort("localhost", strconv.Itoa(port))
		addrs = []string{localAPIAddr}
	}
	return &api.Info{
		Addrs:    addrs,
		Password: c.apiDetails.password,
		CACert:   c.caCert,
		Tag:      c.tag,
		Nonce:    c.nonce,
		ModelTag: c.model,
	}, true
}
|
go
|
{
"resource": ""
}
|
q3474
|
MongoInfo
|
train
|
// MongoInfo returns details for connecting to the controller's mongo,
// and whether such details are available (false when there is no state
// serving info or an API address is malformed). Localhost is always
// listed first, followed by the state port on every known API address.
func (c *configInternal) MongoInfo() (info *mongo.MongoInfo, ok bool) {
	ssi, ok := c.StateServingInfo()
	if !ok {
		return nil, false
	}
	// We return localhost first and then all addresses of known API
	// endpoints - this lets us connect to other Mongo instances and start
	// state even if our own Mongo has not started yet (see lp:1749383 #1).
	// TODO(macgreagoir) IPv6. Ubuntu still always provides IPv4 loopback,
	// and when/if this changes localhost should resolve to IPv6 loopback
	// in any case (lp:1644009). Review.
	local := net.JoinHostPort("localhost", strconv.Itoa(ssi.StatePort))
	addrs := []string{local}
	// Guard against a config that holds state serving info but never had
	// API addresses recorded: apiDetails is nil in that case, and the
	// original code would dereference it. APIInfo has the same guard.
	if c.apiDetails != nil {
		for _, addr := range c.apiDetails.addresses {
			host, _, err := net.SplitHostPort(addr)
			if err != nil {
				return nil, false
			}
			// Use a distinct name instead of shadowing "host".
			if stateAddr := net.JoinHostPort(host, strconv.Itoa(ssi.StatePort)); stateAddr != local {
				addrs = append(addrs, stateAddr)
			}
		}
	}
	return &mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  addrs,
			CACert: c.caCert,
		},
		Password: c.statePassword,
		Tag:      c.tag,
	}, true
}
|
go
|
{
"resource": ""
}
|
q3475
|
isCAASModelFacade
|
train
|
// isCAASModelFacade reports whether the named facade is available to
// clients connected to a CAAS model.
func isCAASModelFacade(facadeName string) bool {
	if caasModelFacadeNames.Contains(facadeName) {
		return true
	}
	if commonModelFacadeNames.Contains(facadeName) {
		return true
	}
	return commonFacadeNames.Contains(facadeName)
}
|
go
|
{
"resource": ""
}
|
q3476
|
UserErr
|
train
|
// UserErr wraps the error with a user-facing hint showing the juju
// command that agrees to the outstanding terms.
func (e *TermsRequiredError) UserErr() error {
	hint := errors.Errorf(`Declined: some terms require agreement. Try: "juju agree %s"`, strings.Join(e.Terms, " "))
	return errors.Wrap(e, hint)
}
|
go
|
{
"resource": ""
}
|
q3477
|
String
|
train
|
// String returns a human-readable summary of the instance constraint.
func (ic *InstanceConstraint) String() string {
	return fmt.Sprintf("{region: %s, series: %s, arches: %s, constraints: %s, storage: %s}",
		ic.Region, ic.Series, ic.Arches, ic.Constraints, ic.Storage)
}
|
go
|
{
"resource": ""
}
|
q3478
|
match
|
train
|
// match reports how well the image fits the given instance type: an
// exact match on architecture and virtualisation type, a partial match
// when the image declares no virt type, or no match at all.
func (image Image) match(itype InstanceType) imageMatch {
	if !image.matchArch(itype.Arches) {
		return nonMatch
	}
	switch {
	case itype.VirtType == nil:
		return exactMatch
	case image.VirtType == *itype.VirtType:
		return exactMatch
	case image.VirtType == "":
		// Image doesn't specify virtualisation type. We allow it
		// to match, but prefer exact matches.
		return partialMatch
	}
	return nonMatch
}
|
go
|
{
"resource": ""
}
|
q3479
|
New
|
train
|
// New returns a worker that watches the set of external controllers and
// keeps the locally-stored API details for each of them up to date.
// Per-controller watchers run under an internal runner so that one
// failing watcher does not stop the others; failed watchers are
// restarted after a one-minute delay.
func New(
	externalControllers ExternalControllerUpdaterClient,
	newExternalControllerWatcherClient NewExternalControllerWatcherClientFunc,
	clock clock.Clock,
) (worker.Worker, error) {
	w := updaterWorker{
		watchExternalControllers:           externalControllers.WatchExternalControllers,
		externalControllerInfo:             externalControllers.ExternalControllerInfo,
		setExternalControllerInfo:          externalControllers.SetExternalControllerInfo,
		newExternalControllerWatcherClient: newExternalControllerWatcherClient,
		runner: worker.NewRunner(worker.RunnerParams{
			// One of the controller watchers fails should not
			// prevent the others from running.
			IsFatal: func(error) bool { return false },
			// If the API connection fails, try again in 1 minute.
			RestartDelay: time.Minute,
			Clock:        clock,
		}),
	}
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
		Init: []worker.Worker{w.runner},
	}); err != nil {
		return nil, errors.Trace(err)
	}
	return &w, nil
}
|
go
|
{
"resource": ""
}
|
q3480
|
NewMockAvailabilityZone
|
train
|
// NewMockAvailabilityZone creates a new gomock-generated mock instance.
func NewMockAvailabilityZone(ctrl *gomock.Controller) *MockAvailabilityZone {
	mock := &MockAvailabilityZone{ctrl: ctrl}
	mock.recorder = &MockAvailabilityZoneMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q3481
|
Available
|
train
|
// Available mocks the base method, returning whatever the recorded
// expectation supplies.
func (m *MockAvailabilityZone) Available() bool {
	ret := m.ctrl.Call(m, "Available")
	ret0, _ := ret[0].(bool)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3482
|
Available
|
train
|
// Available indicates an expected call of Available.
func (mr *MockAvailabilityZoneMockRecorder) Available() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Available", reflect.TypeOf((*MockAvailabilityZone)(nil).Available))
}
|
go
|
{
"resource": ""
}
|
q3483
|
NewMockMetricsCollector
|
train
|
// NewMockMetricsCollector creates a new gomock-generated mock instance.
func NewMockMetricsCollector(ctrl *gomock.Controller) *MockMetricsCollector {
	mock := &MockMetricsCollector{ctrl: ctrl}
	mock.recorder = &MockMetricsCollectorMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q3484
|
Connections
|
train
|
// Connections mocks the base method.
func (m *MockMetricsCollector) Connections() prometheus.Gauge {
	ret := m.ctrl.Call(m, "Connections")
	ret0, _ := ret[0].(prometheus.Gauge)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3485
|
LogReadCount
|
train
|
// LogReadCount mocks the base method.
func (m *MockMetricsCollector) LogReadCount(arg0, arg1 string) prometheus.Counter {
	ret := m.ctrl.Call(m, "LogReadCount", arg0, arg1)
	ret0, _ := ret[0].(prometheus.Counter)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3486
|
LogReadCount
|
train
|
// LogReadCount indicates an expected call of LogReadCount.
func (mr *MockMetricsCollectorMockRecorder) LogReadCount(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogReadCount", reflect.TypeOf((*MockMetricsCollector)(nil).LogReadCount), arg0, arg1)
}
|
go
|
{
"resource": ""
}
|
q3487
|
PingFailureCount
|
train
|
// PingFailureCount mocks the base method.
func (m *MockMetricsCollector) PingFailureCount(arg0 string) prometheus.Counter {
	ret := m.ctrl.Call(m, "PingFailureCount", arg0)
	ret0, _ := ret[0].(prometheus.Counter)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3488
|
TotalConnections
|
train
|
// TotalConnections mocks the base method.
func (m *MockMetricsCollector) TotalConnections() prometheus.Counter {
	ret := m.ctrl.Call(m, "TotalConnections")
	ret0, _ := ret[0].(prometheus.Counter)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3489
|
NewFacade
|
train
|
// NewFacade returns an applicationscaler API facade backed by the given
// caller, wired up with the standard strings-watcher constructor.
func NewFacade(apiCaller base.APICaller) (Facade, error) {
	api := applicationscaler.NewAPI(apiCaller, watcher.NewStringsWatcher)
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q3490
|
Close
|
train
|
// Close kills the layer's tomb and waits for its loop to finish,
// returning whatever error the worker died with.
func (l *streamLayer) Close() error {
	l.tomb.Kill(nil)
	return l.tomb.Wait()
}
|
go
|
{
"resource": ""
}
|
q3491
|
Addr
|
train
|
// Addr returns the layer's current API address. It blocks until an
// address arrives on l.addr, returning invalidAddr if the layer is
// already dying, or if AddrTimeout elapses first — in which case the
// worker (and its parent) is killed with ErrAddressTimeout.
func (l *streamLayer) Addr() net.Addr {
	select {
	case <-l.tomb.Dying():
		return invalidAddr
	case <-l.clock.After(AddrTimeout):
		logger.Errorf("streamLayer.Addr timed out waiting for API address")
		// Stop this (and parent) worker.
		l.tomb.Kill(ErrAddressTimeout)
		return invalidAddr
	case addr := <-l.addr:
		return addr
	}
}
|
go
|
{
"resource": ""
}
|
q3492
|
registerForServer
|
train
|
// registerForServer registers the state, agent-worker and hook-context
// pieces of the resources component for the server. It always returns
// nil.
func (r resources) registerForServer() error {
	r.registerState()
	r.registerAgentWorkers()
	r.registerHookContext()
	return nil
}
|
go
|
{
"resource": ""
}
|
q3493
|
registerAgentWorkers
|
train
|
// registerAgentWorkers registers the resources-related agent workers,
// guarding against double registration via markRegistered.
func (r resources) registerAgentWorkers() {
	if markRegistered(resource.ComponentName, "agent-workers") {
		charmrevisionupdater.RegisterLatestCharmHandler("resources", resourceadapters.NewLatestCharmHandler)
	}
}
|
go
|
{
"resource": ""
}
|
q3494
|
NewMockZonedEnviron
|
train
|
// NewMockZonedEnviron creates a new gomock-generated mock instance.
func NewMockZonedEnviron(ctrl *gomock.Controller) *MockZonedEnviron {
	mock := &MockZonedEnviron{ctrl: ctrl}
	mock.recorder = &MockZonedEnvironMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q3495
|
AdoptResources
|
train
|
// AdoptResources mocks the base method.
func (m *MockZonedEnviron) AdoptResources(arg0 context.ProviderCallContext, arg1 string, arg2 version.Number) error {
	ret := m.ctrl.Call(m, "AdoptResources", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q3496
|
AvailabilityZones
|
train
|
// AvailabilityZones mocks the base method.
func (m *MockZonedEnviron) AvailabilityZones(arg0 context.ProviderCallContext) ([]common.AvailabilityZone, error) {
	ret := m.ctrl.Call(m, "AvailabilityZones", arg0)
	ret0, _ := ret[0].([]common.AvailabilityZone)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q3497
|
ConstraintsValidator
|
train
|
// ConstraintsValidator mocks the base method.
func (m *MockZonedEnviron) ConstraintsValidator(arg0 context.ProviderCallContext) (constraints.Validator, error) {
	ret := m.ctrl.Call(m, "ConstraintsValidator", arg0)
	ret0, _ := ret[0].(constraints.Validator)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q3498
|
DeriveAvailabilityZones
|
train
|
// DeriveAvailabilityZones mocks the base method.
func (m *MockZonedEnviron) DeriveAvailabilityZones(arg0 context.ProviderCallContext, arg1 environs.StartInstanceParams) ([]string, error) {
	ret := m.ctrl.Call(m, "DeriveAvailabilityZones", arg0, arg1)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q3499
|
Destroy
|
train
|
// Destroy mocks the base method.
func (m *MockZonedEnviron) Destroy(arg0 context.ProviderCallContext) error {
	ret := m.ctrl.Call(m, "Destroy", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.