_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4800
|
SetMongoPassword
|
train
|
// SetMongoPassword sets the admin mongo password for this machine.
// Only controller (manager) machines may hold a mongo password.
func (m *Machine) SetMongoPassword(password string) error {
	if m.IsManager() {
		return mongo.SetAdminMongoPassword(m.st.session, m.Tag().String(), password)
	}
	return errors.NotSupportedf("setting mongo password for non-controller machine %v", m)
}
|
go
|
{
"resource": ""
}
|
q4801
|
PasswordValid
|
train
|
// PasswordValid reports whether the given password hashes to the
// machine's stored agent password hash.
func (m *Machine) PasswordValid(password string) bool {
	return utils.AgentPasswordHash(password) == m.doc.PasswordHash
}
|
go
|
{
"resource": ""
}
|
q4802
|
ForceDestroy
|
train
|
// ForceDestroy queues the forced destruction of this machine, waiting
// up to maxWait for graceful cleanup before harsher steps are taken.
func (m *Machine) ForceDestroy(maxWait time.Duration) error {
	ops, err := m.forceDestroyOps(maxWait)
	if err != nil {
		return errors.Trace(err)
	}
	// NOTE(review): txn.ErrAborted is deliberately swallowed here (the
	// comparison is `!=`), presumably because an aborted transaction
	// means the machine's state changed underneath us — confirm intent.
	// A nil error also takes this branch, but errors.Annotatef(nil, ...)
	// yields nil, so success still returns nil.
	if err := m.st.db().RunTransaction(ops); err != txn.ErrAborted {
		return errors.Annotatef(err, "failed to run transaction: %s", pretty.Sprint(ops))
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4803
|
ParentId
|
train
|
// ParentId returns the id of the host machine, plus a flag reporting
// whether this machine is in fact a container with a parent.
func (m *Machine) ParentId() (string, bool) {
	id := ParentId(m.Id())
	if id == "" {
		return "", false
	}
	return id, true
}
|
go
|
{
"resource": ""
}
|
q4804
|
IsHasContainersError
|
train
|
// IsHasContainersError reports whether err (or its cause) is a
// *HasContainersError.
func IsHasContainersError(err error) bool {
	switch errors.Cause(err).(type) {
	case *HasContainersError:
		return true
	default:
		return false
	}
}
|
go
|
{
"resource": ""
}
|
q4805
|
IsHasAttachmentsError
|
train
|
// IsHasAttachmentsError reports whether err (or its cause) is a
// *HasAttachmentsError.
func IsHasAttachmentsError(err error) bool {
	switch errors.Cause(err).(type) {
	case *HasAttachmentsError:
		return true
	default:
		return false
	}
}
|
go
|
{
"resource": ""
}
|
q4806
|
Remove
|
train
|
// Remove deletes the machine from state inside a retryable
// transaction. Errors are annotated with the machine id via the
// deferred annotation on the named return value.
func (m *Machine) Remove() (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot remove machine %s", m.doc.Id)
	logger.Tracef("removing machine %q", m.Id())
	// Local variable so we can re-get the machine without disrupting
	// the caller.
	machine := m
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// On retries, re-fetch the machine so the ops are built against
		// the current document state.
		if attempt != 0 {
			machine, err = machine.st.Machine(machine.Id())
			if errors.IsNotFound(err) {
				// The machine's gone away, that's fine.
				return nil, jujutxn.ErrNoOperations
			}
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
		ops, err := machine.removeOps()
		if err != nil {
			return nil, errors.Trace(err)
		}
		return ops, nil
	}
	return m.st.db().Run(buildTxn)
}
|
go
|
{
"resource": ""
}
|
q4807
|
Refresh
|
train
|
// Refresh reloads the machine document from the underlying state,
// replacing the in-memory copy.
func (m *Machine) Refresh() error {
	mdoc, err := m.st.getMachineDoc(m.Id())
	if err != nil {
		// Pass NotFound through unannotated so callers can still detect
		// it with errors.IsNotFound.
		if errors.IsNotFound(err) {
			return err
		}
		return errors.Annotatef(err, "cannot refresh machine %v", m)
	}
	m.doc = *mdoc
	return nil
}
|
go
|
{
"resource": ""
}
|
q4808
|
SetAgentPresence
|
train
|
// SetAgentPresence starts a presence pinger announcing that this
// machine's agent is alive, returning the pinger so the caller can
// manage its lifetime.
func (m *Machine) SetAgentPresence() (*presence.Pinger, error) {
	presenceCollection := m.st.getPresenceCollection()
	recorder := m.st.getPingBatcher()
	p := presence.NewPinger(presenceCollection, m.st.modelTag, m.globalKey(),
		func() presence.PingRecorder { return m.st.getPingBatcher() })
	err := p.Start()
	if err != nil {
		return nil, err
	}
	// Make sure this Agent status is written to the database before returning.
	recorder.Sync()
	// We preform a manual sync here so that the
	// presence pinger has the most up-to-date information when it
	// starts. This ensures that commands run immediately after bootstrap
	// like status or enable-ha will have an accurate values
	// for agent-state.
	//
	// TODO: Does not work for multiple controllers. Trigger a sync across all controllers.
	if m.IsManager() {
		m.st.workers.presenceWatcher().Sync()
	}
	return p, nil
}
|
go
|
{
"resource": ""
}
|
q4809
|
InstanceId
|
train
|
// InstanceId returns the provider-specific instance id for this
// machine, discarding the display name.
func (m *Machine) InstanceId() (instance.Id, error) {
	id, _, err := m.InstanceNames()
	if err != nil {
		return id, err
	}
	return id, nil
}
|
go
|
{
"resource": ""
}
|
q4810
|
InstanceNames
|
train
|
// InstanceNames returns the machine's instance id and display name.
// A NotProvisioned error is returned when no instance data is recorded.
func (m *Machine) InstanceNames() (instance.Id, string, error) {
	data, err := getInstanceData(m.st, m.Id())
	switch {
	case errors.IsNotFound(err):
		return "", "", errors.NotProvisionedf("machine %v", m.Id())
	case err != nil:
		return "", "", err
	}
	return data.InstanceId, data.DisplayName, nil
}
|
go
|
{
"resource": ""
}
|
q4811
|
InstanceStatus
|
train
|
// InstanceStatus returns the provider-specific (instance) status for
// this machine, logging a warning on retrieval failure.
func (m *Machine) InstanceStatus() (status.StatusInfo, error) {
	info, err := getStatus(m.st.db(), m.globalInstanceKey(), "instance")
	if err == nil {
		return info, nil
	}
	logger.Warningf("error when retrieving instance status for machine: %s, %v", m.Id(), err)
	return status.StatusInfo{}, err
}
|
go
|
{
"resource": ""
}
|
q4812
|
SetInstanceStatus
|
train
|
// SetInstanceStatus sets the provider-specific (instance) status for
// this machine.
func (m *Machine) SetInstanceStatus(sInfo status.StatusInfo) (err error) {
	params := setStatusParams{
		badge:     "instance",
		globalKey: m.globalInstanceKey(),
		status:    sInfo.Status,
		message:   sInfo.Message,
		rawData:   sInfo.Data,
		updated:   timeOrNow(sInfo.Since, m.st.clock()),
	}
	return setStatus(m.st.db(), params)
}
|
go
|
{
"resource": ""
}
|
q4813
|
ModificationStatus
|
train
|
// ModificationStatus returns the modification status recorded under the
// machine's modification key.
func (m *Machine) ModificationStatus() (status.StatusInfo, error) {
	machineStatus, err := getStatus(m.st.db(), m.globalModificationKey(), "modification")
	if err != nil {
		// Fix: the log line previously said "instance status" — a
		// copy-paste from InstanceStatus — while this reads the
		// modification status.
		logger.Warningf("error when retrieving modification status for machine: %s, %v", m.Id(), err)
		return status.StatusInfo{}, err
	}
	return machineStatus, nil
}
|
go
|
{
"resource": ""
}
|
q4814
|
AvailabilityZone
|
train
|
// AvailabilityZone returns the availability zone recorded in the
// machine's instance data, or "" when none was recorded. A
// NotProvisioned error is returned if there is no instance data at all.
func (m *Machine) AvailabilityZone() (string, error) {
	data, err := getInstanceData(m.st, m.Id())
	if errors.IsNotFound(err) {
		return "", errors.Trace(errors.NotProvisionedf("machine %v", m.Id()))
	}
	if err != nil {
		return "", errors.Trace(err)
	}
	if data.AvailZone == nil {
		return "", nil
	}
	return *data.AvailZone, nil
}
|
go
|
{
"resource": ""
}
|
q4815
|
ApplicationNames
|
train
|
// ApplicationNames returns the sorted, de-duplicated names of the
// applications whose units are assigned to this machine.
func (m *Machine) ApplicationNames() ([]string, error) {
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}
	names := set.NewStrings()
	for _, u := range units {
		names.Add(u.ApplicationName())
	}
	return names.SortedValues(), nil
}
|
go
|
{
"resource": ""
}
|
q4816
|
Units
|
train
|
// Units returns the units placed on this machine: each principal unit
// assigned to the machine, immediately followed by that principal's
// subordinate units (looked up via the "principal" field).
func (m *Machine) Units() (units []*Unit, err error) {
	defer errors.DeferredAnnotatef(&err, "cannot get units assigned to machine %v", m)
	unitsCollection, closer := m.st.db().GetCollection(unitsC)
	defer closer()
	// Principal unit docs: those assigned directly to this machine.
	pudocs := []unitDoc{}
	err = unitsCollection.Find(bson.D{{"machineid", m.doc.Id}}).All(&pudocs)
	if err != nil {
		return nil, err
	}
	model, err := m.st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, pudoc := range pudocs {
		units = append(units, newUnit(m.st, model.Type(), &pudoc))
		// Gather this principal's subordinate units as well.
		docs := []unitDoc{}
		err = unitsCollection.Find(bson.D{{"principal", pudoc.Name}}).All(&docs)
		if err != nil {
			return nil, err
		}
		for _, doc := range docs {
			units = append(units, newUnit(m.st, model.Type(), &doc))
		}
	}
	return units, nil
}
|
go
|
{
"resource": ""
}
|
q4817
|
DesiredSpaces
|
train
|
// DesiredSpaces returns the union of the spaces requested by the
// machine's constraints and the spaces used in the endpoint bindings
// of applications with units on this machine.
func (m *Machine) DesiredSpaces() (set.Strings, error) {
	spaces := set.NewStrings()
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}
	constraints, err := m.Constraints()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// We ignore negative spaces as it doesn't change what spaces we do want.
	positiveSpaces, _ := convertSpacesFromConstraints(constraints.Spaces)
	for _, space := range positiveSpaces {
		spaces.Add(space)
	}
	bindings := set.NewStrings()
	for _, unit := range units {
		app, err := unit.Application()
		if err != nil {
			return nil, errors.Trace(err)
		}
		endpointBindings, err := app.EndpointBindings()
		// Fix: this error was previously ignored, which could silently
		// compute spaces from a zero-value bindings map.
		if err != nil {
			return nil, errors.Trace(err)
		}
		for _, space := range endpointBindings {
			if space != "" {
				bindings.Add(space)
			}
		}
	}
	logger.Tracef("machine %q found constraints %s and bindings %s",
		m.Id(), network.QuoteSpaceSet(spaces), network.QuoteSpaceSet(bindings))
	return spaces.Union(bindings), nil
}
|
go
|
{
"resource": ""
}
|
q4818
|
SetInstanceInfo
|
train
|
// SetInstanceInfo records everything the provisioner learned about the
// machine's instance: network devices and addresses, storage volumes
// and attachments, instance id/name/nonce/hardware, and charm profiles.
// The ordering below is deliberate: devices before addresses, volume
// info before attachment info and statuses, and the machine is marked
// provisioned only after all of those succeed.
func (m *Machine) SetInstanceInfo(
	id instance.Id, displayName string, nonce string, characteristics *instance.HardwareCharacteristics,
	devicesArgs []LinkLayerDeviceArgs, devicesAddrs []LinkLayerDeviceAddress,
	volumes map[names.VolumeTag]VolumeInfo,
	volumeAttachments map[names.VolumeTag]VolumeAttachmentInfo,
	charmProfiles []string,
) error {
	logger.Tracef(
		"setting instance info: machine %v, deviceAddrs: %#v, devicesArgs: %#v",
		m.Id(), devicesAddrs, devicesArgs)
	if err := m.SetParentLinkLayerDevicesBeforeTheirChildren(devicesArgs); err != nil {
		return errors.Trace(err)
	}
	if err := m.SetDevicesAddressesIdempotently(devicesAddrs); err != nil {
		return errors.Trace(err)
	}
	sb, err := NewStorageBackend(m.st)
	if err != nil {
		return errors.Trace(err)
	}
	// Record volumes and volume attachments, and set the initial
	// status: attached or attaching.
	if err := setProvisionedVolumeInfo(sb, volumes); err != nil {
		return errors.Trace(err)
	}
	if err := setMachineVolumeAttachmentInfo(sb, m.Id(), volumeAttachments); err != nil {
		return errors.Trace(err)
	}
	volumeStatus := make(map[names.VolumeTag]status.Status)
	for tag := range volumes {
		volumeStatus[tag] = status.Attaching
	}
	// An entry in volumeAttachments upgrades the status to Attached,
	// overwriting any Attaching entry set above.
	for tag := range volumeAttachments {
		volumeStatus[tag] = status.Attached
	}
	for tag, volStatus := range volumeStatus {
		vol, err := sb.Volume(tag)
		if err != nil {
			return errors.Trace(err)
		}
		if err := vol.SetStatus(status.StatusInfo{
			Status: volStatus,
		}); err != nil {
			return errors.Annotatef(
				err, "setting status of %s", names.ReadableString(tag),
			)
		}
	}
	if err := m.SetProvisioned(id, displayName, nonce, characteristics); err != nil {
		return errors.Trace(err)
	}
	return m.SetCharmProfiles(charmProfiles)
}
|
go
|
{
"resource": ""
}
|
q4819
|
Addresses
|
train
|
// Addresses returns all addresses known for the machine, merging the
// machine-reported and provider-reported address lists.
func (m *Machine) Addresses() (addresses []network.Address) {
	machineAddrs := networkAddresses(m.doc.MachineAddresses)
	providerAddrs := networkAddresses(m.doc.Addresses)
	return network.MergedAddresses(machineAddrs, providerAddrs)
}
|
go
|
{
"resource": ""
}
|
q4820
|
maybeGetNewAddress
|
train
|
// maybeGetNewAddress picks the best candidate address (provider
// addresses preferred over machine addresses) and decides whether it
// should replace the currently stored addr. It returns the address to
// store and whether a change is needed.
func maybeGetNewAddress(
	addr address,
	providerAddresses,
	machineAddresses []address,
	getAddr func([]address) network.Address,
	checkScope func(address) bool,
) (address, bool) {
	// For picking the best address, try provider addresses first.
	var newAddr address
	netAddr := getAddr(providerAddresses)
	if netAddr.Value == "" {
		netAddr = getAddr(machineAddresses)
		newAddr = fromNetworkAddress(netAddr, OriginMachine)
	} else {
		newAddr = fromNetworkAddress(netAddr, OriginProvider)
	}
	// The order of these checks is important. If the stored address is
	// empty we *always* want to check for a new address so we do that
	// first. If the stored address is unavailable we also *must* check for
	// a new address so we do that next. If the original is a machine
	// address and a provider address is available we want to switch to
	// that. Finally we check to see if a better match on scope from the
	// same origin is available.
	if addr.Value == "" {
		return newAddr, newAddr.Value != ""
	}
	if !containsAddress(providerAddresses, addr) && !containsAddress(machineAddresses, addr) {
		return newAddr, true
	}
	if Origin(addr.Origin) != OriginProvider && Origin(newAddr.Origin) == OriginProvider {
		return newAddr, true
	}
	if !checkScope(addr) {
		// If addr.Origin is machine and newAddr.Origin is provider we will
		// have already caught that, and for the inverse we don't want to
		// replace the address.
		if addr.Origin == newAddr.Origin {
			return newAddr, checkScope(newAddr)
		}
	}
	return addr, false
}
|
go
|
{
"resource": ""
}
|
q4821
|
SetProviderAddresses
|
train
|
// SetProviderAddresses records the given addresses as the
// provider-reported addresses of this machine.
func (m *Machine) SetProviderAddresses(addresses ...network.Address) error {
	if err := m.setAddresses(nil, &addresses); err != nil {
		return errors.Annotatef(err, "cannot set addresses of machine %v", m)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4822
|
ProviderAddresses
|
train
|
// ProviderAddresses returns the addresses the provider reported for
// this machine (nil when none are recorded).
func (m *Machine) ProviderAddresses() (addresses []network.Address) {
	for i := range m.doc.Addresses {
		addresses = append(addresses, m.doc.Addresses[i].networkAddress())
	}
	return addresses
}
|
go
|
{
"resource": ""
}
|
q4823
|
MachineAddresses
|
train
|
// MachineAddresses returns the addresses the machine itself reported
// (nil when none are recorded).
func (m *Machine) MachineAddresses() (addresses []network.Address) {
	for i := range m.doc.MachineAddresses {
		addresses = append(addresses, m.doc.MachineAddresses[i].networkAddress())
	}
	return addresses
}
|
go
|
{
"resource": ""
}
|
q4824
|
CheckProvisioned
|
train
|
// CheckProvisioned reports whether nonce is non-empty and matches the
// machine's stored provisioning nonce.
func (m *Machine) CheckProvisioned(nonce string) bool {
	if nonce == "" {
		return false
	}
	return nonce == m.doc.Nonce
}
|
go
|
{
"resource": ""
}
|
q4825
|
Constraints
|
train
|
// Constraints returns the constraints stored under the machine's
// global key.
func (m *Machine) Constraints() (constraints.Value, error) {
	key := m.globalKey()
	return readConstraints(m.st, key)
}
|
go
|
{
"resource": ""
}
|
q4826
|
SetConstraints
|
train
|
// SetConstraints replaces the machine's constraints by applying an
// update operation.
func (m *Machine) SetConstraints(cons constraints.Value) (err error) {
	update := m.UpdateOperation()
	update.Constraints = &cons
	err = m.st.ApplyOperation(update)
	return err
}
|
go
|
{
"resource": ""
}
|
q4827
|
Status
|
train
|
// Status returns the status of the machine.
func (m *Machine) Status() (status.StatusInfo, error) {
	// Idiom fix: the original checked the error only to return the same
	// (value, err) pair in both branches; return getStatus directly.
	return getStatus(m.st.db(), m.globalKey(), "machine")
}
|
go
|
{
"resource": ""
}
|
q4828
|
SetStatus
|
train
|
// SetStatus sets the status of the machine, validating that the
// requested status transition is legal for an agent-managed machine.
func (m *Machine) SetStatus(statusInfo status.StatusInfo) error {
	switch statusInfo.Status {
	case status.Started, status.Stopped:
		// Always allowed.
	case status.Error:
		// Error status requires an explanatory message.
		if statusInfo.Message == "" {
			return errors.Errorf("cannot set status %q without info", statusInfo.Status)
		}
	case status.Pending:
		// If a machine is not yet provisioned, we allow its status
		// to be set back to pending (when a retry is to occur).
		_, err := m.InstanceId()
		allowPending := errors.IsNotProvisioned(err)
		if allowPending {
			break
		}
		// Provisioned machines fall through and are rejected like Down.
		fallthrough
	case status.Down:
		return errors.Errorf("cannot set status %q", statusInfo.Status)
	default:
		return errors.Errorf("cannot set invalid status %q", statusInfo.Status)
	}
	return setStatus(m.st.db(), setStatusParams{
		badge:     "machine",
		globalKey: m.globalKey(),
		status:    statusInfo.Status,
		message:   statusInfo.Message,
		rawData:   statusInfo.Data,
		updated:   timeOrNow(statusInfo.Since, m.st.clock()),
	})
}
|
go
|
{
"resource": ""
}
|
q4829
|
SupportedContainers
|
train
|
// SupportedContainers returns the container types this machine can
// host, plus a flag reporting whether that list is known yet.
func (m *Machine) SupportedContainers() ([]instance.ContainerType, bool) {
	known := m.doc.SupportedContainersKnown
	return m.doc.SupportedContainers, known
}
|
go
|
{
"resource": ""
}
|
q4830
|
SupportsNoContainers
|
train
|
// SupportsNoContainers records that this machine hosts no container
// types at all, then marks any existing containers as invalid.
func (m *Machine) SupportsNoContainers() (err error) {
	err = m.updateSupportedContainers([]instance.ContainerType{})
	if err != nil {
		return err
	}
	return m.markInvalidContainers()
}
|
go
|
{
"resource": ""
}
|
q4831
|
SetSupportedContainers
|
train
|
// SetSupportedContainers records the container types this machine can
// host and then marks any unsupported existing containers as invalid.
// At least one valid (non-NONE) container type is required.
func (m *Machine) SetSupportedContainers(containers []instance.ContainerType) (err error) {
	// Consistency fix: use errors.Errorf like the rest of this code
	// (e.g. SetStatus) instead of fmt.Errorf.
	if len(containers) == 0 {
		return errors.Errorf("at least one valid container type is required")
	}
	for _, container := range containers {
		if container == instance.NONE {
			return errors.Errorf("%q is not a valid container type", container)
		}
	}
	if err = m.updateSupportedContainers(containers); err != nil {
		return err
	}
	return m.markInvalidContainers()
}
|
go
|
{
"resource": ""
}
|
q4832
|
updateSupportedContainers
|
train
|
// updateSupportedContainers persists the supported container list,
// skipping the write entirely when the stored set already equals the
// requested set. On success the in-memory doc is updated to match.
func (m *Machine) updateSupportedContainers(supportedContainers []instance.ContainerType) (err error) {
	if m.doc.SupportedContainersKnown {
		// Set-equality check: same length and every requested type is
		// already present means there is nothing to do.
		if len(m.doc.SupportedContainers) == len(supportedContainers) {
			equal := true
			types := make(map[instance.ContainerType]struct{}, len(m.doc.SupportedContainers))
			for _, v := range m.doc.SupportedContainers {
				types[v] = struct{}{}
			}
			for _, v := range supportedContainers {
				if _, ok := types[v]; !ok {
					equal = false
					break
				}
			}
			if equal {
				return nil
			}
		}
	}
	ops := []txn.Op{
		{
			C:      machinesC,
			Id:     m.doc.DocID,
			Assert: notDeadDoc,
			Update: bson.D{
				{"$set", bson.D{
					{"supportedcontainers", supportedContainers},
					{"supportedcontainersknown", true},
				}}},
		},
	}
	if err = m.st.db().RunTransaction(ops); err != nil {
		// An aborted txn means the notDeadDoc assertion failed.
		err = onAbort(err, ErrDead)
		logger.Errorf("cannot update supported containers of machine %v: %v", m, err)
		return err
	}
	m.doc.SupportedContainers = supportedContainers
	m.doc.SupportedContainersKnown = true
	return nil
}
|
go
|
{
"resource": ""
}
|
q4833
|
markInvalidContainers
|
train
|
// markInvalidContainers walks the machine's existing containers and,
// for any whose type is no longer supported and which is still Pending,
// sets an error status explaining it is unsupported. Failures on
// individual containers are logged and skipped so the rest are still
// processed.
func (m *Machine) markInvalidContainers() error {
	currentContainers, err := m.Containers()
	if err != nil {
		return err
	}
	for _, containerId := range currentContainers {
		if !isSupportedContainer(ContainerTypeFromId(containerId), m.doc.SupportedContainers) {
			container, err := m.st.Machine(containerId)
			if err != nil {
				logger.Errorf("loading container %v to mark as invalid: %v", containerId, err)
				continue
			}
			// There should never be a circumstance where an unsupported container is started.
			// Nonetheless, we check and log an error if such a situation arises.
			statusInfo, err := container.Status()
			if err != nil {
				logger.Errorf("finding status of container %v to mark as invalid: %v", containerId, err)
				continue
			}
			if statusInfo.Status == status.Pending {
				containerType := ContainerTypeFromId(containerId)
				now := m.st.clock().Now()
				s := status.StatusInfo{
					Status:  status.Error,
					Message: "unsupported container",
					Data:    map[string]interface{}{"type": containerType},
					Since:   &now,
				}
				// Fix: the SetStatus error was silently discarded; log
				// it like the other per-container failures above.
				if err := container.SetStatus(s); err != nil {
					logger.Errorf("setting invalid status of container %v: %v", containerId, err)
				}
			} else {
				logger.Errorf("unsupported container %v has unexpected status %v", containerId, statusInfo.Status)
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4834
|
SetMachineBlockDevices
|
train
|
// SetMachineBlockDevices records the block devices attached to this
// machine.
func (m *Machine) SetMachineBlockDevices(info ...BlockDeviceInfo) error {
	devices := info
	return setMachineBlockDevices(m.st, m.Id(), devices)
}
|
go
|
{
"resource": ""
}
|
q4835
|
VolumeAttachments
|
train
|
// VolumeAttachments returns the volume attachments for this machine.
func (m *Machine) VolumeAttachments() ([]VolumeAttachment, error) {
	backend, err := NewStorageBackend(m.st)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return backend.MachineVolumeAttachments(m.MachineTag())
}
|
go
|
{
"resource": ""
}
|
q4836
|
AddAction
|
train
|
// AddAction validates and enqueues a predefined action on this machine.
// Only actions listed in actions.PredefinedActionsSpec are accepted;
// the payload is validated and padded with the spec's defaults before
// being enqueued on the model.
func (m *Machine) AddAction(name string, payload map[string]interface{}) (Action, error) {
	spec, ok := actions.PredefinedActionsSpec[name]
	if !ok {
		return nil, errors.Errorf("cannot add action %q to a machine; only predefined actions allowed", name)
	}
	// Reject bad payloads before attempting to insert defaults.
	if err := spec.ValidateParams(payload); err != nil {
		return nil, err
	}
	withDefaults, err := spec.InsertDefaults(payload)
	if err != nil {
		return nil, err
	}
	model, err := m.st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return model.EnqueueAction(m.Tag(), name, withDefaults)
}
|
go
|
{
"resource": ""
}
|
q4837
|
CancelAction
|
train
|
// CancelAction finishes the given action with a cancelled status.
func (m *Machine) CancelAction(action Action) (Action, error) {
	results := ActionResults{Status: ActionCancelled}
	return action.Finish(results)
}
|
go
|
{
"resource": ""
}
|
q4838
|
UpdateMachineSeries
|
train
|
// UpdateMachineSeries changes the series of the machine and of all the
// units assigned to it, in a single retryable transaction. The txn
// asserts on life and on the principal/subordinate/charm-url state
// observed when building the ops, so a concurrent change aborts and
// triggers a rebuild.
func (m *Machine) UpdateMachineSeries(series string, force bool) error {
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			if err := m.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
		}
		// Exit early if the Machine series doesn't need to change.
		if m.Series() == series {
			return nil, jujutxn.ErrNoOperations
		}
		principals := m.Principals() // unit names
		verifiedUnits, err := m.VerifyUnitsSeries(principals, series, force)
		if err != nil {
			return nil, err
		}
		ops := []txn.Op{{
			C:      machinesC,
			Id:     m.doc.DocID,
			Assert: bson.D{{"life", Alive}, {"principals", principals}},
			Update: bson.D{{"$set", bson.D{{"series", series}}}},
		}}
		for _, unit := range verifiedUnits {
			curl, _ := unit.CharmURL()
			ops = append(ops, txn.Op{
				C:  unitsC,
				Id: unit.doc.DocID,
				Assert: bson.D{{"life", Alive},
					{"charmurl", curl},
					{"subordinates", unit.SubordinateNames()}},
				Update: bson.D{{"$set", bson.D{{"series", series}}}},
			})
		}
		return ops, nil
	}
	err := m.st.db().Run(buildTxn)
	return errors.Annotatef(err, "updating series for machine %q", m)
}
|
go
|
{
"resource": ""
}
|
q4839
|
VerifyUnitsSeries
|
train
|
// VerifyUnitsSeries checks that each named unit's application supports
// the given series (subject to force), recursing into each unit's
// subordinates. It returns the flattened list of verified units:
// each unit followed by its verified subordinates.
func (m *Machine) VerifyUnitsSeries(unitNames []string, series string, force bool) ([]*Unit, error) {
	var results []*Unit
	for _, u := range unitNames {
		unit, err := m.st.Unit(u)
		if err != nil {
			return nil, err
		}
		app, err := unit.Application()
		if err != nil {
			return nil, err
		}
		err = app.VerifySupportedSeries(series, force)
		if err != nil {
			return nil, err
		}
		// Recurse into subordinates so the whole tree is verified.
		subordinates := unit.SubordinateNames()
		subUnits, err := m.VerifyUnitsSeries(subordinates, series, force)
		if err != nil {
			return nil, err
		}
		results = append(results, unit)
		results = append(results, subUnits...)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4840
|
UpdateOperation
|
train
|
// UpdateOperation returns a model operation that updates this machine,
// built over a shallow copy of the machine's document.
func (m *Machine) UpdateOperation() *UpdateMachineOperation {
	clone := &Machine{st: m.st, doc: m.doc}
	return &UpdateMachineOperation{m: clone}
}
|
go
|
{
"resource": ""
}
|
q4841
|
NewLifeGetter
|
train
|
// NewLifeGetter returns a LifeGetter reading entity lifecycles from st,
// guarded by the getCanRead authorization function.
func NewLifeGetter(st state.EntityFinder, getCanRead GetAuthFunc) *LifeGetter {
	lg := &LifeGetter{st: st, getCanRead: getCanRead}
	return lg
}
|
go
|
{
"resource": ""
}
|
q4842
|
Life
|
train
|
// Life returns the life status of every entity in args. Each result is
// either a life value or an error; unparseable tags and entities the
// caller may not read both yield ErrPerm.
func (lg *LifeGetter) Life(args params.Entities) (params.LifeResults, error) {
	result := params.LifeResults{
		Results: make([]params.LifeResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	canRead, err := lg.getCanRead()
	if err != nil {
		return params.LifeResults{}, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		// Default to ErrPerm; it is overwritten by oneLife's result
		// (possibly nil) only when the caller may read this entity.
		err = ErrPerm
		if canRead(tag) {
			result.Results[i].Life, err = lg.oneLife(tag)
		}
		result.Results[i].Error = ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4843
|
entry
|
train
|
// entry converts a validated leaseDoc into the lease name and its
// in-memory entry representation.
func (doc leaseDoc) entry() (string, entry, error) {
	if err := doc.validate(); err != nil {
		return "", entry{}, errors.Trace(err)
	}
	e := entry{
		holder:   doc.Holder,
		start:    toTime(doc.Start),
		duration: doc.Duration,
		writer:   doc.Writer,
	}
	return doc.Name, e, nil
}
|
go
|
{
"resource": ""
}
|
q4844
|
newLeaseDoc
|
train
|
// newLeaseDoc returns a validated lease document encoding the supplied
// entry under the given namespace and name.
func newLeaseDoc(namespace, name string, entry entry) (*leaseDoc, error) {
	doc := leaseDoc{
		Id:        leaseDocId(namespace, name),
		Namespace: namespace,
		Name:      name,
		Holder:    entry.holder,
		Start:     toInt64(entry.start),
		Duration:  entry.duration,
		Writer:    entry.writer,
	}
	if err := doc.validate(); err != nil {
		return nil, errors.Trace(err)
	}
	return &doc, nil
}
|
go
|
{
"resource": ""
}
|
q4845
|
Validate
|
train
|
// Validate checks that the resource's embedded info is valid, that it
// is bound to an application, and that a username is not recorded
// without a timestamp.
func (res Resource) Validate() error {
	// TODO(ericsnow) Ensure that the "placeholder" fields are not set
	// if IsLocalPlaceholder() returns true (and that they *are* set
	// otherwise)? Also ensure an "upload" origin in the "placeholder"
	// case?
	if err := res.Resource.Validate(); err != nil {
		return errors.Annotate(err, "bad info")
	}
	if res.ApplicationID == "" {
		return errors.NewNotValid(nil, "missing application ID")
	}
	// TODO(ericsnow) Require that Username be set if timestamp is?
	// A username with a zero timestamp is inconsistent metadata.
	if res.Timestamp.IsZero() && res.Username != "" {
		return errors.NewNotValid(nil, "missing timestamp")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4846
|
TimestampGranular
|
train
|
// TimestampGranular returns the resource timestamp truncated to whole
// seconds (sub-second precision dropped).
func (res Resource) TimestampGranular() time.Time {
	seconds := res.Timestamp.Unix()
	return time.Unix(seconds, 0)
}
|
go
|
{
"resource": ""
}
|
q4847
|
RevisionString
|
train
|
// RevisionString returns a human-readable revision: the upload
// timestamp for uploaded resources ("-" for placeholders), the numeric
// revision for store resources, and "-" otherwise.
func (res Resource) RevisionString() string {
	if res.Origin == resource.OriginUpload {
		if res.IsPlaceholder() {
			return "-"
		}
		return res.TimestampGranular().UTC().String()
	}
	if res.Origin == resource.OriginStore {
		return fmt.Sprintf("%d", res.Revision)
	}
	// note: this should probably never happen.
	return "-"
}
|
go
|
{
"resource": ""
}
|
q4848
|
AsMap
|
train
|
// AsMap indexes the given resources by name. Later duplicates
// overwrite earlier entries, as with any map assignment.
func AsMap(resources []Resource) map[string]Resource {
	byName := make(map[string]Resource, len(resources))
	for i := range resources {
		byName[resources[i].Name] = resources[i]
	}
	return byName
}
|
go
|
{
"resource": ""
}
|
q4849
|
newSender
|
train
|
// newSender returns a periodic worker that pushes metrics to the
// metrics manager. Send failures are logged and retried on the next
// period instead of killing the worker; after a successful send a
// non-blocking "senderCalled" message is written to notify.
func newSender(client metricsmanager.MetricsManagerClient, notify chan string) worker.Worker {
	send := func(stopCh <-chan struct{}) error {
		if err := client.SendMetrics(); err != nil {
			senderLogger.Warningf("failed to send metrics %v - will retry later", err)
			return nil
		}
		select {
		case notify <- "senderCalled":
		default:
		}
		return nil
	}
	return jworker.NewPeriodicWorker(send, senderPeriod, jworker.NewTimer)
}
|
go
|
{
"resource": ""
}
|
q4850
|
TabWriter
|
train
|
// TabWriter returns an ansiterm tab writer configured with this
// package's standard column formatting.
func TabWriter(writer io.Writer) *ansiterm.TabWriter {
	// To format things into columns.
	const (
		minwidth = 0
		tabwidth = 1
		padding  = 2
		padchar  = ' '
		flags    = 0
	)
	return ansiterm.NewTabWriter(writer, minwidth, tabwidth, padding, padchar, flags)
}
|
go
|
{
"resource": ""
}
|
q4851
|
Print
|
train
|
// Print writes each value followed by a tab stop.
func (w *Wrapper) Print(values ...interface{}) {
	for i := range values {
		fmt.Fprintf(w, "%v\t", values[i])
	}
}
|
go
|
{
"resource": ""
}
|
q4852
|
Printf
|
train
|
// Printf formats the values per format and appends a tab stop.
func (w *Wrapper) Printf(format string, values ...interface{}) {
	tabbed := format + "\t"
	fmt.Fprintf(w, tabbed, values...)
}
|
go
|
{
"resource": ""
}
|
q4853
|
Println
|
train
|
// Println writes the values tab-separated (no trailing tab) and then
// terminates the line.
func (w *Wrapper) Println(values ...interface{}) {
	last := len(values) - 1
	for i, v := range values {
		if i == last {
			fmt.Fprintf(w, "%v", v)
		} else {
			fmt.Fprintf(w, "%v\t", v)
		}
	}
	fmt.Fprintln(w)
}
|
go
|
{
"resource": ""
}
|
q4854
|
PrintColor
|
train
|
// PrintColor writes the value plus a tab stop, colored via ctx when
// ctx is non-nil and plain otherwise.
func (w *Wrapper) PrintColor(ctx *ansiterm.Context, value interface{}) {
	if ctx == nil {
		fmt.Fprintf(w, "%v\t", value)
		return
	}
	ctx.Fprintf(w.TabWriter, "%v\t", value)
}
|
go
|
{
"resource": ""
}
|
q4855
|
PrintStatus
|
train
|
// PrintStatus prints the status value using its configured color.
func (w *Wrapper) PrintStatus(status status.Status) {
	colorCtx := statusColors[status]
	w.PrintColor(colorCtx, status)
}
|
go
|
{
"resource": ""
}
|
q4856
|
Run
|
train
|
// Run validates the requested hooks/actions, then opens an SSH session
// to the target unit running the debug-hooks client script.
func (c *debugHooksCommand) Run(ctx *cmd.Context) error {
	err := c.initRun()
	if err != nil {
		return err
	}
	defer c.cleanupRun()
	err = c.validateHooksOrActions()
	if err != nil {
		return err
	}
	debugctx := unitdebug.NewHooksContext(c.Target)
	// The client script is shipped base64-encoded so it survives shell
	// quoting; it is decoded into a temp file and sourced on the remote.
	script := base64.StdEncoding.EncodeToString([]byte(unitdebug.ClientScript(debugctx, c.hooks)))
	innercmd := fmt.Sprintf(`F=$(mktemp); echo %s | base64 -d > $F; . $F`, script)
	args := []string{fmt.Sprintf("sudo /bin/bash -c '%s'", innercmd)}
	c.Args = args
	return c.sshCommand.Run(ctx)
}
|
go
|
{
"resource": ""
}
|
q4857
|
DestroyController
|
train
|
// DestroyController destroys the controller model and, when
// destroyHostedModels is set, first verifies every hosted model allows
// destruction and flushes its leftover metrics.
func DestroyController(
	st ModelManagerBackend,
	destroyHostedModels bool,
	destroyStorage *bool,
) error {
	// Guard: st must be the controller model's backend.
	modelTag := st.ModelTag()
	controllerModelTag := st.ControllerModelTag()
	if modelTag != controllerModelTag {
		return errors.Errorf(
			"expected state for controller model UUID %v, got %v",
			controllerModelTag.Id(),
			modelTag.Id(),
		)
	}
	if destroyHostedModels {
		uuids, err := st.AllModelUUIDs()
		if err != nil {
			return errors.Trace(err)
		}
		for _, uuid := range uuids {
			modelSt, release, err := st.GetBackend(uuid)
			if err != nil {
				if errors.IsNotFound(err) {
					// Model is already in the process of being destroyed.
					continue
				}
				return errors.Trace(err)
			}
			// NOTE(review): defer inside a loop — every backend stays
			// held until this function returns. Possibly intentional
			// (keeping backends alive through destroyModel below), but
			// worth confirming; otherwise release per-iteration.
			defer release()
			check := NewBlockChecker(modelSt)
			if err = check.DestroyAllowed(); err != nil {
				return errors.Trace(err)
			}
			// Metrics failures are best-effort: logged, not fatal.
			err = sendMetrics(modelSt)
			if err != nil {
				logger.Errorf("failed to send leftover metrics: %v", err)
			}
		}
	}
	return destroyModel(st, state.DestroyModelParams{
		DestroyHostedModels: destroyHostedModels,
		DestroyStorage:      destroyStorage,
	})
}
|
go
|
{
"resource": ""
}
|
q4858
|
DestroyModel
|
train
|
// DestroyModel marks the model for destruction with the given storage,
// force, and max-wait options.
func DestroyModel(
	st ModelManagerBackend,
	destroyStorage *bool,
	force *bool,
	maxWait *time.Duration,
) error {
	args := state.DestroyModelParams{
		DestroyStorage: destroyStorage,
		Force:          force,
		MaxWait:        MaxWait(maxWait),
	}
	return destroyModel(st, args)
}
|
go
|
{
"resource": ""
}
|
q4859
|
CAASManifolds
|
train
|
// CAASManifolds returns the dependency-engine manifolds specific to a
// CAAS model agent, merged over the common manifolds. CAAS-specific
// entries override common entries of the same name.
func CAASManifolds(config ManifoldsConfig) dependency.Manifolds {
	agentConfig := config.Agent.CurrentConfig()
	modelTag := agentConfig.Model()
	manifolds := dependency.Manifolds{
		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotUpgrading(ifNotAlive(ifCredentialValid(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName:                apiCallerName,
			CloudDestroyerName:           caasBrokerTrackerName,
			NewFacade:                    undertaker.NewFacade,
			NewWorker:                    undertaker.NewWorker,
			NewCredentialValidatorFacade: common.NewCredentialInvalidatorFacade,
		})))),
		// Tracks the CAAS broker connection for the other workers.
		caasBrokerTrackerName: ifResponsible(caasbroker.Manifold(caasbroker.ManifoldConfig{
			APICallerName:          apiCallerName,
			NewContainerBrokerFunc: config.NewContainerBrokerFunc,
		})),
		caasFirewallerName: ifNotMigrating(caasfirewaller.Manifold(
			caasfirewaller.ManifoldConfig{
				APICallerName:  apiCallerName,
				BrokerName:     caasBrokerTrackerName,
				ControllerUUID: agentConfig.Controller().Id(),
				ModelUUID:      agentConfig.Model().Id(),
				NewClient: func(caller base.APICaller) caasfirewaller.Client {
					return caasfirewallerapi.NewClient(caller)
				},
				NewWorker: caasfirewaller.NewWorker,
			},
		)),
		caasOperatorProvisionerName: ifNotMigrating(caasoperatorprovisioner.Manifold(
			caasoperatorprovisioner.ManifoldConfig{
				AgentName:     agentName,
				APICallerName: apiCallerName,
				BrokerName:    caasBrokerTrackerName,
				ClockName:     clockName,
				NewWorker:     caasoperatorprovisioner.NewProvisionerWorker,
			},
		)),
		caasUnitProvisionerName: ifNotMigrating(caasunitprovisioner.Manifold(
			caasunitprovisioner.ManifoldConfig{
				APICallerName: apiCallerName,
				BrokerName:    caasBrokerTrackerName,
				NewClient: func(caller base.APICaller) caasunitprovisioner.Client {
					return caasunitprovisionerapi.NewClient(caller)
				},
				NewWorker: caasunitprovisioner.NewWorker,
			},
		)),
		environUpgraderName: caasenvironupgrader.Manifold(caasenvironupgrader.ManifoldConfig{
			APICallerName: apiCallerName,
			GateName:      environUpgradeGateName,
			ModelTag:      modelTag,
			NewFacade:     caasenvironupgrader.NewFacade,
			NewWorker:     caasenvironupgrader.NewWorker,
		}),
		caasStorageProvisionerName: ifNotMigrating(ifCredentialValid(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName:                apiCallerName,
			ClockName:                    clockName,
			StorageRegistryName:          caasBrokerTrackerName,
			Model:                        modelTag,
			NewCredentialValidatorFacade: common.NewCredentialInvalidatorFacade,
			NewWorker:                    storageprovisioner.NewCaasWorker,
		}))),
	}
	// Overlay the CAAS-specific manifolds onto the common set.
	result := commonManifolds(config)
	for name, manifold := range manifolds {
		result[name] = manifold
	}
	return result
}
|
go
|
{
"resource": ""
}
|
q4860
|
NewWorker
|
train
|
// NewWorker starts a worker that tracks API server details published on
// the hub. It subscribes before requesting the current details so no
// update can be missed, and unsubscribes when the worker loop exits.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	w := &Worker{
		config:        config,
		serverDetails: make(chan apiserver.Details),
	}
	// Subscribe to API server address changes.
	unsubscribe, err := config.Hub.Subscribe(
		apiserver.DetailsTopic,
		w.apiserverDetailsChanged,
	)
	if err != nil {
		return nil, errors.Annotate(err, "subscribing to apiserver details")
	}
	// Now that we're subscribed, request the current API server details.
	req := apiserver.DetailsRequest{
		Requester: "raft-clusterer",
		LocalOnly: true,
	}
	if _, err := config.Hub.Publish(apiserver.DetailsRequestTopic, req); err != nil {
		return nil, errors.Annotate(err, "requesting current apiserver details")
	}
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: func() error {
			defer unsubscribe()
			return w.loop()
		},
	}); err != nil {
		// Invoke failed, so the Work func (and its deferred
		// unsubscribe) will never run; clean up here.
		unsubscribe()
		return nil, errors.Trace(err)
	}
	return w, nil
}
|
go
|
{
"resource": ""
}
|
q4861
|
String
|
train
|
// String returns the human-readable form of the meter status code,
// falling back to the "not available" string for unknown codes.
func (m MeterStatusCode) String() string {
	if s, ok := meterString[m]; ok {
		return s
	}
	return MeterNotAvailable.String()
}
|
go
|
{
"resource": ""
}
|
q4862
|
MeterStatusFromString
|
train
|
// MeterStatusFromString returns the code whose string form equals str,
// or MeterNotAvailable when no code matches.
func MeterStatusFromString(str string) MeterStatusCode {
	for code, text := range meterString {
		if text != str {
			continue
		}
		return code
	}
	return MeterNotAvailable
}
|
go
|
{
"resource": ""
}
|
q4863
|
SetMeterStatus
|
train
|
// SetMeterStatus persists the given meter status code and info for the
// unit. It is a no-op when the stored values already match, and the
// no-op check is repeated inside the retry loop against refreshed
// state.
func (u *Unit) SetMeterStatus(codeStr, info string) error {
	code, err := isValidMeterStatusCode(codeStr)
	if err != nil {
		return errors.Trace(err)
	}
	meterDoc, err := u.getMeterStatusDoc()
	if err != nil {
		return errors.Annotatef(err, "cannot update meter status for unit %s", u.Name())
	}
	// Fast path: nothing to change.
	if meterDoc.Code == code.String() && meterDoc.Info == info {
		return nil
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// Re-check against fresh state after an aborted attempt.
			err := u.Refresh()
			if err != nil {
				return nil, errors.Trace(err)
			}
			meterDoc, err = u.getMeterStatusDoc()
			if err != nil {
				return nil, errors.Annotatef(err, "cannot update meter status for unit %s", u.Name())
			}
			if meterDoc.Code == code.String() && meterDoc.Info == info {
				return nil, jujutxn.ErrNoOperations
			}
		}
		// The first op only asserts the unit is alive; the second
		// performs the actual update.
		return []txn.Op{
			{
				C:      unitsC,
				Id:     u.doc.DocID,
				Assert: isAliveDoc,
			}, {
				C:      meterStatusC,
				Id:     u.st.docID(u.globalMeterStatusKey()),
				Assert: txn.DocExists,
				Update: bson.D{{"$set", bson.D{{"code", code.String()}, {"info", info}}}},
			}}, nil
	}
	return errors.Annotatef(u.st.db().Run(buildTxn), "cannot set meter state for unit %s", u.Name())
}
|
go
|
{
"resource": ""
}
|
q4864
|
createMeterStatusOp
|
train
|
// createMeterStatusOp returns a transaction operation that inserts the
// given meter status document under globalKey, stamping it with the
// model UUID first. The insert asserts the document does not yet exist.
func createMeterStatusOp(mb modelBackend, globalKey string, doc *meterStatusDoc) txn.Op {
	doc.ModelUUID = mb.modelUUID()
	op := txn.Op{
		C:      meterStatusC,
		Id:     mb.docID(globalKey),
		Assert: txn.DocMissing,
		Insert: doc,
	}
	return op
}
|
go
|
{
"resource": ""
}
|
q4865
|
removeMeterStatusOp
|
train
|
// removeMeterStatusOp returns a transaction operation that deletes the
// meter status document stored under globalKey.
func removeMeterStatusOp(mb modelBackend, globalKey string) txn.Op {
	op := txn.Op{
		C:      meterStatusC,
		Id:     mb.docID(globalKey),
		Remove: true,
	}
	return op
}
|
go
|
{
"resource": ""
}
|
q4866
|
GetMeterStatus
|
train
|
// GetMeterStatus returns the meter status for this unit, combining the
// metrics manager's status with the unit's own. A MeterRed manager status
// short-circuits and is returned as-is.
func (u *Unit) GetMeterStatus() (MeterStatus, error) {
	mm, err := u.st.MetricsManager()
	if err != nil {
		return MeterStatus{MeterNotAvailable, ""}, errors.Annotatef(err, "cannot retrieve meter status for metrics manager")
	}
	managerStatus := mm.MeterStatus()
	if managerStatus.Code == MeterRed {
		// A red manager status overrides whatever the unit reports.
		return managerStatus, nil
	}
	doc, err := u.getMeterStatusDoc()
	if err != nil {
		return MeterStatus{MeterNotAvailable, ""}, errors.Annotatef(err, "cannot retrieve meter status for unit %s", u.Name())
	}
	unitStatus := MeterStatus{MeterStatusFromString(doc.Code), doc.Info}
	return combineMeterStatus(managerStatus, unitStatus), nil
}
|
go
|
{
"resource": ""
}
|
q4867
|
Start
|
train
|
// Start kicks off an asynchronous download for req using this
// downloader's blob opener and returns the in-flight Download.
func (dlr Downloader) Start(req Request) *Download {
	dl := StartDownload(req, dlr.OpenBlob)
	return dl
}
|
go
|
{
"resource": ""
}
|
q4868
|
Download
|
train
|
// Download synchronously fetches req, creating the target directory
// first, and returns the path of the downloaded file.
func (dlr Downloader) Download(req Request) (string, error) {
	if err := os.MkdirAll(req.TargetDir, 0755); err != nil {
		return "", errors.Trace(err)
	}
	filename, err := dlr.Start(req).Wait()
	return filename, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4869
|
NewMockMachineProvisioner
|
train
|
// NewMockMachineProvisioner creates a new gomock-generated mock instance
// wired to the given controller. (Generated code; do not edit by hand.)
func NewMockMachineProvisioner(ctrl *gomock.Controller) *MockMachineProvisioner {
	mock := &MockMachineProvisioner{ctrl: ctrl}
	mock.recorder = &MockMachineProvisionerMockRecorder{mock}
	return mock
}
|
go
|
{
"resource": ""
}
|
q4870
|
AvailabilityZone
|
train
|
// AvailabilityZone mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) AvailabilityZone() (string, error) {
	ret := m.ctrl.Call(m, "AvailabilityZone")
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4871
|
InstanceStatus
|
train
|
// InstanceStatus mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) InstanceStatus() (status.Status, string, error) {
	ret := m.ctrl.Call(m, "InstanceStatus")
	ret0, _ := ret[0].(status.Status)
	ret1, _ := ret[1].(string)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}
|
go
|
{
"resource": ""
}
|
q4872
|
KeepInstance
|
train
|
// KeepInstance mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) KeepInstance() (bool, error) {
	ret := m.ctrl.Call(m, "KeepInstance")
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4873
|
MachineTag
|
train
|
// MachineTag mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed value.
func (m *MockMachineProvisioner) MachineTag() names_v2.MachineTag {
	ret := m.ctrl.Call(m, "MachineTag")
	ret0, _ := ret[0].(names_v2.MachineTag)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4874
|
ModelAgentVersion
|
train
|
// ModelAgentVersion mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) ModelAgentVersion() (*version.Number, error) {
	ret := m.ctrl.Call(m, "ModelAgentVersion")
	ret0, _ := ret[0].(*version.Number)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4875
|
ProvisioningInfo
|
train
|
// ProvisioningInfo mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) ProvisioningInfo() (*params.ProvisioningInfo, error) {
	ret := m.ctrl.Call(m, "ProvisioningInfo")
	ret0, _ := ret[0].(*params.ProvisioningInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4876
|
SetCharmProfiles
|
train
|
// SetCharmProfiles mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed error.
func (m *MockMachineProvisioner) SetCharmProfiles(arg0 []string) error {
	ret := m.ctrl.Call(m, "SetCharmProfiles", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4877
|
SetInstanceInfo
|
train
|
// SetInstanceInfo mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed error.
func (m *MockMachineProvisioner) SetInstanceInfo(arg0 instance.Id, arg1, arg2 string, arg3 *instance.HardwareCharacteristics, arg4 []params.NetworkConfig, arg5 []params.Volume, arg6 map[string]params.VolumeAttachmentInfo, arg7 []string) error {
	ret := m.ctrl.Call(m, "SetInstanceInfo", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4878
|
SetInstanceInfo
|
train
|
// SetInstanceInfo registers an expected call to SetInstanceInfo for use
// with the mock's EXPECT() chain.
func (mr *MockMachineProvisionerMockRecorder) SetInstanceInfo(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInstanceInfo", reflect.TypeOf((*MockMachineProvisioner)(nil).SetInstanceInfo), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
}
|
go
|
{
"resource": ""
}
|
q4879
|
SetStatus
|
train
|
// SetStatus mocks the corresponding interface method, delegating
// to the gomock controller and returning the stubbed error.
func (m *MockMachineProvisioner) SetStatus(arg0 status.Status, arg1 string, arg2 map[string]interface{}) error {
	ret := m.ctrl.Call(m, "SetStatus", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4880
|
SetStatus
|
train
|
// SetStatus registers an expected call to SetStatus for use with the
// mock's EXPECT() chain.
func (mr *MockMachineProvisionerMockRecorder) SetStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStatus", reflect.TypeOf((*MockMachineProvisioner)(nil).SetStatus), arg0, arg1, arg2)
}
|
go
|
{
"resource": ""
}
|
q4881
|
SetSupportedContainers
|
train
|
// SetSupportedContainers mocks the corresponding variadic interface
// method; the variadic args are flattened before delegating to the
// gomock controller.
func (m *MockMachineProvisioner) SetSupportedContainers(arg0 ...instance.ContainerType) error {
	varargs := []interface{}{}
	for _, a := range arg0 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "SetSupportedContainers", varargs...)
	ret0, _ := ret[0].(error)
	return ret0
}
|
go
|
{
"resource": ""
}
|
q4882
|
SetSupportedContainers
|
train
|
// SetSupportedContainers registers an expected call to
// SetSupportedContainers for use with the mock's EXPECT() chain.
func (mr *MockMachineProvisionerMockRecorder) SetSupportedContainers(arg0 ...interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSupportedContainers", reflect.TypeOf((*MockMachineProvisioner)(nil).SetSupportedContainers), arg0...)
}
|
go
|
{
"resource": ""
}
|
q4883
|
SupportedContainers
|
train
|
// SupportedContainers mocks the corresponding interface method,
// delegating to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) SupportedContainers() ([]instance.ContainerType, bool, error) {
	ret := m.ctrl.Call(m, "SupportedContainers")
	ret0, _ := ret[0].([]instance.ContainerType)
	ret1, _ := ret[1].(bool)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}
|
go
|
{
"resource": ""
}
|
q4884
|
WatchAllContainers
|
train
|
// WatchAllContainers mocks the corresponding interface method,
// delegating to the gomock controller and returning the stubbed values.
func (m *MockMachineProvisioner) WatchAllContainers() (watcher.StringsWatcher, error) {
	ret := m.ctrl.Call(m, "WatchAllContainers")
	ret0, _ := ret[0].(watcher.StringsWatcher)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4885
|
WatchAllContainers
|
train
|
// WatchAllContainers registers an expected call to WatchAllContainers
// for use with the mock's EXPECT() chain.
func (mr *MockMachineProvisionerMockRecorder) WatchAllContainers() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchAllContainers", reflect.TypeOf((*MockMachineProvisioner)(nil).WatchAllContainers))
}
|
go
|
{
"resource": ""
}
|
q4886
|
TokenResource
|
train
|
// TokenResource normalizes uri into a token resource identifier,
// guaranteeing exactly one trailing slash is present.
func TokenResource(uri string) string {
	if strings.HasSuffix(uri, "/") {
		return uri
	}
	return uri + "/"
}
|
go
|
{
"resource": ""
}
|
q4887
|
run
|
train
|
// run executes command with args (in dir when non-empty), logging the
// invocation and its combined stdout/stderr, which it returns along
// with any execution error.
func run(dir, command string, args ...string) (string, error) {
	logger.Debugf("(%s) %s %v", dir, command, args)
	cmd := exec.Command(command, args...)
	if dir != "" {
		cmd.Dir = dir
	}
	combined, err := cmd.CombinedOutput()
	text := string(combined)
	logger.Debugf("output: %v", text)
	return text, err
}
|
go
|
{
"resource": ""
}
|
q4888
|
GetBundle
|
train
|
// GetBundle mocks the corresponding interface method, delegating to the
// gomock controller and returning the stubbed values.
func (m *MockInterface) GetBundle(arg0 *charm_v6.URL) (charm_v6.Bundle, error) {
	ret := m.ctrl.Call(m, "GetBundle", arg0)
	ret0, _ := ret[0].(charm_v6.Bundle)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
|
go
|
{
"resource": ""
}
|
q4889
|
stringKeysFromMap
|
train
|
// stringKeysFromMap returns the keys of a string-keyed map, given as an
// interface{}, in unspecified order (nil for an empty map).
func stringKeysFromMap(m interface{}) (keys []string) {
	value := reflect.ValueOf(m)
	for _, key := range value.MapKeys() {
		keys = append(keys, key.String())
	}
	return keys
}
|
go
|
{
"resource": ""
}
|
q4890
|
indent
|
train
|
// indent joins prepend and append with `level` spaces between them.
func indent(prepend string, level int, append string) string {
	// %*s with an empty string emits exactly `level` spaces.
	padding := fmt.Sprintf("%*s", level, "")
	return prepend + padding + append
}
|
go
|
{
"resource": ""
}
|
q4891
|
Logs
|
train
|
// Logs returns a snapshot copy of the applied log entries, taken under
// the FSM's lock so concurrent Apply calls are safe.
func (fsm *SimpleFSM) Logs() [][]byte {
	fsm.mu.Lock()
	defer fsm.mu.Unlock()
	snapshot := make([][]byte, len(fsm.logs))
	copy(snapshot, fsm.logs)
	return snapshot
}
|
go
|
{
"resource": ""
}
|
q4892
|
Apply
|
train
|
// Apply records the raft log entry's data and returns the new count of
// stored entries.
func (fsm *SimpleFSM) Apply(log *raft.Log) interface{} {
	fsm.mu.Lock()
	fsm.logs = append(fsm.logs, log.Data)
	count := len(fsm.logs)
	fsm.mu.Unlock()
	return count
}
|
go
|
{
"resource": ""
}
|
q4893
|
Snapshot
|
train
|
// Snapshot captures the current log entries into a SimpleSnapshot,
// copying them under the lock so later Apply calls don't mutate it.
func (fsm *SimpleFSM) Snapshot() (raft.FSMSnapshot, error) {
	fsm.mu.Lock()
	defer fsm.mu.Unlock()
	dup := make([][]byte, len(fsm.logs))
	copy(dup, fsm.logs)
	return &SimpleSnapshot{dup, len(dup)}, nil
}
|
go
|
{
"resource": ""
}
|
q4894
|
Restore
|
train
|
// Restore replaces the FSM's log entries with the gob-encoded slice
// read from rc, closing rc when done.
func (fsm *SimpleFSM) Restore(rc io.ReadCloser) error {
	defer rc.Close()
	var restored [][]byte
	if err := gob.NewDecoder(rc).Decode(&restored); err != nil {
		return err
	}
	fsm.mu.Lock()
	defer fsm.mu.Unlock()
	fsm.logs = restored
	return nil
}
|
go
|
{
"resource": ""
}
|
q4895
|
Persist
|
train
|
// Persist gob-encodes the snapshot's captured log entries into sink.
// On encode failure the sink is cancelled and the encode error returned.
// On success the sink's Close error is now propagated instead of being
// silently dropped — a failed close can mean the snapshot was never
// made durable, and raft needs to know.
func (snap *SimpleSnapshot) Persist(sink raft.SnapshotSink) error {
	if err := gob.NewEncoder(sink).Encode(snap.logs[:snap.n]); err != nil {
		// Cancel marks the snapshot incomplete; the encode error is the
		// more informative one to surface.
		sink.Cancel()
		return err
	}
	return sink.Close()
}
|
go
|
{
"resource": ""
}
|
q4896
|
NewAPIAuthenticator
|
train
|
// NewAPIAuthenticator builds an AuthenticationProvider from the
// provisioner API connection, gathering the state (mongo) and API server
// addresses, the CA certificate, and the model UUID needed to construct
// connection info for newly provisioned machines.
func NewAPIAuthenticator(st *apiprovisioner.State) (AuthenticationProvider, error) {
	stateAddresses, err := st.StateAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	apiAddresses, err := st.APIAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	caCert, err := st.CACert()
	if err != nil {
		return nil, errors.Trace(err)
	}
	modelUUID, err := st.ModelUUID()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Mongo connection info: addresses plus the CA cert; credentials are
	// filled in later when authentication is set up per machine.
	stateInfo := &mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  stateAddresses,
			CACert: caCert,
		},
	}
	apiInfo := &api.Info{
		Addrs:    apiAddresses,
		CACert:   caCert,
		ModelTag: names.NewModelTag(modelUUID),
	}
	return &simpleAuth{stateInfo, apiInfo}, nil
}
|
go
|
{
"resource": ""
}
|
q4897
|
SetupAuthentication
|
train
|
// SetupAuthentication generates credentials for machine and returns
// copies of stateInfo and apiInfo with those credentials filled in.
func SetupAuthentication(
	machine TaggedPasswordChanger,
	stateInfo *mongo.MongoInfo,
	apiInfo *api.Info,
) (*mongo.MongoInfo, *api.Info, error) {
	provider := simpleAuth{stateInfo, apiInfo}
	return provider.SetupAuthentication(machine)
}
|
go
|
{
"resource": ""
}
|
q4898
|
NewAPI
|
train
|
// NewAPI constructs the facade, rejecting non-client callers with
// ErrPerm.
func NewAPI(
	backend Backend,
	authorizer facade.Authorizer,
	blockChecker BlockChecker,
) (*API, error) {
	if !authorizer.AuthClient() {
		return nil, common.ErrPerm
	}
	api := &API{
		backend:    backend,
		authorizer: authorizer,
		check:      blockChecker,
	}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q4899
|
SetFirewallRules
|
train
|
// SetFirewallRules saves each supplied firewall rule, returning a
// per-rule error result. The caller must be an admin and model changes
// must not be blocked; either failure aborts the whole call.
func (api *API) SetFirewallRules(args params.FirewallRuleArgs) (params.ErrorResults, error) {
	var errResults params.ErrorResults
	if err := api.checkAdmin(); err != nil {
		return errResults, errors.Trace(err)
	}
	if err := api.check.ChangeAllowed(); err != nil {
		return errResults, errors.Trace(err)
	}
	// Each rule is saved independently; failures are reported per-entry
	// rather than aborting the batch.
	results := make([]params.ErrorResult, len(args.Args))
	for i, arg := range args.Args {
		logger.Debugf("saving firewall rule %+v", arg)
		err := api.backend.SaveFirewallRule(state.FirewallRule{
			WellKnownService: state.WellKnownServiceType(arg.KnownService),
			WhitelistCIDRs:   arg.WhitelistCIDRS,
		})
		results[i].Error = common.ServerError(err)
	}
	errResults.Results = results
	return errResults, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.