_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4700
|
AssignedUnits
|
train
|
// AssignedUnits indicates an expected call of AssignedUnits.
func (mr *MockGenerationMockRecorder) AssignedUnits() *gomock.Call {
	method := reflect.TypeOf((*MockGeneration)(nil).AssignedUnits)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignedUnits", method)
}
|
go
|
{
"resource": ""
}
|
q4701
|
Commit
|
train
|
// Commit mocks base method.
func (m *MockGeneration) Commit(arg0 string) (int, error) {
	results := m.ctrl.Call(m, "Commit", arg0)
	generationID, _ := results[0].(int)
	callErr, _ := results[1].(error)
	return generationID, callErr
}
|
go
|
{
"resource": ""
}
|
q4702
|
Created
|
train
|
// Created mocks base method.
func (m *MockGeneration) Created() int64 {
	results := m.ctrl.Call(m, "Created")
	created, _ := results[0].(int64)
	return created
}
|
go
|
{
"resource": ""
}
|
q4703
|
CreatedBy
|
train
|
// CreatedBy mocks base method.
func (m *MockGeneration) CreatedBy() string {
	results := m.ctrl.Call(m, "CreatedBy")
	creator, _ := results[0].(string)
	return creator
}
|
go
|
{
"resource": ""
}
|
q4704
|
DefaultCharmConfig
|
train
|
// DefaultCharmConfig mocks base method.
func (m *MockApplication) DefaultCharmConfig() (charm_v6.Settings, error) {
	results := m.ctrl.Call(m, "DefaultCharmConfig")
	settings, _ := results[0].(charm_v6.Settings)
	callErr, _ := results[1].(error)
	return settings, callErr
}
|
go
|
{
"resource": ""
}
|
q4705
|
UnitNames
|
train
|
// UnitNames mocks base method.
func (m *MockApplication) UnitNames() ([]string, error) {
	results := m.ctrl.Call(m, "UnitNames")
	names, _ := results[0].([]string)
	callErr, _ := results[1].(error)
	return names, callErr
}
|
go
|
{
"resource": ""
}
|
q4706
|
invoke
|
train
|
// invoke delivers the block request on ch, then waits until the block
// is released or the operation is interrupted. After the request has
// been sent once, ch is set to nil so that send case can never fire
// again and the loop only waits on the remaining channels.
func (b block) invoke(ch chan<- block) error {
	for {
		select {
		case <-b.stop:
			return errStopped
		case <-b.cancel:
			return lease.ErrWaitCancelled
		case ch <- b:
			// Request delivered; disable this case and keep waiting.
			ch = nil
		case <-b.unblock:
			return nil
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4707
|
add
|
train
|
// add registers the block's unblock channel under its lease key.
func (b blocks) add(block block) {
	key := block.leaseKey
	b[key] = append(b[key], block.unblock)
}
|
go
|
{
"resource": ""
}
|
q4708
|
unblock
|
train
|
// unblock closes every channel registered for the lease key and forgets
// the key, releasing all waiters blocked on it.
func (b blocks) unblock(lease lease.Key) {
	waiting := b[lease]
	delete(b, lease)
	for _, ch := range waiting {
		close(ch)
	}
}
|
go
|
{
"resource": ""
}
|
q4709
|
NewLock
|
train
|
// NewLock returns a gate.Lock for coordinating upgrade steps: it is
// returned already unlocked when the agent's upgraded-to version
// (ignoring build number) matches the current binary version, and
// locked otherwise. A wrench can force it to stay locked.
func NewLock(agentConfig agent.Config) gate.Lock {
	lock := gate.NewLock()
	if wrench.IsActive(wrenchKey(agentConfig), "always-try-upgrade") {
		// Always enter upgrade mode. This allows test of upgrades
		// even when there's actually no upgrade steps to run.
		return lock
	}
	// Build numbers are irrelevant to upgrade steps.
	previous := agentConfig.UpgradedToVersion()
	previous.Build = 0
	current := jujuversion.Current
	current.Build = 0
	if previous == current {
		logger.Infof(
			"upgrade steps for %v have already been run.",
			jujuversion.Current,
		)
		lock.Unlock()
	}
	return lock
}
|
go
|
{
"resource": ""
}
|
q4710
|
NewWorker
|
train
|
// NewWorker returns a worker that runs any pending machine-agent upgrade
// steps and then unlocks upgradeComplete. The worker's main loop is
// started immediately via the tomb; callers stop it through the
// worker.Worker interface.
func NewWorker(
	upgradeComplete gate.Lock,
	agent agent.Agent,
	apiConn api.Connection,
	jobs []multiwatcher.MachineJob,
	openState func() (*state.StatePool, error),
	preUpgradeSteps func(st *state.StatePool, agentConf agent.Config, isController, isMasterServer, isCaas bool) error,
	machine StatusSetter,
	isCaas bool,
) (worker.Worker, error) {
	w := &upgradesteps{
		upgradeComplete: upgradeComplete,
		agent:           agent,
		apiConn:         apiConn,
		jobs:            jobs,
		openState:       openState,
		preUpgradeSteps: preUpgradeSteps,
		machine:         machine,
		// The tag is captured once up front from the agent's current config.
		tag:    agent.CurrentConfig().Tag(),
		isCaas: isCaas,
	}
	w.tomb.Go(w.run)
	return w, nil
}
|
go
|
{
"resource": ""
}
|
q4711
|
runUpgrades
|
train
|
// runUpgrades prepares for the upgrade, runs the upgrade steps under an
// agent config change, and finalises the upgrade info document. A wrench
// can force a failure between prepare and run for testing.
func (w *upgradesteps) runUpgrades() error {
	upgradeInfo, err := w.prepareForUpgrade()
	if err != nil {
		return err
	}
	if wrench.IsActive(w.wrenchKey(), "fail-upgrade") {
		return errors.New("wrench")
	}
	if err := w.agent.ChangeConfig(w.runUpgradeSteps); err != nil {
		return err
	}
	return w.finaliseUpgrade(upgradeInfo)
}
|
go
|
{
"resource": ""
}
|
q4712
|
runUpgradeSteps
|
train
|
// runUpgradeSteps runs the upgrade steps relevant to this agent's jobs,
// retrying per the upgrade retry strategy, and records the new version
// in the agent config on success. It is passed to agent.ChangeConfig so
// the version bump and the steps are applied together.
func (w *upgradesteps) runUpgradeSteps(agentConfig agent.ConfigSetter) error {
	var upgradeErr error
	w.machine.SetStatus(status.Started, fmt.Sprintf("upgrading to %v", w.toVersion), nil)
	stBackend := upgrades.NewStateBackend(w.pool)
	context := upgrades.NewContext(agentConfig, w.apiConn, stBackend)
	logger.Infof("starting upgrade from %v to %v for %q", w.fromVersion, w.toVersion, w.tag)
	targets := jobsToTargets(w.jobs, w.isMaster)
	attempts := getUpgradeRetryStrategy()
	for attempt := attempts.Start(); attempt.Next(); {
		upgradeErr = PerformUpgrade(w.fromVersion, targets, context)
		if upgradeErr == nil {
			break
		}
		if cmdutil.ConnectionIsDead(logger, w.apiConn) {
			// API connection has gone away - abort!
			return &apiLostDuringUpgrade{upgradeErr}
		}
		if attempt.HasNext() {
			// Only report transient failures; the final error is returned below.
			w.reportUpgradeFailure(upgradeErr, true)
		}
	}
	if upgradeErr != nil {
		return upgradeErr
	}
	// All steps succeeded: persist the new version so they don't rerun.
	agentConfig.SetUpgradedToVersion(w.toVersion)
	return nil
}
|
go
|
{
"resource": ""
}
|
q4713
|
jobsToTargets
|
train
|
// jobsToTargets maps machine jobs to the upgrade targets they imply.
// A model manager also targets the database master when isMaster is set.
// A nil or empty jobs slice yields a nil result.
func jobsToTargets(jobs []multiwatcher.MachineJob, isMaster bool) (targets []upgrades.Target) {
	for _, job := range jobs {
		switch job {
		case multiwatcher.JobManageModel:
			targets = append(targets, upgrades.Controller)
			if isMaster {
				targets = append(targets, upgrades.DatabaseMaster)
			}
		case multiwatcher.JobHostUnits:
			targets = append(targets, upgrades.HostMachine)
		}
	}
	return targets
}
|
go
|
{
"resource": ""
}
|
q4714
|
Validate
|
train
|
// Validate returns an error if the config is missing its Facade.
func (config Config) Validate() error {
	if config.Facade != nil {
		return nil
	}
	return errors.NotValidf("nil Facade")
}
|
go
|
{
"resource": ""
}
|
q4715
|
New
|
train
|
// New returns a strings-watcher worker that rescales applications as
// they change, after validating the supplied config.
func New(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	return watcher.NewStringsWorker(watcher.StringsConfig{
		Handler: &handler{config},
	})
}
|
go
|
{
"resource": ""
}
|
q4716
|
Handle
|
train
|
// Handle forwards the changed application names to the facade for rescaling.
func (handler *handler) Handle(_ <-chan struct{}, applications []string) error {
	facade := handler.config.Facade
	return facade.Rescale(applications)
}
|
go
|
{
"resource": ""
}
|
q4717
|
docIDInt64
|
train
|
// docIDInt64 builds a model-scoped document ID of the form
// "<model-uuid>:<local-id>" from an int64 local ID.
func docIDInt64(modelUUID string, localID int64) string {
	suffix := strconv.FormatInt(localID, 10)
	return modelUUID + ":" + suffix
}
|
go
|
{
"resource": ""
}
|
q4718
|
NewDeadWatcher
|
train
|
// NewDeadWatcher returns a Watcher that is already dead with the
// given error, for callers that need a watcher-shaped failure.
func NewDeadWatcher(err error) *Watcher {
	w := &Watcher{}
	w.tomb.Kill(errors.Trace(err))
	return w
}
|
go
|
{
"resource": ""
}
|
q4719
|
NewWatcher
|
train
|
// NewWatcher returns a presence Watcher for the given model, observing
// the pings/beings collections derived from base, and starts its main
// loop immediately.
func NewWatcher(base *mgo.Collection, modelTag names.ModelTag) *Watcher {
	w := &Watcher{
		modelUUID:   modelTag.Id(),
		base:        base,
		pings:       pingsC(base),
		beings:      beingsC(base),
		beingKey:    make(map[int64]string),
		beingSeq:    make(map[string]int64),
		watches:     make(map[string][]chan<- Change),
		request:     make(chan interface{}),
		ignoredSeqs: make(map[int64]string),
	}
	w.tomb.Go(func() error {
		err := w.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			logger.Infof("watcher loop failed: %v", err)
		}
		return cause
	})
	return w
}
|
go
|
{
"resource": ""
}
|
q4720
|
Watch
|
train
|
// Watch asks the watcher loop to deliver liveness changes for key on ch.
func (w *Watcher) Watch(key string, ch chan<- Change) {
	req := reqWatch{key, ch}
	w.sendReq(req)
}
|
go
|
{
"resource": ""
}
|
q4721
|
Unwatch
|
train
|
// Unwatch asks the watcher loop to stop delivering changes for key on ch.
func (w *Watcher) Unwatch(key string, ch chan<- Change) {
	req := reqUnwatch{key, ch}
	w.sendReq(req)
}
|
go
|
{
"resource": ""
}
|
q4722
|
Sync
|
train
|
// Sync requests a synchronization pass from the watcher loop and blocks
// until it completes, or until the watcher starts dying.
func (w *Watcher) Sync() {
	synced := make(chan bool)
	w.sendReq(reqSync{synced})
	select {
	case <-synced:
	case <-w.tomb.Dying():
	}
}
|
go
|
{
"resource": ""
}
|
q4723
|
Alive
|
train
|
// Alive reports whether the pinger identified by key is currently
// considered alive. It fails if the watcher dies before answering.
func (w *Watcher) Alive(key string) (bool, error) {
	resultCh := make(chan bool, 1)
	w.sendReq(reqAlive{key, resultCh})
	select {
	case alive := <-resultCh:
		logger.Tracef("[%s] Alive(%q) -> %v", w.modelUUID[:6], key, alive)
		return alive, nil
	case <-w.tomb.Dying():
		return false, errors.Errorf("cannot check liveness: watcher is dying")
	}
}
|
go
|
{
"resource": ""
}
|
q4724
|
loop
|
train
|
// loop is the watcher's main goroutine: it periodically syncs with the
// database, flushes pending change events, occasionally prunes old
// presence data, and services watch/unwatch/sync/alive requests.
func (w *Watcher) loop() error {
	var err error
	if w.delta, err = clockDelta(w.base); err != nil {
		return errors.Trace(err)
	}
	// Always sync before handling request.
	if err := w.sync(); err != nil {
		return errors.Trace(err)
	}
	w.next = time.After(time.Duration(period) * time.Second)
	for {
		select {
		case <-w.tomb.Dying():
			return errors.Trace(tomb.ErrDying)
		case <-w.next:
			w.next = time.After(time.Duration(period) * time.Second)
			// Capture and clear pending sync-done channels before syncing
			// so requests arriving mid-sync wait for the next pass.
			syncDone := w.syncDone
			w.syncDone = nil
			if err := w.sync(); err != nil {
				return errors.Trace(err)
			}
			// Deliver events before signalling sync completion.
			w.flush()
			for _, done := range syncDone {
				close(done)
			}
			w.syncsSinceLastPrune++
			w.checkShouldPrune()
		case req := <-w.request:
			w.handle(req)
			w.flush()
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4725
|
checkShouldPrune
|
train
|
// checkShouldPrune probabilistically decides whether to prune old
// presence documents; the chance grows with the number of syncs since
// the last prune, becoming certain once it reaches 1.0.
func (w *Watcher) checkShouldPrune() {
	pruneChance := float64(w.syncsSinceLastPrune) * psuedoRandomFactor
	if pruneChance < 1.0 && rand.Float64() > pruneChance {
		// Not this time.
		return
	}
	// When we decide to prune, we also drop our old cached beings
	logger.Debugf("watcher %q decided to prune %q and %q", w.modelUUID, w.beings.Name, w.pings.Name)
	w.syncsSinceLastPrune = 0
	pruner := NewPruner(w.modelUUID, w.beings, w.pings, w.delta)
	if err := pruner.Prune(w.ignoredSeqs); err != nil {
		logger.Warningf("error while pruning %q for %q: %v", w.beings.Name, w.modelUUID, err)
	}
}
|
go
|
{
"resource": ""
}
|
q4726
|
decompressPings
|
train
|
// decompressPings expands a set of ping bit-field documents into the
// individual sequence numbers that were marked alive. Each map key is a
// hex-encoded base; each bit i of its value represents sequence
// base*63 + i. Returns nil for empty input.
func decompressPings(maps []map[string]int64) ([]int64, error) {
	if len(maps) == 0 {
		return nil, nil
	}
	// First step, merge all value structures together.
	// Every ping has a bit field in an int64. However, bitwise-or preserves
	// everything that was ever alive without having to decode them multiple times.
	baseToBits := make(map[string]int64, len(maps[0]))
	for i := range maps {
		for hexbase, bits := range maps[i] {
			baseToBits[hexbase] |= bits
		}
	}
	sequences := make([]int64, 0, len(baseToBits)*30)
	for hexbase, bits := range baseToBits {
		base, err := strconv.ParseInt(hexbase, 16, 64)
		if err != nil {
			// Report the offending key itself; `base` is zero when
			// ParseInt fails, so formatting it would hide the bad input.
			return nil, errors.Annotatef(err, "presence cannot parse alive key: %q", hexbase)
		}
		base *= 63
		for i := int64(0); i < 63 && bits > 0; i++ {
			on := (bits&1 == 1)
			bits >>= 1
			if !on {
				continue
			}
			sequences = append(sequences, base+i)
		}
	}
	return sequences, nil
}
|
go
|
{
"resource": ""
}
|
q4727
|
sync
|
train
|
// sync reconciles the watcher's in-memory liveness state with the ping
// documents in the database: it looks up recent pings, classifies
// sequences as dead or alive, resolves sequences it has not seen
// before, and queues "no longer alive" events for pingers that have
// stopped reporting.
func (w *Watcher) sync() error {
	// Use a copied session so this pass can't be affected by other users
	// of the shared session.
	session := w.pings.Database.Session.Copy()
	defer session.Close()
	pings, err := w.lookupPings(session)
	if err != nil {
		return err
	}
	dead, err := w.lookForDead(pings)
	if err != nil {
		return err
	}
	alive, unknownSeqs, err := w.handleAlive(pings)
	if err != nil {
		return err
	}
	err = w.lookupUnknownSeqs(unknownSeqs, dead, session)
	if err != nil {
		return err
	}
	// Pingers that were known to be alive and haven't reported
	// in the last two slots are now considered dead. Dispatch
	// the respective events and forget their sequences.
	for seq, key := range w.beingKey {
		if dead[seq] || !alive[seq] {
			logger.Tracef("[%s] removing seq=%d with key %q", w.modelUUID[:6], seq, key)
			delete(w.beingKey, seq)
			delete(w.beingSeq, key)
			for _, ch := range w.watches[key] {
				w.pending = append(w.pending, event{ch, key, false})
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4728
|
NewPinger
|
train
|
// NewPinger returns a Pinger that reports presence of key in the given
// model, recording pings through the supplied recorder factory. The
// pinger is not started until Start is called.
func NewPinger(base *mgo.Collection, modelTag names.ModelTag, key string, recorderFunc func() PingRecorder) *Pinger {
	p := Pinger{
		base:         base,
		pings:        pingsC(base),
		beingKey:     key,
		modelUUID:    modelTag.Id(),
		recorderFunc: recorderFunc,
	}
	return &p
}
|
go
|
{
"resource": ""
}
|
q4729
|
Start
|
train
|
// Start begins periodic pinging. It allocates a fresh sequence for this
// pinger, sends an immediate first ping, and launches the ping loop.
// Starting an already-started pinger is an error.
func (p *Pinger) Start() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.started {
		return errors.Errorf("pinger already started")
	}
	// Reset the tomb so a stopped pinger can be restarted.
	p.tomb = tomb.Tomb{}
	if err := p.prepare(); err != nil {
		return errors.Trace(err)
	}
	logger.Tracef("[%s] starting pinger for %q with seq=%d", p.modelUUID[:6], p.beingKey, p.beingSeq)
	// Ping once synchronously so the pinger is visible as soon as
	// Start returns.
	if err := p.ping(); err != nil {
		return errors.Trace(err)
	}
	p.started = true
	p.tomb.Go(func() error {
		err := p.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			logger.Infof("pinger loop failed: %v", err)
		}
		return cause
	})
	return nil
}
|
go
|
{
"resource": ""
}
|
q4730
|
Stop
|
train
|
// Stop kills the ping loop and waits for it to exit. It is safe to call
// on a pinger that was never started.
func (p *Pinger) Stop() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.started {
		logger.Tracef("[%s] stopping pinger for %q with seq=%d", p.modelUUID[:6], p.beingKey, p.beingSeq)
	}
	p.tomb.Kill(nil)
	waitErr := p.tomb.Wait()
	// TODO ping one more time to guarantee a late timeout.
	p.started = false
	return errors.Trace(waitErr)
}
|
go
|
{
"resource": ""
}
|
q4731
|
killStarted
|
train
|
// killStarted stops a running pinger and marks its last-used slot as
// dead in the pings collection, so watchers treat the entity as having
// explicitly departed rather than timed out.
func (p *Pinger) killStarted() error {
	p.tomb.Kill(nil)
	killErr := p.tomb.Wait()
	p.started = false
	// Record death against the slot we last pinged.
	slot := p.lastSlot
	udoc := bson.D{
		{"$set", bson.D{{"slot", slot}}},
		{"$inc", bson.D{{"dead." + p.fieldKey, p.fieldBit}}}}
	session := p.pings.Database.Session.Copy()
	defer session.Close()
	pings := p.pings.With(session)
	if _, err := pings.UpsertId(docIDInt64(p.modelUUID, slot), udoc); err != nil {
		return errors.Trace(err)
	}
	// Surface the loop's exit error, if any, after recording death.
	return errors.Trace(killErr)
}
|
go
|
{
"resource": ""
}
|
q4732
|
killStopped
|
train
|
// killStopped marks a pinger that was never started as dead: it
// allocates a sequence (via prepare) and records the entity as both
// alive and dead in the current slot so watchers observe an immediate
// departure.
func (p *Pinger) killStopped() error {
	if err := p.prepare(); err != nil {
		return err
	}
	// TODO(perrito666) 2016-05-02 lp:1558657
	slot := timeSlot(time.Now(), p.delta)
	udoc := bson.D{
		{"$set", bson.D{{"slot", slot}}},
		{"$inc", bson.D{
			{"dead." + p.fieldKey, p.fieldBit},
			{"alive." + p.fieldKey, p.fieldBit},
		}}}
	session := p.pings.Database.Session.Copy()
	defer session.Close()
	pings := p.pings.With(session)
	_, err := pings.UpsertId(docIDInt64(p.modelUUID, slot), udoc)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4733
|
loop
|
train
|
// loop pings periodically until the tomb starts dying. The interval is
// a fraction of the slot period so each slot gets at least one ping.
func (p *Pinger) loop() error {
	for {
		select {
		case <-p.tomb.Dying():
			return errors.Trace(tomb.ErrDying)
		case <-time.After(time.Duration(float64(period+1)*0.75) * time.Second):
			if err := p.ping(); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4734
|
prepare
|
train
|
// prepare allocates a fresh being sequence number for this pinger,
// derives the ping document field key/bit it will use (63 sequences
// share one int64 bit field), and registers the sequence->key mapping
// in the beings collection.
func (p *Pinger) prepare() error {
	// Atomically increment-and-fetch the model's sequence counter.
	change := mgo.Change{
		Update:    bson.D{{"$inc", bson.D{{"seq", int64(1)}}}},
		Upsert:    true,
		ReturnNew: true,
	}
	session := p.base.Database.Session.Copy()
	defer session.Close()
	base := p.base.With(session)
	seqs := seqsC(base)
	var seq struct{ Seq int64 }
	seqID := docIDStr(p.modelUUID, "beings")
	if _, err := seqs.FindId(seqID).Apply(change, &seq); err != nil {
		return errors.Trace(err)
	}
	p.beingSeq = seq.Seq
	// 63 sequences per field: the key selects the field, the bit selects
	// the sequence within it.
	p.fieldKey = fmt.Sprintf("%x", p.beingSeq/63)
	p.fieldBit = 1 << uint64(p.beingSeq%63)
	p.lastSlot = 0
	beings := beingsC(base)
	return errors.Trace(beings.Insert(
		beingInfo{
			DocID: docIDInt64(p.modelUUID, p.beingSeq),
			Seq:   p.beingSeq,
			Key:   p.beingKey,
		},
	))
}
|
go
|
{
"resource": ""
}
|
q4735
|
ping
|
train
|
// ping records one presence ping for the current time slot via the
// recorder, lazily resolving the DB clock delta on first use. Each slot
// is pinged at most once to avoid corrupting the bit-field increment.
func (p *Pinger) ping() (err error) {
	logger.Tracef("[%s] pinging %q with seq=%d", p.modelUUID[:6], p.beingKey, p.beingSeq)
	defer func() {
		// If the session is killed from underneath us, it panics when we
		// try to copy it, so deal with that here.
		if v := recover(); v != nil {
			err = fmt.Errorf("%v", v)
		}
	}()
	if p.delta == 0 {
		session := p.pings.Database.Session.Copy()
		defer session.Close()
		base := p.base.With(session)
		delta, err := clockDelta(base)
		if err != nil {
			return errors.Trace(err)
		}
		p.delta = delta
	}
	// TODO(perrito666) 2016-05-02 lp:1558657
	slot := timeSlot(time.Now(), p.delta)
	if slot == p.lastSlot {
		// Never, ever, ping the same slot twice.
		// The increment below would corrupt the slot.
		return nil
	}
	p.lastSlot = slot
	p.recorderFunc().Ping(p.modelUUID, slot, p.fieldKey, p.fieldBit)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4736
|
clockDelta
|
train
|
// clockDelta estimates the offset between the local clock and the
// MongoDB server's clock. It prefers isMaster.localTime (MongoDB 2.2+,
// no admin privileges needed) and falls back to $eval for older
// servers, retrying up to 10 times when a round trip is too slow for
// the measurement to be trustworthy.
func clockDelta(c *mgo.Collection) (time.Duration, error) {
	var server struct {
		time.Time `bson:"retval"`
	}
	var isMaster struct {
		LocalTime time.Time `bson:"localTime"`
	}
	var after time.Time
	var before time.Time
	var serverDelay time.Duration
	supportsMasterLocalTime := true
	session := c.Database.Session.Copy()
	defer session.Close()
	db := c.Database.With(session)
	for i := 0; i < 10; i++ {
		if supportsMasterLocalTime {
			// Try isMaster.localTime, which is present since MongoDB 2.2
			// and does not require admin privileges.
			// TODO(perrito666) 2016-05-02 lp:1558657
			before = time.Now()
			err := db.Run("isMaster", &isMaster)
			// TODO(perrito666) 2016-05-02 lp:1558657
			after = time.Now()
			if err != nil {
				return 0, errors.Trace(err)
			}
			if isMaster.LocalTime.IsZero() {
				// Server too old for localTime; switch to the
				// eval fallback for the remaining attempts.
				supportsMasterLocalTime = false
				continue
			} else {
				serverDelay = isMaster.LocalTime.Sub(before)
			}
		} else {
			// If MongoDB doesn't have localTime as part of
			// isMaster result, it means that the server is likely
			// a MongoDB older than 2.2.
			//
			// Fallback to 'eval' works fine on versions older than
			// 2.4 where it does not require admin privileges.
			//
			// NOTE: 'eval' takes a global write lock unless you
			// specify 'nolock' (which we are not doing below, for
			// no apparent reason), so it is quite likely that the
			// eval could take a relatively long time to acquire
			// the lock and thus cause a retry on the callDelay
			// check below on a busy server.
			// TODO(perrito666) 2016-05-02 lp:1558657
			before = time.Now()
			err := db.Run(bson.D{{"$eval", "function() { return new Date(); }"}}, &server)
			// TODO(perrito666) 2016-05-02 lp:1558657
			after = time.Now()
			if err != nil {
				return 0, errors.Trace(err)
			}
			serverDelay = server.Sub(before)
		}
		// If the call to the server takes longer than a few seconds we
		// retry it a couple more times before giving up. It is unclear
		// why the retry would help at all here.
		//
		// If the server takes longer than the specified amount of time
		// on every single try, then we simply give up.
		callDelay := after.Sub(before)
		if callDelay > 5*time.Second {
			continue
		}
		return serverDelay, nil
	}
	return 0, errors.Errorf("cannot synchronize clock with database server")
}
|
go
|
{
"resource": ""
}
|
q4737
|
timeSlot
|
train
|
// timeSlot returns the period-aligned slot for now adjusted by delta.
// When a fake time is active (test support), the fake time and offset
// are used instead; the fake state is read under fakeMutex.
func timeSlot(now time.Time, delta time.Duration) int64 {
	fakeMutex.Lock()
	usingFakeTime := !fakeNow.IsZero()
	if usingFakeTime {
		now = fakeNow
	}
	// Align to the start of the containing period.
	slot := now.Add(delta).Unix()
	slot -= slot % period
	if usingFakeTime {
		slot += int64(fakeOffset) * period
	}
	fakeMutex.Unlock()
	return slot
}
|
go
|
{
"resource": ""
}
|
q4738
|
realTimeSlot
|
train
|
// realTimeSlot disables any faked presence time (test support) so that
// timeSlot computes slots from the real wall clock again.
func realTimeSlot() {
	fakeMutex.Lock()
	fakeNow = time.Time{}
	fakeOffset = 0
	fakeMutex.Unlock()
	logger.Infof("not faking presence time. Real time slot in use.")
}
|
go
|
{
"resource": ""
}
|
q4739
|
RemovePresenceForModel
|
train
|
// RemovePresenceForModel removes all presence data (pings, beings and
// sequence documents) for the given model, collecting any per-collection
// failures into a single error.
func RemovePresenceForModel(base *mgo.Collection, modelTag names.ModelTag) error {
	errs := make([]error, 0)
	collections := []func(*mgo.Collection) *mgo.Collection{pingsC, beingsC, seqsC}
	for _, collFunc := range collections {
		if err := removeModelFromCollection(collFunc(base), modelTag.Id()); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) != 0 {
		return errors.Errorf("errors removing presence for model %q: %v", modelTag.Id(), errs)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4740
|
CheckName
|
train
|
// CheckName validates the given space name and returns it unchanged,
// or an error if the name is not a valid space name.
func CheckName(name string) (string, error) {
	if names.IsValidSpace(name) {
		return name, nil
	}
	return "", errors.Errorf("%q is not a valid space name", name)
}
|
go
|
{
"resource": ""
}
|
q4741
|
CheckCIDRs
|
train
|
// CheckCIDRs parses the given args as CIDRs, returning the normalized
// set. It rejects unparseable values, exact duplicates, and CIDRs that
// normalize to an already-seen subnet (overlaps). Unless cidrsOptional
// is true, at least one CIDR must be supplied.
func CheckCIDRs(args []string, cidrsOptional bool) (set.Strings, error) {
	cidrs := set.NewStrings()
	for _, arg := range args {
		_, ipNet, err := net.ParseCIDR(arg)
		if err != nil {
			logger.Debugf("cannot parse %q: %v", arg, err)
			return cidrs, errors.Errorf("%q is not a valid CIDR", arg)
		}
		normalized := ipNet.String()
		if cidrs.Contains(normalized) {
			if normalized == arg {
				return cidrs, errors.Errorf("duplicate subnet %q specified", normalized)
			}
			return cidrs, errors.Errorf("subnet %q overlaps with %q", arg, normalized)
		}
		cidrs.Add(normalized)
	}
	if cidrs.IsEmpty() && !cidrsOptional {
		return cidrs, errors.New("CIDRs required but not provided")
	}
	return cidrs, nil
}
|
go
|
{
"resource": ""
}
|
q4742
|
NewAPI
|
train
|
// NewAPI returns the cached SpaceAPI if one exists, otherwise dials the
// API root and wraps it in the MVP shim.
func (c *SpaceCommandBase) NewAPI() (SpaceAPI, error) {
	if c.api != nil {
		// Already created.
		return c.api, nil
	}
	root, err := c.NewAPIRoot()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// This is tested with a feature test.
	shim := &mvpAPIShim{
		apiState: root,
		facade:   spaces.NewAPI(root),
	}
	return shim, nil
}
|
go
|
{
"resource": ""
}
|
q4743
|
updateCharmDir
|
train
|
// updateCharmDir sets charm directory availability for sharing among
// concurrent workers: the guard is unlocked only while the unit is
// started, not stopped, and the charm content is not changing.
func updateCharmDir(opState operation.State, guard fortress.Guard, abort fortress.Abort) error {
	// The charm content is changing during install/upgrade operations
	// and while an upgrade-charm hook runs.
	changing := opState.Kind == operation.Install ||
		opState.Kind == operation.Upgrade ||
		(opState.Kind == operation.RunHook &&
			opState.Hook != nil &&
			opState.Hook.Kind == hooks.UpgradeCharm)
	available := opState.Started && !opState.Stopped && !changing
	logger.Tracef("charmdir: available=%v opState: started=%v stopped=%v changing=%v",
		available, opState.Started, opState.Stopped, changing)
	if available {
		return guard.Unlock()
	}
	// Avoid `else` after a terminating branch; lock down otherwise.
	return guard.Lockdown(abort)
}
|
go
|
{
"resource": ""
}
|
q4744
|
NewMockStorageV1Interface
|
train
|
// NewMockStorageV1Interface creates a new mock instance bound to ctrl.
func NewMockStorageV1Interface(ctrl *gomock.Controller) *MockStorageV1Interface {
	m := &MockStorageV1Interface{ctrl: ctrl}
	m.recorder = &MockStorageV1InterfaceMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q4745
|
StorageClasses
|
train
|
// StorageClasses mocks base method.
func (m *MockStorageV1Interface) StorageClasses() v11.StorageClassInterface {
	results := m.ctrl.Call(m, "StorageClasses")
	iface, _ := results[0].(v11.StorageClassInterface)
	return iface
}
|
go
|
{
"resource": ""
}
|
q4746
|
NewMockStorageClassInterface
|
train
|
// NewMockStorageClassInterface creates a new mock instance bound to ctrl.
func NewMockStorageClassInterface(ctrl *gomock.Controller) *MockStorageClassInterface {
	m := &MockStorageClassInterface{ctrl: ctrl}
	m.recorder = &MockStorageClassInterfaceMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q4747
|
Phase
|
train
|
// Phase returns the migration's current phase as parsed from the
// status document, or an error if the stored phase is unrecognized.
func (mig *modelMigration) Phase() (migration.Phase, error) {
	raw := mig.statusDoc.Phase
	phase, ok := migration.ParsePhase(raw)
	if ok {
		return phase, nil
	}
	return phase, errors.Errorf("invalid phase in DB: %v", raw)
}
|
go
|
{
"resource": ""
}
|
q4748
|
TargetInfo
|
train
|
// TargetInfo assembles the target controller connection details from
// the migration document, parsing the auth tag and macaroons.
func (mig *modelMigration) TargetInfo() (*migration.TargetInfo, error) {
	doc := mig.doc
	authTag, err := names.ParseUserTag(doc.TargetAuthTag)
	if err != nil {
		return nil, errors.Trace(err)
	}
	macs, err := jsonToMacaroons(doc.TargetMacaroons)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info := migration.TargetInfo{
		ControllerTag:   names.NewControllerTag(doc.TargetController),
		ControllerAlias: doc.TargetControllerAlias,
		Addrs:           doc.TargetAddrs,
		CACert:          doc.TargetCACert,
		AuthTag:         authTag,
		Password:        doc.TargetPassword,
		Macaroons:       macs,
	}
	return &info, nil
}
|
go
|
{
"resource": ""
}
|
q4749
|
SetPhase
|
train
|
// SetPhase transitions the migration to nextPhase, validating the
// transition, updating the status document transactionally (guarded by
// an assertion on the current phase), recording status history, and
// performing terminal-phase cleanup (reactivating the model on abort,
// removing the active-migration marker).
func (mig *modelMigration) SetPhase(nextPhase migration.Phase) error {
	now := mig.st.clock().Now().UnixNano()
	phase, err := mig.Phase()
	if err != nil {
		return errors.Trace(err)
	}
	if nextPhase == phase {
		return nil // Already at that phase. Nothing to do.
	}
	if !phase.CanTransitionTo(nextPhase) {
		return errors.Errorf("illegal phase change: %s -> %s", phase, nextPhase)
	}
	// Build the in-memory successor doc and the matching $set update
	// side by side so they can't diverge.
	nextDoc := mig.statusDoc
	nextDoc.Phase = nextPhase.String()
	nextDoc.PhaseChangedTime = now
	update := bson.M{
		"phase":              nextDoc.Phase,
		"phase-changed-time": now,
	}
	if nextPhase == migration.SUCCESS {
		nextDoc.SuccessTime = now
		update["success-time"] = now
	}
	ops, err := migStatusHistoryAndOps(mig.st, nextPhase, now, mig.StatusMessage())
	if err != nil {
		return errors.Trace(err)
	}
	// If the migration aborted, make the model active again.
	if nextPhase == migration.ABORTDONE {
		ops = append(ops, txn.Op{
			C:      modelsC,
			Id:     mig.doc.ModelUUID,
			Assert: txn.DocExists,
			Update: bson.M{
				"$set": bson.M{"migration-mode": MigrationModeNone},
			},
		})
	}
	// Set end timestamps and mark migration as no longer active if a
	// terminal phase is hit.
	if nextPhase.IsTerminal() {
		nextDoc.EndTime = now
		update["end-time"] = now
		ops = append(ops, txn.Op{
			C:      migrationsActiveC,
			Id:     mig.doc.ModelUUID,
			Assert: txn.DocExists,
			Remove: true,
		})
	}
	ops = append(ops, txn.Op{
		C:      migrationsStatusC,
		Id:     mig.statusDoc.Id,
		Update: bson.M{"$set": update},
		// Ensure phase hasn't changed underneath us
		Assert: bson.M{"phase": mig.statusDoc.Phase},
	})
	if err := mig.st.db().RunTransaction(ops); err == txn.ErrAborted {
		return errors.New("phase already changed")
	} else if err != nil {
		return errors.Annotate(err, "failed to update phase")
	}
	// Only reflect the change locally once the DB write succeeded.
	mig.statusDoc = nextDoc
	return nil
}
|
go
|
{
"resource": ""
}
|
q4750
|
migStatusHistoryAndOps
|
train
|
// migStatusHistoryAndOps returns the txn ops that set the model's
// status for the given migration phase (Busy while migrating, Available
// once terminal) and records the change in status history as a side
// effect. REAP/DONE phases return no ops since the model is being (or
// has been) removed.
func migStatusHistoryAndOps(st *State, phase migration.Phase, now int64, msg string) ([]txn.Op, error) {
	switch phase {
	case migration.REAP, migration.DONE:
		// if we're reaping/have reaped the model, setting status on it is both
		// pointless and potentially problematic.
		return nil, nil
	}
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	globalKey := model.globalKey()
	modelStatus := status.Busy
	if phase.IsTerminal() {
		modelStatus = status.Available
	}
	if msg != "" {
		msg = "migrating: " + msg
	}
	doc := statusDoc{
		Status:     modelStatus,
		StatusInfo: msg,
		Updated:    now,
	}
	ops, err := statusSetOps(st.db(), doc, globalKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// NOTE: history is written immediately, not as part of the returned
	// ops — it happens even if the caller's transaction later aborts.
	probablyUpdateStatusHistory(st.db(), globalKey, doc)
	return ops, nil
}
|
go
|
{
"resource": ""
}
|
q4751
|
SetStatusMessage
|
train
|
// SetStatusMessage updates the migration's human-readable status
// message, recording it both on the migration status document and in
// the model's status history for the current phase.
func (mig *modelMigration) SetStatusMessage(text string) error {
	phase, err := mig.Phase()
	if err != nil {
		return errors.Trace(err)
	}
	ops, err := migStatusHistoryAndOps(mig.st, phase, mig.st.clock().Now().UnixNano(), text)
	if err != nil {
		return errors.Trace(err)
	}
	ops = append(ops, txn.Op{
		C:      migrationsStatusC,
		Id:     mig.statusDoc.Id,
		Update: bson.M{"$set": bson.M{"status-message": text}},
		Assert: txn.DocExists,
	})
	if err := mig.st.db().RunTransaction(ops); err != nil {
		return errors.Annotate(err, "failed to set migration status")
	}
	// Reflect the change locally only after the DB write succeeded.
	mig.statusDoc.StatusMessage = text
	return nil
}
|
go
|
{
"resource": ""
}
|
q4752
|
SubmitMinionReport
|
train
|
// SubmitMinionReport records a success/failure report from a minion
// agent for the given migration phase. Duplicate reports with the same
// outcome are idempotent; a duplicate with a conflicting outcome is an
// error.
func (mig *modelMigration) SubmitMinionReport(tag names.Tag, phase migration.Phase, success bool) error {
	globalKey, err := agentTagToGlobalKey(tag)
	if err != nil {
		return errors.Trace(err)
	}
	docID := mig.minionReportId(phase, globalKey)
	doc := modelMigMinionSyncDoc{
		Id:          docID,
		MigrationId: mig.Id(),
		Phase:       phase.String(),
		EntityKey:   globalKey,
		Time:        mig.st.clock().Now().UnixNano(),
		Success:     success,
	}
	ops := []txn.Op{{
		C:      migrationsMinionSyncC,
		Id:     docID,
		Insert: doc,
		Assert: txn.DocMissing,
	}}
	err = mig.st.db().RunTransaction(ops)
	if errors.Cause(err) == txn.ErrAborted {
		// The insert was asserted on DocMissing, so an abort means a
		// report already exists — check it agrees with this one.
		coll, closer := mig.st.db().GetCollection(migrationsMinionSyncC)
		defer closer()
		var existingDoc modelMigMinionSyncDoc
		err := coll.FindId(docID).Select(bson.M{"success": 1}).One(&existingDoc)
		if err != nil {
			return errors.Annotate(err, "checking existing report")
		}
		if existingDoc.Success != success {
			return errors.Errorf("conflicting reports received for %s/%s/%s",
				mig.Id(), phase.String(), tag)
		}
		return nil
	} else if err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4753
|
MinionReports
|
train
|
// MinionReports aggregates the minion reports submitted for the
// migration's current phase, partitioning all known agents into
// succeeded, failed and unknown (not yet reported).
func (mig *modelMigration) MinionReports() (*MinionReports, error) {
	all, err := mig.getAllAgents()
	if err != nil {
		return nil, errors.Trace(err)
	}
	phase, err := mig.Phase()
	if err != nil {
		return nil, errors.Annotate(err, "retrieving phase")
	}
	coll, closer := mig.st.db().GetCollection(migrationsMinionSyncC)
	defer closer()
	// Report IDs embed the phase, so a prefix regex selects only this
	// phase's reports.
	query := coll.Find(bson.M{"_id": bson.M{
		"$regex": "^" + mig.minionReportId(phase, ".+"),
	}})
	query = query.Select(bson.M{
		"entity-key": 1,
		"success":    1,
	})
	var docs []bson.M
	if err := query.All(&docs); err != nil {
		return nil, errors.Annotate(err, "retrieving minion reports")
	}
	succeeded := names.NewSet()
	failed := names.NewSet()
	for _, doc := range docs {
		entityKey, ok := doc["entity-key"].(string)
		if !ok {
			return nil, errors.Errorf("unexpected entity-key %v", doc["entity-key"])
		}
		tag, err := globalKeyToAgentTag(entityKey)
		if err != nil {
			return nil, errors.Trace(err)
		}
		success, ok := doc["success"].(bool)
		if !ok {
			return nil, errors.Errorf("unexpected success value: %v", doc["success"])
		}
		if success {
			succeeded.Add(tag)
		} else {
			failed.Add(tag)
		}
	}
	// Agents with no report at all in either set are still pending.
	unknown := all.Difference(succeeded).Difference(failed)
	return &MinionReports{
		Succeeded: succeeded.Values(),
		Failed:    failed.Values(),
		Unknown:   unknown.Values(),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4754
|
WatchMinionReports
|
train
|
// WatchMinionReports returns a watcher that notifies when minion
// reports arrive for the migration's current phase, filtering the
// minion-sync collection by the phase-specific report ID prefix.
func (mig *modelMigration) WatchMinionReports() (NotifyWatcher, error) {
	phase, err := mig.Phase()
	if err != nil {
		return nil, errors.Annotate(err, "retrieving phase")
	}
	prefix := mig.minionReportId(phase, "")
	hasPrefix := func(rawId interface{}) bool {
		id, ok := rawId.(string)
		return ok && strings.HasPrefix(id, prefix)
	}
	return newNotifyCollWatcher(mig.st, migrationsMinionSyncC, hasPrefix), nil
}
|
go
|
{
"resource": ""
}
|
q4755
|
Refresh
|
train
|
// Refresh reloads the migration's status document from the database.
func (mig *modelMigration) Refresh() error {
	// Only the status document is updated. The modelMigDoc is static
	// after creation.
	coll, closer := mig.st.db().GetCollection(migrationsStatusC)
	defer closer()
	var doc modelMigStatusDoc
	switch err := coll.FindId(mig.doc.Id).One(&doc); {
	case err == mgo.ErrNotFound:
		return errors.NotFoundf("migration status")
	case err != nil:
		return errors.Annotate(err, "migration status lookup failed")
	}
	mig.statusDoc = doc
	return nil
}
|
go
|
{
"resource": ""
}
|
q4756
|
ModelUserAccess
|
train
|
// ModelUserAccess returns the access level recorded for the given user
// at the time the migration was created, or NoAccess if unknown.
func (mig *modelMigration) ModelUserAccess(tag names.Tag) permission.Access {
	wanted := tag.Id()
	for _, modelUser := range mig.doc.ModelUsers {
		if modelUser.UserID == wanted {
			return modelUser.Access
		}
	}
	return permission.NoAccess
}
|
go
|
{
"resource": ""
}
|
q4757
|
Validate
|
train
|
// Validate checks the migration spec's initiating user and then
// delegates to the target info's own validation.
func (spec *MigrationSpec) Validate() error {
	if names.IsValidUser(spec.InitiatedBy.Id()) {
		return spec.TargetInfo.Validate()
	}
	return errors.NotValidf("InitiatedBy")
}
|
go
|
{
"resource": ""
}
|
q4758
|
Migration
|
train
|
// Migration retrieves a specific model migration by its ID.
func (st *State) Migration(id string) (ModelMigration, error) {
	coll, closer := st.db().GetCollection(migrationsC)
	defer closer()
	mig, err := st.migrationFromQuery(coll.FindId(id))
	if err != nil {
		return nil, errors.Trace(err)
	}
	return mig, nil
}
|
go
|
{
"resource": ""
}
|
q4759
|
IsMigrationActive
|
train
|
// IsMigrationActive reports whether the model with the given UUID has
// an entry in the active-migrations collection.
func IsMigrationActive(st *State, modelUUID string) (bool, error) {
	coll, closer := st.db().GetCollection(migrationsActiveC)
	defer closer()
	count, err := coll.FindId(modelUUID).Count()
	if err != nil {
		return false, errors.Trace(err)
	}
	return count > 0, nil
}
|
go
|
{
"resource": ""
}
|
q4760
|
Decorate
|
train
|
// Decorate returns a copy of base wrapped according to the housing
// configuration: fortress occupation, flag gating, and an optional
// filter. The base manifold is not modified.
func (housing Housing) Decorate(base dependency.Manifold) dependency.Manifold {
	decorated := base
	// Apply Occupy wrapping first, so that it will be the last
	// wrapper to execute before calling the original Start func, so
	// as to minimise the time we hold the fortress open.
	if housing.Occupy != "" {
		decorated.Inputs = maybeAdd(decorated.Inputs, housing.Occupy)
		decorated.Start = occupyStart(decorated.Start, housing.Occupy)
	}
	for _, flagName := range housing.Flags {
		decorated.Inputs = maybeAdd(decorated.Inputs, flagName)
		decorated.Start = flagStart(decorated.Start, flagName)
	}
	if housing.Filter != nil {
		decorated.Filter = housing.Filter
	}
	return decorated
}
|
go
|
{
"resource": ""
}
|
q4761
|
List
|
train
|
// List returns the payloads matching the given patterns (all payloads
// when no patterns are supplied), converted to their API representation.
func (a API) List(args params.PayloadListArgs) (params.PayloadListResults, error) {
	var results params.PayloadListResults
	all, err := a.backend.ListAll()
	if err != nil {
		return results, errors.Trace(err)
	}
	predicates, err := payload.BuildPredicatesFor(args.Patterns)
	if err != nil {
		return results, errors.Trace(err)
	}
	for _, p := range payload.Filter(all, predicates...) {
		results.Results = append(results.Results, api.Payload2api(p))
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4762
|
PopValue
|
train
|
// PopValue returns the value stored under key and removes the entry,
// so a subsequent pop of the same key yields the empty string.
func (m ManagerConfig) PopValue(key string) string {
	defer delete(m, key)
	return m[key]
}
|
go
|
{
"resource": ""
}
|
q4763
|
WarnAboutUnused
|
train
|
// WarnAboutUnused logs every config option that was never consumed
// (i.e. never popped) so misspelled options are noticed.
func (m ManagerConfig) WarnAboutUnused() {
	for k, v := range m {
		logger.Infof("unused config option: %q -> %q", k, v)
	}
}
|
go
|
{
"resource": ""
}
|
q4764
|
ServeHTTP
|
train
|
// ServeHTTP dispatches GET requests to the configured handler and
// rejects other methods; any failure is reported to the client as JSON.
func (h *RestHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var handleErr error
	switch r.Method {
	case "GET":
		handleErr = errors.Annotate(h.GetHandler(w, r), "cannot retrieve model data")
	default:
		handleErr = emitUnsupportedMethodErr(r.Method)
	}
	if handleErr == nil {
		return
	}
	if sendErr := sendJSONError(w, r, errors.Trace(handleErr)); sendErr != nil {
		logger.Errorf("%v", errors.Annotate(sendErr, "cannot return error to user"))
	}
}
|
go
|
{
"resource": ""
}
|
q4765
|
ServeGet
|
train
|
// ServeGet handles an authenticated GET request on the model REST
// endpoint, delegating the actual work to processGet. Non-GET methods
// are rejected.
func (h *modelRestHandler) ServeGet(w http.ResponseWriter, r *http.Request) error {
	if r.Method != "GET" {
		return errors.Trace(emitUnsupportedMethodErr(r.Method))
	}
	// Authenticate the request and resolve the model's state; the
	// pooled state is released once the request has been served.
	st, _, err := h.ctxt.stateForRequestAuthenticated(r)
	if err != nil {
		return errors.Trace(err)
	}
	defer st.Release()
	return errors.Trace(h.processGet(r, w, st.State))
}
|
go
|
{
"resource": ""
}
|
q4766
|
processGet
|
train
|
// processGet routes the request to the handler for the entity named by
// the ":entity" query parameter. Only "remote-application" is
// currently supported.
func (h *modelRestHandler) processGet(r *http.Request, w http.ResponseWriter, st *state.State) error {
	entity := r.URL.Query().Get(":entity")
	// TODO(wallyworld) - support more than just "remote-application"
	if entity == "remote-application" {
		return h.processRemoteApplication(r, w, st)
	}
	return errors.NotSupportedf("entity %v", entity)
}
|
go
|
{
"resource": ""
}
|
q4767
|
processRemoteApplication
|
train
|
// processRemoteApplication serves the charm icon (icon.svg) for a
// remote application offered from another model. It resolves the offer
// back to the application in the source model, reads the charm archive
// from that model's storage and extracts the icon; if either the
// archive or the icon is missing, the default Juju charm icon is
// served instead.
func (h *modelRestHandler) processRemoteApplication(r *http.Request, w http.ResponseWriter, st *state.State) error {
	query := r.URL.Query()
	name := query.Get(":name")
	remoteApp, err := st.RemoteApplication(name)
	if err != nil {
		return errors.Trace(err)
	}
	attribute := query.Get(":attribute")
	// TODO(wallyworld) - support more than just "icon"
	if attribute != "icon" {
		return errors.NotSupportedf("attribute %v on entity %v", attribute, name)
	}
	// Get the backend state for the source model so we can lookup the app in that model to get the charm details.
	offerUUID := remoteApp.OfferUUID()
	sourceModelUUID := remoteApp.SourceModel().Id()
	sourceSt, err := h.ctxt.srv.shared.statePool.Get(sourceModelUUID)
	if err != nil {
		return errors.Trace(err)
	}
	defer sourceSt.Release()
	// Map offer UUID -> offer -> application -> charm in the source model.
	offers := state.NewApplicationOffers(sourceSt.State)
	offer, err := offers.ApplicationOfferForUUID(offerUUID)
	if err != nil {
		return errors.Trace(err)
	}
	app, err := sourceSt.Application(offer.ApplicationName)
	if err != nil {
		return errors.Trace(err)
	}
	ch, _, err := app.Charm()
	if err != nil {
		return errors.Trace(err)
	}
	store := storage.NewStorage(sourceSt.ModelUUID(), sourceSt.MongoSession())
	// Use the storage to retrieve and save the charm archive.
	charmPath, err := common.ReadCharmFromStorage(store, h.dataDir, ch.StoragePath())
	if errors.IsNotFound(err) {
		// No archive available: serve the default icon rather than failing.
		return h.byteSender(w, ".svg", []byte(common.DefaultCharmIcon))
	}
	if err != nil {
		return errors.Trace(err)
	}
	iconContents, err := common.CharmArchiveEntry(charmPath, "icon.svg", true)
	if errors.IsNotFound(err) {
		// Charm ships no icon.svg: fall back to the default icon.
		return h.byteSender(w, ".svg", []byte(common.DefaultCharmIcon))
	}
	if err != nil {
		return errors.Trace(err)
	}
	return h.byteSender(w, ".svg", iconContents)
}
|
go
|
{
"resource": ""
}
|
q4768
|
NewMockNotifyWatcher
|
train
|
// NewMockNotifyWatcher creates a new mock instance wired to the given
// gomock controller.
func NewMockNotifyWatcher(ctrl *gomock.Controller) *MockNotifyWatcher {
	m := &MockNotifyWatcher{ctrl: ctrl}
	m.recorder = &MockNotifyWatcherMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q4769
|
NewMockStringsWatcher
|
train
|
// NewMockStringsWatcher creates a new mock instance wired to the given
// gomock controller.
func NewMockStringsWatcher(ctrl *gomock.Controller) *MockStringsWatcher {
	m := &MockStringsWatcher{ctrl: ctrl}
	m.recorder = &MockStringsWatcherMockRecorder{m}
	return m
}
|
go
|
{
"resource": ""
}
|
q4770
|
checkImageList
|
train
|
// checkImageList fetches every image in the oracle cloud account and
// converts each to simplestreams image metadata. Images whose URI or
// name cannot be parsed are skipped with a warning rather than
// aborting the listing. Returns a NotFound error when the client is
// nil or the account holds no images.
func checkImageList(c EnvironAPI) ([]*imagemetadata.ImageMetadata, error) {
	if c == nil {
		return nil, errors.NotFoundf("oracle client")
	}
	// take a list of all images that are in the oracle cloud account
	resp, err := c.AllImageLists(nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// if we don't have any images that are in
	// the oracle cloud account under your username namespace
	// we should let the user know this
	n := len(resp.Result)
	if n == 0 {
		return nil, errors.NotFoundf(
			"images under the current client username are",
		)
	}
	images := make([]*imagemetadata.ImageMetadata, 0, n)
	for _, val := range resp.Result {
		uri, err := url.Parse(val.Uri)
		if err != nil {
			logger.Warningf("image with ID %q had invalid resource URI %q", val.Name, val.Uri)
			continue
		}
		// The image name is the last path segment of the resource URI.
		requestUri := strings.Split(uri.RequestURI(), "/")
		if len(requestUri) == 0 {
			continue
		}
		name := requestUri[len(requestUri)-1]
		metadata, err := parseImageName(name, uri)
		if err != nil {
			logger.Warningf("failed to parse image name %s. Error was: %q", name, err)
			continue
		}
		logger.Infof("adding image %v to metadata", metadata.String())
		images = append(images, metadata)
	}
	return images, nil
}
|
go
|
{
"resource": ""
}
|
q4771
|
getImageName
|
train
|
// getImageName returns the short name (last "/"-separated segment) of
// the first image in the client's account whose full name contains id.
// A NotFound error is returned when id is empty, the account has no
// images, or no image matches.
func getImageName(c EnvironAPI, id string) (string, error) {
	if id == "" {
		return "", errors.NotFoundf("empty id")
	}
	resp, err := c.AllImageLists(nil)
	if err != nil {
		return "", errors.Trace(err)
	}
	// if we don't have any images that are in
	// the oracle cloud account under your username namespace
	// we should let the user know this.
	// Use a length check (matching checkImageList) so an empty but
	// non-nil result list is reported the same way as a nil one.
	if len(resp.Result) == 0 {
		return "", errors.NotFoundf(
			"no usable images found in your account. Please add images from the oracle market",
		)
	}
	for _, val := range resp.Result {
		if strings.Contains(val.Name, id) {
			parts := strings.Split(val.Name, "/")
			return parts[len(parts)-1], nil
		}
	}
	return "", errors.NotFoundf("image not found: %q", id)
}
|
go
|
{
"resource": ""
}
|
q4772
|
Validate
|
train
|
// Validate returns an error if the StringsConfig cannot be used to
// start a StringsWorker.
func (config StringsConfig) Validate() error {
	if config.Handler != nil {
		return nil
	}
	return errors.NotValidf("nil Handler")
}
|
go
|
{
"resource": ""
}
|
q4773
|
NewStringsWorker
|
train
|
// NewStringsWorker validates the config and starts a StringsWorker
// whose lifetime is managed by its catacomb.
func NewStringsWorker(config StringsConfig) (*StringsWorker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	w := &StringsWorker{config: config}
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	}); err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
|
go
|
{
"resource": ""
}
|
q4774
|
NewManagedFilesystemSource
|
train
|
// NewManagedFilesystemSource returns a FilesystemSource backed by the
// given block devices and filesystems, using the default command
// runner and OS directory helpers.
func NewManagedFilesystemSource(
	volumeBlockDevices map[names.VolumeTag]storage.BlockDevice,
	filesystems map[names.FilesystemTag]storage.Filesystem,
) storage.FilesystemSource {
	dirFuncs := &osDirFuncs{logAndExec}
	return &managedFilesystemSource{
		logAndExec,
		dirFuncs,
		volumeBlockDevices,
		filesystems,
	}
}
|
go
|
{
"resource": ""
}
|
q4775
|
isDiskDevice
|
train
|
// isDiskDevice reports whether devicePath looks like a whole-disk
// device rather than a partition: partition paths end in a digit
// (e.g. /dev/sda1), whole-disk paths do not (e.g. /dev/sda).
// An empty path is treated as a disk device, matching the zero-rune
// behaviour of the original loop.
// NOTE(review): whole-disk NVMe paths like /dev/nvme0n1 also end in a
// digit — confirm callers never pass such paths to this heuristic.
func isDiskDevice(devicePath string) bool {
	runes := []rune(devicePath)
	if len(runes) == 0 {
		return true
	}
	return !unicode.IsDigit(runes[len(runes)-1])
}
|
go
|
{
"resource": ""
}
|
q4776
|
watchMachine
|
train
|
// watchMachine starts a machine watcher for tag if one is not already
// running and registers it with the context. Failure to create or
// register the watcher kills the whole context.
func watchMachine(ctx *context, tag names.MachineTag) {
	if _, watching := ctx.machines[tag]; watching {
		return
	}
	w, err := newMachineWatcher(ctx.config.Machines, tag, ctx.machineChanges)
	if err != nil {
		ctx.kill(errors.Trace(err))
		return
	}
	if err := ctx.addWorker(w); err != nil {
		ctx.kill(errors.Trace(err))
		return
	}
	ctx.machines[tag] = w
}
|
go
|
{
"resource": ""
}
|
q4777
|
refreshMachine
|
train
|
// refreshMachine re-checks the provisioning state of a watched
// machine. When the machine now has an instance ID its pending storage
// params are completed via machineProvisioned; in every terminal case
// (provisioned or gone) the machine watcher is stopped and removed,
// since provisioning is the only event of interest. A not-provisioned
// error leaves the watcher running for the next change.
func refreshMachine(ctx *context, tag names.MachineTag) error {
	w, ok := ctx.machines[tag]
	if !ok {
		return errors.Errorf("machine %s is not being watched", tag.Id())
	}
	// stopAndRemove tears down the watcher; the Stop error is
	// deliberately ignored (best-effort cleanup).
	stopAndRemove := func() error {
		worker.Stop(w)
		delete(ctx.machines, tag)
		return nil
	}
	results, err := ctx.config.Machines.InstanceIds([]names.MachineTag{tag})
	if err != nil {
		return errors.Annotate(err, "getting machine instance ID")
	}
	if err := results[0].Error; err != nil {
		if params.IsCodeNotProvisioned(err) {
			// Not provisioned yet: keep watching.
			return nil
		} else if params.IsCodeNotFound(err) {
			// Machine is gone, so stop watching.
			return stopAndRemove()
		}
		return errors.Annotate(err, "getting machine instance ID")
	}
	machineProvisioned(ctx, tag, instance.Id(results[0].Result))
	// machine provisioning is the only thing we care about;
	// stop the watcher.
	return stopAndRemove()
}
|
go
|
{
"resource": ""
}
|
q4778
|
machineProvisioned
|
train
|
// machineProvisioned is called once a machine has an instance ID. It
// fills in that instance ID on any incomplete volume params, volume
// attachment params and filesystem attachment params that were waiting
// on this machine, moving each back onto its pending queue.
func machineProvisioned(ctx *context, tag names.MachineTag, instanceId instance.Id) {
	// Complete volume params whose attachment was waiting on this machine.
	for _, params := range ctx.incompleteVolumeParams {
		if params.Attachment.Machine != tag || params.Attachment.InstanceId != "" {
			continue
		}
		params.Attachment.InstanceId = instanceId
		updatePendingVolume(ctx, params)
	}
	// Complete volume attachments waiting on this machine.
	for id, params := range ctx.incompleteVolumeAttachmentParams {
		if params.Machine != tag || params.InstanceId != "" {
			continue
		}
		params.InstanceId = instanceId
		updatePendingVolumeAttachment(ctx, id, params)
	}
	// Complete filesystem attachments waiting on this machine.
	for id, params := range ctx.incompleteFilesystemAttachmentParams {
		if params.Machine != tag || params.InstanceId != "" {
			continue
		}
		params.InstanceId = instanceId
		updatePendingFilesystemAttachment(ctx, id, params)
	}
}
|
go
|
{
"resource": ""
}
|
q4779
|
Manifold
|
train
|
// Manifold returns a dependency.Manifold whose inputs are the clock,
// either the state or the lease manager (depending on which name is
// configured), and optionally raft.
func Manifold(config ManifoldConfig) dependency.Manifold {
	var inputs []string
	inputs = append(inputs, config.ClockName)
	if config.StateName != "" {
		inputs = append(inputs, config.StateName)
	} else {
		inputs = append(inputs, config.LeaseManagerName)
	}
	if config.RaftName != "" {
		inputs = append(inputs, config.RaftName)
	}
	return dependency.Manifold{
		Inputs: inputs,
		Start:  config.start,
	}
}
|
go
|
{
"resource": ""
}
|
q4780
|
init
|
train
|
// init seeds the watcher's application map from the units currently
// assigned to the machine: for each unit it records its application's
// charm URL, that charm's (non-empty) LXD profile, and the set of unit
// names on this machine. Must complete before events are processed.
func (w *MachineLXDProfileWatcher) init(machine *Machine) error {
	units, err := machine.Units()
	if err != nil {
		return errors.Annotatef(err, "failed to get units to start MachineLXDProfileWatcher")
	}
	for _, unit := range units {
		appName := unit.Application()
		unitName := unit.Name()
		// Application already tracked: just add this unit to its set.
		if info, found := w.applications[appName]; found {
			info.units.Add(unitName)
			continue
		}
		app, err := w.modeler.Application(appName)
		if errors.IsNotFound(err) {
			// This is unlikely, but could happen because Units()
			// added the parent'd machine id to subordinates.
			// If the unit has no machineId, it will be added
			// to what is watched when the machineId is assigned.
			// Otherwise return an error.
			if unit.MachineId() != "" {
				return errors.Errorf("programming error, unit %s has machineId but not application", unitName)
			}
			logger.Errorf("unit %s has no application, nor machine id, start watching when machine id assigned.", unitName)
			w.metrics.LXDProfileChangeError.Inc()
			continue
		}
		chURL := app.CharmURL()
		info := appInfo{
			charmURL: chURL,
			units: set.NewStrings(unitName),
		}
		ch, err := w.modeler.Charm(chURL)
		if err != nil {
			return err
		}
		// Only record a profile when the charm actually declares one.
		lxdProfile := ch.LXDProfile()
		if !lxdProfile.Empty() {
			info.charmProfile = lxdProfile
		}
		w.applications[appName] = info
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4781
|
applicationCharmURLChange
|
train
|
func (w *MachineLXDProfileWatcher) applicationCharmURLChange(topic string, value interface{}) {
// We don't want to respond to any events until we have been fully initialized.
select {
case <-w.initialized:
case <-w.tomb.Dying():
return
}
var notify bool
defer func(notify *bool) {
if *notify {
w.notify()
w.metrics.LXDProfileChangeHit.Inc()
} else {
w.metrics.LXDProfileChangeMiss.Inc()
}
}(¬ify)
values, ok := value.(appCharmUrlChange)
if !ok {
w.logError("programming error, value not of type appCharmUrlChange")
return
}
appName, chURL := values.appName, values.chURL
info, ok := w.applications[appName]
if ok {
ch, err := w.modeler.Charm(chURL)
if err != nil {
w.logError(fmt.Sprintf("error getting charm %s to evaluate for lxd profile notification: %s", chURL, err))
return
}
// notify if:
// 1. the prior charm had a profile and the new one does not.
// 2. the new profile is not empty.
lxdProfile := ch.LXDProfile()
if (!info.charmProfile.Empty() && lxdProfile.Empty()) || !lxdProfile.Empty() {
logger.Tracef("notifying due to change of charm lxd profile for %s, machine-%s", appName, w.machineId)
notify = true
} else {
logger.Tracef("no notification of charm lxd profile needed for %s, machine-%s", appName, w.machineId)
}
info.charmProfile = lxdProfile
info.charmURL = chURL
w.applications[appName] = info
} else {
logger.Tracef("not watching %s on machine-%s", appName, w.machineId)
}
logger.Tracef("end of application charm url change %#v", w.applications)
}
|
go
|
{
"resource": ""
}
|
q4782
|
addUnit
|
train
|
func (w *MachineLXDProfileWatcher) addUnit(topic string, value interface{}) {
// We don't want to respond to any events until we have been fully initialized.
select {
case <-w.initialized:
case <-w.tomb.Dying():
return
}
var notify bool
defer func(notify *bool) {
if *notify {
logger.Tracef("notifying due to add unit requires lxd profile change machine-%s", w.machineId)
w.notify()
w.metrics.LXDProfileChangeHit.Inc()
} else {
w.metrics.LXDProfileChangeMiss.Inc()
}
}(¬ify)
unit, okUnit := value.(*Unit)
if !okUnit {
w.logError("programming error, value not of type *Unit")
return
}
isSubordinate := unit.Subordinate()
unitMachineId := unit.MachineId()
unitName := unit.Name()
switch {
case unitMachineId == "" && !isSubordinate:
logger.Tracef("%s has no machineId and not a sub", unitName)
return
case isSubordinate:
principal, err := w.modeler.Unit(unit.Principal())
if err != nil {
logger.Tracef("unit %s is subordinate, principal %s not found", unitName, unit.Principal())
return
}
if w.machineId != principal.MachineId() {
logger.Tracef("watching unit changes on machine-%s not machine-%s", w.machineId, unitMachineId)
return
}
case w.machineId != unitMachineId:
logger.Tracef("watching unit changes on machine-%s not machine-%s", w.machineId, unitMachineId)
return
}
logger.Tracef("start watching %q on machine-%s", unitName, w.machineId)
notify = w.add(unit)
logger.Debugf("end of unit change %#v", w.applications)
}
|
go
|
{
"resource": ""
}
|
q4783
|
removeUnit
|
train
|
func (w *MachineLXDProfileWatcher) removeUnit(topic string, value interface{}) {
// We don't want to respond to any events until we have been fully initialized.
select {
case <-w.initialized:
case <-w.tomb.Dying():
return
}
var notify bool
defer func(notify *bool) {
if *notify {
logger.Tracef("notifying due to remove unit requires lxd profile change machine-%s", w.machineId)
w.notify()
w.metrics.LXDProfileChangeHit.Inc()
} else {
w.metrics.LXDProfileChangeMiss.Inc()
}
}(¬ify)
rUnit, ok := value.(unitLXDProfileRemove)
if !ok {
w.logError("programming error, value not of type unitLXDProfileRemove")
return
}
app, ok := w.applications[rUnit.appName]
if !ok {
w.logError("programming error, unit removed before being added, application name not found")
return
}
if !app.units.Contains(rUnit.name) {
return
}
profile := app.charmProfile
app.units.Remove(rUnit.name)
if app.units.Size() == 0 {
// the application has no more units on this machine,
// stop watching it.
delete(w.applications, rUnit.appName)
}
// If there are additional units on the machine and the current
// application has an lxd profile, notify so it can be removed
// from the machine.
if len(w.applications) > 0 && !profile.Empty() {
notify = true
}
return
}
|
go
|
{
"resource": ""
}
|
q4784
|
provisionedChange
|
train
|
// provisionedChange handles the event raised when the machine becomes
// provisioned. Watchers are notified unconditionally so any required
// LXD profiles can be applied to the new instance.
func (w *MachineLXDProfileWatcher) provisionedChange(topic string, _ interface{}) {
	// We don't want to respond to any events until we have been fully initialized.
	select {
	case <-w.initialized:
	case <-w.tomb.Dying():
		return
	}
	logger.Tracef("notifying due to machine-%s now provisioned", w.machineId)
	w.metrics.LXDProfileChangeHit.Inc()
	w.notify()
}
|
go
|
{
"resource": ""
}
|
q4785
|
ToParams
|
train
|
// ToParams translates the state MachineJob into its multiwatcher
// equivalent, producing a "<unknown job N>" placeholder for values
// absent from jobNames.
func (job MachineJob) ToParams() multiwatcher.MachineJob {
	jujuJob, known := jobNames[job]
	if !known {
		return multiwatcher.MachineJob(fmt.Sprintf("<unknown job %d>", int(job)))
	}
	return jujuJob
}
|
go
|
{
"resource": ""
}
|
q4786
|
paramsJobsFromJobs
|
train
|
// paramsJobsFromJobs translates a slice of state machine jobs into
// their multiwatcher representation, preserving order.
func paramsJobsFromJobs(jobs []MachineJob) []multiwatcher.MachineJob {
	out := make([]multiwatcher.MachineJob, 0, len(jobs))
	for _, job := range jobs {
		out = append(out, job.ToParams())
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q4787
|
MigrationValue
|
train
|
// MigrationValue returns the serialization value used for the job in
// model migrations, or "unknown" for unrecognised jobs.
func (job MachineJob) MigrationValue() string {
	value, known := jobMigrationValue[job]
	if !known {
		return "unknown"
	}
	return value
}
|
go
|
{
"resource": ""
}
|
q4788
|
ContainerType
|
train
|
// ContainerType returns the container type recorded in the machine doc.
func (m *Machine) ContainerType() instance.ContainerType {
	raw := m.doc.ContainerType
	return instance.ContainerType(raw)
}
|
go
|
{
"resource": ""
}
|
q4789
|
SetKeepInstance
|
train
|
// SetKeepInstance records whether the cloud instance should be kept
// when the machine is removed. A missing instance doc is treated as
// success: there is no instance to keep.
func (m *Machine) SetKeepInstance(keepInstance bool) error {
	ops := []txn.Op{{
		C:      instanceDataC,
		Id:     m.doc.DocID,
		Assert: txn.DocExists,
		Update: bson.D{{"$set", bson.D{{"keep-instance", keepInstance}}}},
	}}
	err := m.st.db().RunTransaction(ops)
	if err == nil {
		return nil
	}
	// An aborted transaction means the instance doc does not exist;
	// onAbort maps that to nil since there is nothing to keep.
	return errors.Annotatef(onAbort(err, nil), "cannot set KeepInstance on machine %v", m)
}
|
go
|
{
"resource": ""
}
|
q4790
|
KeepInstance
|
train
|
// KeepInstance reports whether the cloud instance is flagged to
// survive removal of the machine.
func (m *Machine) KeepInstance() (bool, error) {
	data, err := getInstanceData(m.st, m.Id())
	if err != nil {
		return false, err
	}
	return data.KeepInstance, nil
}
|
go
|
{
"resource": ""
}
|
q4791
|
CharmProfiles
|
train
|
// CharmProfiles returns the names of the charm profiles recorded for
// the machine's instance, or a NotProvisioned error when there is no
// instance data yet.
func (m *Machine) CharmProfiles() ([]string, error) {
	data, err := getInstanceData(m.st, m.Id())
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NotProvisionedf("machine %v", m.Id())
		}
		return nil, err
	}
	return data.CharmProfiles, nil
}
|
go
|
{
"resource": ""
}
|
q4792
|
SetCharmProfiles
|
train
|
// SetCharmProfiles records the given list of profile names on the
// machine's instance data. An empty list is a no-op, as is a list that
// adds nothing new to the currently recorded profiles. The instance
// data doc must exist for the write to succeed.
func (m *Machine) SetCharmProfiles(profiles []string) error {
	if len(profiles) == 0 {
		return nil
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// Refresh on retry so we compare against current state.
		if attempt > 0 {
			if err := m.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
		}
		// Exit early if the Machine profiles doesn't need to change.
		mProfiles, err := m.CharmProfiles()
		if err != nil {
			return nil, errors.Trace(err)
		}
		mProfilesSet := set.NewStrings(mProfiles...)
		// NOTE(review): this only detects additions — a write that would
		// merely reorder or shrink the recorded list is skipped; confirm
		// that is the intended semantics.
		if mProfilesSet.Union(set.NewStrings(profiles...)).Size() == mProfilesSet.Size() {
			return nil, jujutxn.ErrNoOperations
		}
		ops := []txn.Op{{
			C: instanceDataC,
			Id: m.doc.DocID,
			Assert: txn.DocExists,
			Update: bson.D{{"$set", bson.D{{"charm-profiles", profiles}}}},
		}}
		return ops, nil
	}
	err := m.st.db().Run(buildTxn)
	return errors.Annotatef(err, "cannot update profiles for %q to %s", m, strings.Join(profiles, ", "))
}
|
go
|
{
"resource": ""
}
|
q4793
|
WantsVote
|
train
|
// WantsVote reports whether the machine is intended to hold a vote,
// computed from its jobs and the NoVote flag.
func (m *Machine) WantsVote() bool {
	jobs, noVote := m.doc.Jobs, m.doc.NoVote
	return wantsVote(jobs, noVote)
}
|
go
|
{
"resource": ""
}
|
q4794
|
SetHasVote
|
train
|
// SetHasVote records whether the machine currently has a vote,
// persisting via an update operation and then mirroring the value in
// the in-memory doc.
func (m *Machine) SetHasVote(hasVote bool) error {
	op := m.UpdateOperation()
	op.HasVote = &hasVote
	err := m.st.ApplyOperation(op)
	if err != nil {
		return errors.Trace(err)
	}
	m.doc.HasVote = hasVote
	return nil
}
|
go
|
{
"resource": ""
}
|
q4795
|
SetStopMongoUntilVersion
|
train
|
// SetStopMongoUntilVersion persists the given mongo version in the
// machine's stopmongountilversion field, then mirrors it in memory.
func (m *Machine) SetStopMongoUntilVersion(v mongo.Version) error {
	versionString := v.String()
	ops := []txn.Op{{
		C:      machinesC,
		Id:     m.doc.DocID,
		Update: bson.D{{"$set", bson.D{{"stopmongountilversion", versionString}}}},
	}}
	if err := m.st.db().RunTransaction(ops); err != nil {
		// An aborted transaction means the machine doc is gone.
		return fmt.Errorf("cannot set StopMongoUntilVersion %v: %v", m, onAbort(err, ErrDead))
	}
	m.doc.StopMongoUntilVersion = versionString
	return nil
}
|
go
|
{
"resource": ""
}
|
q4796
|
StopMongoUntilVersion
|
train
|
// StopMongoUntilVersion parses and returns the version recorded in the
// machine's stopmongountilversion field.
func (m *Machine) StopMongoUntilVersion() (mongo.Version, error) {
	raw := m.doc.StopMongoUntilVersion
	return mongo.NewVersion(raw)
}
|
go
|
{
"resource": ""
}
|
q4797
|
IsManual
|
train
|
// IsManual reports whether the machine was manually provisioned.
// Manually provisioned machines carry a "manual:"-prefixed nonce; the
// bootstrap machine (id "0") uses BootstrapNonce, so it is instead
// detected via the provider type, where "null" is an alias for
// "manual".
func (m *Machine) IsManual() (bool, error) {
	if strings.HasPrefix(m.doc.Nonce, manualMachinePrefix) {
		return true, nil
	}
	if m.doc.Id != "0" {
		return false, nil
	}
	// Bootstrap machine: inspect the model's provider type.
	model, err := m.st.Model()
	if err != nil {
		return false, errors.Trace(err)
	}
	cfg, err := model.ModelConfig()
	if err != nil {
		return false, err
	}
	providerType := cfg.Type()
	return providerType == "manual" || providerType == "null", nil
}
|
go
|
{
"resource": ""
}
|
q4798
|
AgentTools
|
train
|
// AgentTools returns the agent binaries recorded for the machine, or a
// NotFound error when they have not yet been set.
func (m *Machine) AgentTools() (*tools.Tools, error) {
	if m.doc.Tools == nil {
		return nil, errors.NotFoundf("agent binaries for machine %v", m)
	}
	// Return a copy so callers cannot mutate the doc's tools. The local
	// is named "result" rather than "tools" to avoid shadowing the
	// imported tools package.
	result := *m.doc.Tools
	return &result, nil
}
|
go
|
{
"resource": ""
}
|
q4799
|
checkVersionValidity
|
train
|
// checkVersionValidity returns an error if the binary version is
// missing either its series or its architecture.
func checkVersionValidity(v version.Binary) error {
	if v.Series != "" && v.Arch != "" {
		return nil
	}
	return fmt.Errorf("empty series or arch")
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.