_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4300
|
DestroyMachine
|
train
|
// DestroyMachine tears down the libvirt domain for c and deletes its disks.
// virsh failures are only logged: the domain may never have been fully
// created, and we still want every cleanup step to be attempted. Failing to
// resolve the guest directory is fatal, however, because the disk paths
// cannot be computed without it.
func DestroyMachine(c *kvmContainer) error {
	if c.runCmd == nil {
		c.runCmd = run
	}
	if c.pathfinder == nil {
		c.pathfinder = paths.DataDir
	}
	name := c.Name()
	if _, err := c.runCmd("", virsh, "destroy", name); err != nil {
		logger.Infof("`%s destroy %s` failed: %q", virsh, name, err)
	}
	// The --nvram flag removes the pflash drive for us. There is also a
	// `remove-all-storage` flag, but it is unclear whether that would also
	// remove the backing store, which we don't want to do; so the disks are
	// removed manually after undefining.
	if _, err := c.runCmd("", virsh, "undefine", "--nvram", name); err != nil {
		logger.Infof("`%s undefine --nvram %s` failed: %q", virsh, name, err)
	}
	guestBase, err := guestPath(c.pathfinder)
	if err != nil {
		return errors.Trace(err)
	}
	// Remove the system disk and the cloud-init datasource ISO; a failure
	// here is logged but does not abort, matching the best-effort contract.
	if rmErr := os.Remove(filepath.Join(guestBase, fmt.Sprintf("%s.qcow", name))); rmErr != nil {
		logger.Errorf("failed to remove system disk for %q: %s", name, rmErr)
	}
	if rmErr := os.Remove(filepath.Join(guestBase, fmt.Sprintf("%s-ds.iso", name))); rmErr != nil {
		logger.Errorf("failed to remove cloud-init data disk for %q: %s", name, rmErr)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4301
|
AutostartMachine
|
train
|
// AutostartMachine marks the container's libvirt domain to be started
// automatically when the host boots.
func AutostartMachine(c *kvmContainer) error {
	if c.runCmd == nil {
		c.runCmd = run
	}
	name := c.Name()
	if _, err := c.runCmd("", virsh, "autostart", name); err != nil {
		return errors.Annotatef(err, "failed to autostart domain %q", name)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4302
|
guestPath
|
train
|
// guestPath returns the directory holding guest disk images, derived from
// the base data dir reported by pathfinder for the current host series.
func guestPath(pathfinder func(string) (string, error)) (string, error) {
	hostSeries := series.MustHostSeries()
	baseDir, err := pathfinder(hostSeries)
	if err != nil {
		return "", errors.Trace(err)
	}
	return filepath.Join(baseDir, kvm, guestDir), nil
}
|
go
|
{
"resource": ""
}
|
q4303
|
writeDataSourceVolume
|
train
|
// writeDataSourceVolume builds the NoCloud datasource ISO (user-data,
// meta-data and network-config) for the machine described by params, writes
// it into the libvirt guest directory, and returns the ISO's path.
func writeDataSourceVolume(params CreateMachineParams) (string, error) {
	// The cloud-init user data file already lives in the template dir; the
	// sibling meta-data and network-config files are written next to it.
	templateDir := filepath.Dir(params.UserDataFile)
	if err := writeMetadata(templateDir); err != nil {
		return "", errors.Trace(err)
	}
	if err := writeNetworkConfig(params, templateDir); err != nil {
		return "", errors.Trace(err)
	}
	// Creating a working DS volume was a bit troublesome for me. I finally
	// found the details in the docs.
	// http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
	//
	// The arguments passed to create the DS volume for NoCloud must be
	// `user-data` and `meta-data`. So the `cloud-init` file we generate won't
	// work. Also, they must be exactly `user-data` and `meta-data` with no
	// path beforehand, so `$JUJUDIR/containers/juju-someid-0/user-data` also
	// fails.
	//
	// Furthermore, symlinks aren't followed by NoCloud. So we rename our
	// cloud-init file to user-data. We could change the output name in
	// juju/cloudconfig/containerinit/container_userdata.go:WriteUserData but
	// who knows what that will break.
	userDataPath := filepath.Join(templateDir, userdata)
	if err := os.Rename(params.UserDataFile, userDataPath); err != nil {
		return "", errors.Trace(err)
	}
	// Create data the source volume outputting the iso image to the guests
	// (AKA libvirt storage pool) directory.
	guestBase, err := guestPath(params.findPath)
	if err != nil {
		return "", errors.Trace(err)
	}
	dsPath := filepath.Join(guestBase, fmt.Sprintf("%s-ds.iso", params.Host()))
	// Use the template path as the working directory.
	// This allows us to run the command with user-data and meta-data as
	// relative paths to appease the NoCloud script.
	out, err := params.runCmd(
		templateDir,
		"genisoimage",
		"-output", dsPath,
		"-volid", "cidata",
		"-joliet", "-rock",
		userdata,
		metadata,
		networkconfig)
	if err != nil {
		return "", errors.Trace(err)
	}
	logger.Debugf("create ds image: %s", out)
	return dsPath, nil
}
|
go
|
{
"resource": ""
}
|
q4304
|
writeDomainXML
|
train
|
// writeDomainXML marshals the libvirt domain definition for p and writes it
// to <templateDir>/<hostname>.xml, returning the path of the written file.
func writeDomainXML(templateDir string, p CreateMachineParams) (string, error) {
	domainPath := filepath.Join(templateDir, fmt.Sprintf("%s.xml", p.Host()))
	dom, err := libvirt.NewDomain(p)
	if err != nil {
		return "", errors.Trace(err)
	}
	ml, err := xml.MarshalIndent(&dom, "", " ")
	if err != nil {
		return "", errors.Trace(err)
	}
	f, err := os.Create(domainPath)
	if err != nil {
		return "", errors.Trace(err)
	}
	if _, err = f.Write(ml); err != nil {
		// Best-effort close; the write failure is the error worth reporting.
		f.Close()
		return "", errors.Trace(err)
	}
	// Previously a failed Close was only logged inside a defer, so a short
	// write could go unnoticed by callers. Report it instead.
	if err = f.Close(); err != nil {
		return "", errors.Trace(err)
	}
	return domainPath, nil
}
|
go
|
{
"resource": ""
}
|
q4305
|
NewAPI
|
train
|
// NewAPI creates a new server-side Subnets API facade backed by the given
// state, wrapped in the networking-common state shim.
func NewAPI(st *state.State, res facade.Resources, auth facade.Authorizer) (SubnetsAPI, error) {
	shim, err := networkingcommon.NewStateShim(st)
	if err != nil {
		return nil, errors.Trace(err)
	}
	callCtx := state.CallContext(st)
	return newAPIWithBacking(shim, callCtx, res, auth)
}
|
go
|
{
"resource": ""
}
|
q4306
|
AllSpaces
|
train
|
// AllSpaces returns the tags of all network spaces known to the backing.
func (api *subnetsAPI) AllSpaces() (params.SpaceResults, error) {
	var out params.SpaceResults
	if err := api.checkCanRead(); err != nil {
		return out, err
	}
	spaces, err := api.backing.AllSpaces()
	if err != nil {
		return out, errors.Trace(err)
	}
	// TODO(dimitern): Add a Tag() a method and use it here. Too
	// early to do it now as it will just complicate the tests.
	out.Results = make([]params.SpaceResult, len(spaces))
	for i, space := range spaces {
		out.Results[i].Tag = names.NewSpaceTag(space.Name()).String()
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q4307
|
AddSubnets
|
train
|
// AddSubnets adds existing subnets to Juju after verifying write access.
func (api *subnetsAPI) AddSubnets(args params.AddSubnetsParams) (params.ErrorResults, error) {
	err := api.checkCanWrite()
	if err != nil {
		return params.ErrorResults{}, err
	}
	return networkingcommon.AddSubnets(api.context, api.backing, args)
}
|
go
|
{
"resource": ""
}
|
q4308
|
NewLeaderGetCommand
|
train
|
// NewLeaderGetCommand returns a leader-get hook command bound to ctx.
func NewLeaderGetCommand(ctx Context) (cmd.Command, error) {
	c := &leaderGetCommand{ctx: ctx}
	return c, nil
}
|
go
|
{
"resource": ""
}
|
q4309
|
PersonalCloudMetadata
|
train
|
// PersonalCloudMetadata loads the user's personal cloud definitions.
// A missing file is not an error: (nil, nil) is returned in that case.
func PersonalCloudMetadata() (map[string]Cloud, error) {
	clouds, err := ParseCloudMetadataFile(JujuPersonalCloudsPath())
	// os.IsNotExist(nil) is false, so a nil error falls through unchanged.
	if os.IsNotExist(err) {
		return nil, nil
	}
	return clouds, err
}
|
go
|
{
"resource": ""
}
|
q4310
|
ParseCloudMetadataFile
|
train
|
// ParseCloudMetadataFile reads the named file and parses its contents into
// a map of cloud name to Cloud definition.
func ParseCloudMetadataFile(file string) (map[string]Cloud, error) {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	return ParseCloudMetadata(data)
}
|
go
|
{
"resource": ""
}
|
q4311
|
WritePersonalCloudMetadata
|
train
|
// WritePersonalCloudMetadata marshals cloudsMap and writes it to the user's
// personal clouds file with owner-only permissions.
func WritePersonalCloudMetadata(cloudsMap map[string]Cloud) error {
	data, err := marshalCloudMetadata(cloudsMap)
	if err != nil {
		return errors.Trace(err)
	}
	const perm = os.FileMode(0600)
	return ioutil.WriteFile(JujuPersonalCloudsPath(), data, perm)
}
|
go
|
{
"resource": ""
}
|
q4312
|
inferContainerSpaces
|
train
|
// inferContainerSpaces picks a space for a container that expressed no space
// constraints of its own. Preference order: the host's single space; then
// defaultSpaceName if the host is a member of it; then the empty ("bridge
// all devices") space when the host has none. Multiple ambiguous host
// spaces yield an error.
func (p *BridgePolicy) inferContainerSpaces(m Machine, containerId, defaultSpaceName string) (set.Strings, error) {
	// Local container networking needs no space inference; the empty space
	// name acts as the "no particular space" sentinel.
	if p.ContainerNetworkingMethod == "local" {
		return set.NewStrings(""), nil
	}
	hostSpaces, err := m.AllSpaces()
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Debugf("container %q not qualified to a space, host machine %q is using spaces %s",
		containerId, m.Id(), network.QuoteSpaceSet(hostSpaces))
	if len(hostSpaces) == 1 {
		// Unambiguous: the host lives in exactly one space.
		return hostSpaces, nil
	}
	if defaultSpaceName != "" && hostSpaces.Contains(defaultSpaceName) {
		return set.NewStrings(defaultSpaceName), nil
	}
	if len(hostSpaces) == 0 {
		logger.Debugf("container has no desired spaces, " +
			"and host has no known spaces, triggering fallback " +
			"to bridge all devices")
		return set.NewStrings(""), nil
	}
	// More than one host space and no usable default: refuse to guess.
	return nil, errors.Errorf("no obvious space for container %q, host machine has spaces: %s",
		containerId, network.QuoteSpaceSet(hostSpaces))
}
|
go
|
{
"resource": ""
}
|
q4313
|
determineContainerSpaces
|
train
|
// determineContainerSpaces returns the set of spaces the container should
// be connected to, falling back to inference from the host machine when the
// container declares no desired spaces of its own.
func (p *BridgePolicy) determineContainerSpaces(m Machine, containerMachine Container, defaultSpaceName string) (set.Strings, error) {
	spaces, err := containerMachine.DesiredSpaces()
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Debugf("for container %q, found desired spaces: %s",
		containerMachine.Id(), network.QuoteSpaceSet(spaces))
	if len(spaces) > 0 {
		return spaces, nil
	}
	// The container carries no useful constraints; try to infer something
	// sensible from the host machine instead.
	spaces, err = p.inferContainerSpaces(m, containerMachine.Id(), defaultSpaceName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return spaces, nil
}
|
go
|
{
"resource": ""
}
|
q4314
|
findSpacesAndDevicesForContainer
|
train
|
// findSpacesAndDevicesForContainer resolves the container's spaces and the
// host machine's link-layer devices available in each of those spaces.
func (p *BridgePolicy) findSpacesAndDevicesForContainer(m Machine, containerMachine Container) (set.Strings, map[string][]LinkLayerDevice, error) {
	spaces, err := p.determineContainerSpaces(m, containerMachine, "")
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	devices, err := m.LinkLayerDevicesForSpaces(spaces.Values())
	if err != nil {
		logger.Errorf("findSpacesAndDevicesForContainer(%q) got error looking for host spaces: %v",
			containerMachine.Id(), err)
		return nil, nil, errors.Trace(err)
	}
	return spaces, devices, nil
}
|
go
|
{
"resource": ""
}
|
q4315
|
Manifold
|
train
|
// Manifold returns a dependency.Manifold whose worker keeps a controller
// agent's state-serving info and mongo memory profile in sync with the
// controller configuration. On non-controller machines the worker
// uninstalls itself; a mongo profile change triggers an agent restart.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.AgentName,
			config.APICallerName,
			config.CentralHubName,
		},
		Start: func(context dependency.Context) (worker.Worker, error) {
			// Get the agent.
			var agent coreagent.Agent
			if err := context.Get(config.AgentName, &agent); err != nil {
				return nil, err
			}
			// Grab the tag and ensure that it's for a machine.
			currentConfig := agent.CurrentConfig()
			tag, ok := currentConfig.Tag().(names.MachineTag)
			if !ok {
				return nil, errors.New("agent's tag is not a machine tag")
			}
			// Get API connection.
			var apiCaller base.APICaller
			if err := context.Get(config.APICallerName, &apiCaller); err != nil {
				return nil, err
			}
			apiState, err := apiagent.NewState(apiCaller)
			if err != nil {
				return nil, errors.Trace(err)
			}
			// If the machine needs State, grab the state serving info
			// over the API and write it to the agent configuration.
			if controller, err := isController(apiState, tag); err != nil {
				return nil, errors.Annotate(err, "checking controller status")
			} else if !controller {
				// Not a controller, nothing to do.
				return nil, dependency.ErrUninstall
			}
			// Do the initial state serving info and mongo profile checks
			// before attempting to get the central hub. The central hub is only
			// running when the agent is a controller. If the agent isn't a controller
			// but should be, the agent config will not have any state serving info
			// but the database will think that we should be. In those situations
			// we need to update the local config and restart.
			controllerConfig, err := apiState.ControllerConfig()
			if err != nil {
				return nil, errors.Annotate(err, "getting controller config")
			}
			// If the mongo memory profile from the controller config
			// is different from the one in the agent config we need to
			// restart the agent to apply the memory profile to the mongo
			// service.
			logger := config.Logger
			agentsMongoMemoryProfile := currentConfig.MongoMemoryProfile()
			configMongoMemoryProfile := mongo.MemoryProfile(controllerConfig.MongoMemoryProfile())
			mongoProfileChanged := agentsMongoMemoryProfile != configMongoMemoryProfile
			info, err := apiState.StateServingInfo()
			if err != nil {
				return nil, errors.Annotate(err, "getting state serving info")
			}
			err = agent.ChangeConfig(func(config coreagent.ConfigSetter) error {
				existing, hasInfo := config.StateServingInfo()
				if hasInfo {
					// Use the existing cert and key as they appear to
					// have been already updated by the cert updater
					// worker to have this machine's IP address as
					// part of the cert. This changed cert is never
					// put back into the database, so it isn't
					// reflected in the copy we have got from
					// apiState.
					info.Cert = existing.Cert
					info.PrivateKey = existing.PrivateKey
				}
				config.SetStateServingInfo(info)
				if mongoProfileChanged {
					logger.Debugf("setting agent config mongo memory profile: %q => %q", agentsMongoMemoryProfile, configMongoMemoryProfile)
					config.SetMongoMemoryProfile(configMongoMemoryProfile)
				}
				return nil
			})
			if err != nil {
				return nil, errors.Trace(err)
			}
			// If we need a restart, return the fatal error.
			if mongoProfileChanged {
				logger.Infof("restarting agent for new mongo memory profile")
				return nil, jworker.ErrRestartAgent
			}
			// Only get the hub if we are a controller and we haven't updated
			// the memory profile.
			var hub *pubsub.StructuredHub
			if err := context.Get(config.CentralHubName, &hub); err != nil {
				logger.Tracef("hub dependency not available")
				return nil, err
			}
			return NewWorker(WorkerConfig{
				Agent:        agent,
				Hub:          hub,
				MongoProfile: configMongoMemoryProfile,
				Logger:       config.Logger,
			})
		},
	}
}
|
go
|
{
"resource": ""
}
|
q4316
|
formatPoolListTabular
|
train
|
// formatPoolListTabular renders storage pool information as a table.
// value must be a map[string]PoolInfo; anything else is an error.
func formatPoolListTabular(writer io.Writer, value interface{}) error {
	if pools, ok := value.(map[string]PoolInfo); ok {
		formatPoolsTabular(writer, pools)
		return nil
	}
	var want map[string]PoolInfo
	return errors.Errorf("expected value of type %T, got %T", want, value)
}
|
go
|
{
"resource": ""
}
|
q4317
|
formatPoolsTabular
|
train
|
// formatPoolsTabular writes a Name/Provider/Attrs table for the given
// pools, ordered by pool name with attributes sorted by key so the output
// is deterministic.
func formatPoolsTabular(writer io.Writer, pools map[string]PoolInfo) {
	tw := output.TabWriter(writer)
	row := func(cells ...string) {
		fmt.Fprintln(tw, strings.Join(cells, "\t"))
	}
	row("Name", "Provider", "Attrs")
	sortedNames := make([]string, 0, len(pools))
	for poolName := range pools {
		sortedNames = append(sortedNames, poolName)
	}
	sort.Strings(sortedNames)
	for _, poolName := range sortedNames {
		pool := pools[poolName]
		// order by key for deterministic return
		keys := make([]string, 0, len(pool.Attrs))
		for key := range pool.Attrs {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		pairs := make([]string, 0, len(keys))
		for _, key := range keys {
			pairs = append(pairs, fmt.Sprintf("%v=%v", key, pool.Attrs[key]))
		}
		row(poolName, pool.Provider, strings.Join(pairs, " "))
	}
	tw.Flush()
}
|
go
|
{
"resource": ""
}
|
q4318
|
IsAuthorisationFailure
|
train
|
// IsAuthorisationFailure reports whether err represents an authorisation
// problem: a server-side SOAP fault carrying a NoPermission detail, or one
// whose message contains the known login-error fragment.
func IsAuthorisationFailure(err error) bool {
	cause := errors.Cause(err)
	if !soap.IsSoapFault(cause) {
		return false
	}
	fault := soap.ToSoapFault(cause)
	if fault.Code != serverFaultCode {
		return false
	}
	if _, ok := fault.Detail.Fault.(types.NoPermission); ok {
		return true
	}
	// Not an explicit permission fault; it could still be a login error.
	return strings.Contains(fault.String, loginErrorFragment)
}
|
go
|
{
"resource": ""
}
|
q4319
|
ProcessDyingModel
|
train
|
// ProcessDyingModel checks whether a Dying model has finished winding down.
// It returns an error when the model is not Dying, when a controller model
// still hosts other models, or when entity references remain on the model.
func (st *State) ProcessDyingModel() (err error) {
	model, err := st.Model()
	if err != nil {
		return errors.Trace(err)
	}
	if model.Life() != Dying {
		return errors.Trace(ErrModelNotDying)
	}
	if st.IsController() {
		// We should not mark the controller model as Dead until
		// all hosted models have been removed, otherwise the
		// hosted model environs may not have been destroyed.
		modelUUIDs, err := st.AllModelUUIDsIncludingDead()
		if err != nil {
			return errors.Trace(err)
		}
		// Subtract one to exclude the controller model itself from the count.
		if n := len(modelUUIDs) - 1; n > 0 {
			return errors.Trace(hasHostedModelsError(n))
		}
	}
	modelEntityRefsDoc, err := model.getEntityRefs()
	if err != nil {
		return errors.Trace(err)
	}
	// Any remaining machine/application references block the transition.
	if _, err := checkModelEntityRefsEmpty(modelEntityRefsDoc); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4320
|
NewMetricsDebugAPI
|
train
|
// NewMetricsDebugAPI creates a new API endpoint for the metrics debug
// facade. Only authenticated clients are permitted.
func NewMetricsDebugAPI(
	st *state.State,
	resources facade.Resources,
	authorizer facade.Authorizer,
) (*MetricsDebugAPI, error) {
	if !authorizer.AuthClient() {
		return nil, common.ErrPerm
	}
	api := &MetricsDebugAPI{state: st}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q4321
|
GetMetrics
|
train
|
// GetMetrics returns the metrics for the requested entities. When no
// entities are given, the latest metric value per key/unit for the whole
// model is returned in a single result. Per-entity failures are reported
// in the corresponding result's Error field.
func (api *MetricsDebugAPI) GetMetrics(args params.Entities) (params.MetricResults, error) {
	results := params.MetricResults{
		Results: make([]params.EntityMetrics, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		batches, err := api.state.MetricBatchesForModel()
		if err != nil {
			return results, errors.Annotate(err, "failed to get metrics")
		}
		return params.MetricResults{
			Results: []params.EntityMetrics{{
				Metrics: api.filterLastValuePerKeyPerUnit(batches),
			}},
		}, nil
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseTag(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		var batches []state.MetricBatch
		switch tag.Kind() {
		case names.UnitTagKind:
			batches, err = api.state.MetricBatchesForUnit(tag.Id())
			if err != nil {
				err = errors.Annotate(err, "failed to get metrics")
				results.Results[i].Error = common.ServerError(err)
				continue
			}
		case names.ApplicationTagKind:
			batches, err = api.state.MetricBatchesForApplication(tag.Id())
			if err != nil {
				err = errors.Annotate(err, "failed to get metrics")
				results.Results[i].Error = common.ServerError(err)
				continue
			}
		default:
			err := errors.Errorf("invalid tag %v", arg.Tag)
			results.Results[i].Error = common.ServerError(err)
			// Fix: previously fell through and still filtered the (nil)
			// batches for an unsupported tag kind; skip to the next entity.
			continue
		}
		results.Results[i].Metrics = api.filterLastValuePerKeyPerUnit(batches)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4322
|
SetMeterStatus
|
train
|
// SetMeterStatus sets the requested meter status on each entity; failures
// are reported per-entity in the Results slice.
func (api *MetricsDebugAPI) SetMeterStatus(args params.MeterStatusParams) (params.ErrorResults, error) {
	out := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Statuses)),
	}
	for i, arg := range args.Statuses {
		tag, parseErr := names.ParseTag(arg.Tag)
		if parseErr != nil {
			out.Results[i].Error = common.ServerError(parseErr)
			continue
		}
		status := state.MeterStatus{
			Code: state.MeterStatusFromString(arg.Code),
			Info: arg.Info,
		}
		if err := api.setEntityMeterStatus(tag, status); err != nil {
			out.Results[i].Error = common.ServerError(err)
		}
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q4323
|
NewPasswordChanger
|
train
|
// NewPasswordChanger returns a PasswordChanger that uses getCanChange to
// authorise password changes on entities looked up via st.
func NewPasswordChanger(st state.EntityFinder, getCanChange GetAuthFunc) *PasswordChanger {
	pc := &PasswordChanger{st: st, getCanChange: getCanChange}
	return pc
}
|
go
|
{
"resource": ""
}
|
q4324
|
SetPasswords
|
train
|
// SetPasswords sets the given password for each supplied entity, if the
// authorisation function permits the change. Per-entity failures go into
// the Results slice.
func (pc *PasswordChanger) SetPasswords(args params.EntityPasswords) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Changes)),
	}
	if len(args.Changes) == 0 {
		return result, nil
	}
	canChange, err := pc.getCanChange()
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	for i, change := range args.Changes {
		tag, parseErr := names.ParseTag(change.Tag)
		// An unparseable tag and an unauthorised one are reported
		// identically, so callers cannot probe for valid entity names.
		if parseErr != nil || !canChange(tag) {
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		if setErr := pc.setPassword(tag, change.Password); setErr != nil {
			result.Results[i].Error = ServerError(setErr)
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4325
|
NewAddCAASCommand
|
train
|
// NewAddCAASCommand builds the command used to add a CAAS (Kubernetes)
// cloud definition to the Juju client store and, optionally, a controller.
func NewAddCAASCommand(cloudMetadataStore CloudMetadataStore) cmd.Command {
	store := jujuclient.NewFileClientStore()
	cmd := &AddCAASCommand{
		OptionalControllerCommand: modelcmd.OptionalControllerCommand{Store: store},
		cloudMetadataStore:        cloudMetadataStore,
		store:                     store,
		newClientConfigReader: func(caasType string) (clientconfig.ClientConfigFunc, error) {
			return clientconfig.NewClientConfigReader(caasType)
		},
	}
	// The API connection is established lazily via this closure, so merely
	// constructing the command performs no controller I/O.
	cmd.addCloudAPIFunc = func() (AddCloudAPI, error) {
		root, err := cmd.NewAPIRoot(cmd.store, cmd.controllerName, "")
		if err != nil {
			return nil, errors.Trace(err)
		}
		return cloudapi.NewClient(root), nil
	}
	cmd.brokerGetter = cmd.newK8sClusterBroker
	cmd.getAllCloudDetails = jujucmdcloud.GetAllCloudDetails
	return modelcmd.WrapBase(cmd)
}
|
go
|
{
"resource": ""
}
|
q4326
|
getStdinPipe
|
train
|
// getStdinPipe returns a reader over data piped into stdin, or nil when
// stdin is an interactive terminal or carries no piped content.
func getStdinPipe(ctx *cmd.Context) (io.Reader, error) {
	stdIn, isFile := ctx.Stdin.(*os.File)
	if !isFile || terminal.IsTerminal(int(stdIn.Fd())) {
		return nil, nil
	}
	// stdIn from pipe but not terminal
	stat, err := stdIn.Stat()
	if err != nil {
		return nil, err
	}
	content, err := ioutil.ReadAll(stdIn)
	if err != nil {
		return nil, err
	}
	// workaround to get piped stdIn size because stat.Size() always == 0
	if (stat.Mode()&os.ModeCharDevice) == 0 && len(content) > 0 {
		return bytes.NewReader(content), nil
	}
	return nil, nil
}
|
go
|
{
"resource": ""
}
|
q4327
|
NewUnitsWatcher
|
train
|
// NewUnitsWatcher returns a UnitsWatcher backed by the given entity finder,
// facade resources and authorisation function.
func NewUnitsWatcher(st state.EntityFinder, resources facade.Resources, getCanWatch GetAuthFunc) *UnitsWatcher {
	w := &UnitsWatcher{
		st:          st,
		resources:   resources,
		getCanWatch: getCanWatch,
	}
	return w
}
|
go
|
{
"resource": ""
}
|
q4328
|
PreUpgradeSteps
|
train
|
// PreUpgradeSteps runs the checks that must pass before a Juju upgrade can
// proceed. CAAS (k8s) deployments skip the disk-space check entirely;
// controllers additionally refresh distro-info.
func PreUpgradeSteps(_ *state.StatePool, agentConf agent.Config, isController, isMaster, isCaas bool) error {
	if isCaas {
		logger.Debugf("skipping disk space checks for k8s controllers")
		return nil
	}
	if err := CheckFreeDiskSpace(agentConf.DataDir(), MinDiskSpaceMib); err != nil {
		return errors.Trace(err)
	}
	if !isController {
		return nil
	}
	// Update distro info in case the new Juju controller version is aware
	// of new supported series. The error is annotated rather than fatal
	// masking: the user can manually update it if they need to.
	logger.Infof("updating distro-info")
	return errors.Annotate(updateDistroInfo(), "failed to update distro-info")
}
|
go
|
{
"resource": ""
}
|
q4329
|
CheckFreeDiskSpace
|
train
|
// CheckFreeDiskSpace returns an error if the available space on the volume
// containing dir is below thresholdMib mebibytes.
func CheckFreeDiskSpace(dir string, thresholdMib uint64) error {
	available := du.NewDiskUsage(dir).Available()
	if available >= thresholdMib*humanize.MiByte {
		return nil
	}
	return errors.Errorf("not enough free disk space on %q for upgrade: %s available, require %dMiB",
		dir, humanize.IBytes(available), thresholdMib)
}
|
go
|
{
"resource": ""
}
|
q4330
|
ValidateState
|
train
|
// ValidateState returns a NotValid error unless state is one of the
// recognised okayStates values; the error lists the supported states.
func ValidateState(state string) error {
	if okayStates.Contains(state) {
		return nil
	}
	supported := okayStates.Values()
	sort.Strings(supported)
	msg := fmt.Sprintf(`status %q not supported; expected one of ["%s"]`,
		state, strings.Join(supported, `", "`))
	return errors.NewNotValid(nil, msg)
}
|
go
|
{
"resource": ""
}
|
q4331
|
NewAPI
|
train
|
// NewAPI creates a new Backups API facade. The caller must be an
// authenticated client with controller superuser access; backups are only
// available on the (non-CAAS) controller model.
func NewAPI(backend Backend, resources facade.Resources, authorizer facade.Authorizer) (*API, error) {
	// NotFound from the permission check is treated as "no permission",
	// not a hard failure.
	isControllerAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, backend.ControllerTag())
	if err != nil && !errors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	if !authorizer.AuthClient() || !isControllerAdmin {
		return nil, common.ErrPerm
	}
	// For now, backup operations are only permitted on the controller model.
	if !backend.IsController() {
		return nil, errors.New("backups are only supported from the controller model\nUse juju switch to select the controller model")
	}
	if backend.ModelType() == state.ModelTypeCAAS {
		return nil, errors.NotSupportedf("backups on kubernetes controllers")
	}
	// Get the backup paths.
	dataDir, err := extractResourceValue(resources, "dataDir")
	if err != nil {
		return nil, errors.Trace(err)
	}
	logsDir, err := extractResourceValue(resources, "logDir")
	if err != nil {
		return nil, errors.Trace(err)
	}
	config, err := backend.ModelConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	backupDir := config.BackupDir()
	paths := backups.Paths{
		BackupDir: backupDir,
		DataDir:   dataDir,
		LogsDir:   logsDir,
	}
	// Build the API.
	machineID, err := extractResourceValue(resources, "machineID")
	if err != nil {
		return nil, errors.Trace(err)
	}
	b := API{
		backend:   backend,
		paths:     &paths,
		machineID: machineID,
	}
	return &b, nil
}
|
go
|
{
"resource": ""
}
|
q4332
|
CreateResult
|
train
|
// CreateResult flattens backup metadata, plus the archive filename, into
// the wire-format params.BackupsMetadataResult.
func CreateResult(meta *backups.Metadata, filename string) params.BackupsMetadataResult {
	var result params.BackupsMetadataResult
	result.ID = meta.ID()
	result.Checksum = meta.Checksum()
	result.ChecksumFormat = meta.ChecksumFormat()
	result.Size = meta.Size()
	// Stored may be nil when the archive has not been stored yet.
	if meta.Stored() != nil {
		result.Stored = *(meta.Stored())
	}
	result.Started = meta.Started
	// Finished is nil while the backup is still in progress.
	if meta.Finished != nil {
		result.Finished = *meta.Finished
	}
	result.Notes = meta.Notes
	result.Model = meta.Origin.Model
	result.Machine = meta.Origin.Machine
	result.Hostname = meta.Origin.Hostname
	result.Version = meta.Origin.Version
	result.Series = meta.Origin.Series
	// TODO(wallyworld) - remove these ASAP
	// These are only used by the restore CLI when re-bootstrapping.
	// We will use a better solution but the way restore currently
	// works, we need them and they are no longer available via
	// bootstrap config. We will need to fix how re-bootstrap deals
	// with these keys to address the issue.
	result.CACert = meta.CACert
	result.CAPrivateKey = meta.CAPrivateKey
	result.Filename = filename
	return result
}
|
go
|
{
"resource": ""
}
|
q4333
|
NewInstanceMutaterAPI
|
train
|
// NewInstanceMutaterAPI creates a new instance-mutater API facade. Access
// is restricted to machine agents and controllers.
func NewInstanceMutaterAPI(st InstanceMutaterState,
	model ModelCache,
	resources facade.Resources,
	authorizer facade.Authorizer,
) (*InstanceMutaterAPI, error) {
	if !authorizer.AuthMachineAgent() && !authorizer.AuthController() {
		return nil, common.ErrPerm
	}
	authFunc := common.AuthFuncForMachineAgent(authorizer)
	api := &InstanceMutaterAPI{
		LifeGetter:  common.NewLifeGetter(st, authFunc),
		st:          st,
		model:       model,
		resources:   resources,
		authorizer:  authorizer,
		getAuthFunc: authFunc,
	}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q4334
|
CharmProfilingInfo
|
train
|
// CharmProfilingInfo returns the LXD profile information needed for the
// machine identified by arg. Tag and lookup failures are reported via the
// result's Error field rather than as a method error.
func (api *InstanceMutaterAPI) CharmProfilingInfo(arg params.Entity) (params.CharmProfilingInfoResult, error) {
	result := params.CharmProfilingInfoResult{
		ProfileChanges: make([]params.ProfileInfoResult, 0),
	}
	canAccess, err := api.getAuthFunc()
	if err != nil {
		return params.CharmProfilingInfoResult{}, errors.Trace(err)
	}
	// A bad tag is masked as a permission error to avoid leaking entity names.
	tag, err := names.ParseMachineTag(arg.Tag)
	if err != nil {
		result.Error = common.ServerError(common.ErrPerm)
		return result, nil
	}
	m, err := api.getCacheMachine(canAccess, tag)
	if err != nil {
		result.Error = common.ServerError(err)
		return result, nil
	}
	lxdProfileInfo, err := api.machineLXDProfileInfo(m)
	if err != nil {
		// NOTE(review): on error we still fall through and copy the
		// (likely zero-valued) info into the result below — presumably so
		// callers always receive a fully-shaped result; confirm intent.
		result.Error = common.ServerError(errors.Annotatef(err, "%s", tag))
	}
	// use the results from the machineLXDProfileInfo and apply them to the
	// result
	result.InstanceId = lxdProfileInfo.InstanceId
	result.ModelName = lxdProfileInfo.ModelName
	result.CurrentProfiles = lxdProfileInfo.MachineProfiles
	result.ProfileChanges = lxdProfileInfo.ProfileUnits
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4335
|
SetCharmProfiles
|
train
|
// SetCharmProfiles records the given LXD profile names against each of the
// supplied machines; per-machine failures go into the Results slice.
func (api *InstanceMutaterAPI) SetCharmProfiles(args params.SetProfileArgs) (params.ErrorResults, error) {
	canAccess, err := api.getAuthFunc()
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	results := make([]params.ErrorResult, len(args.Args))
	for i, arg := range args.Args {
		setErr := api.setOneMachineCharmProfiles(arg.Entity.Tag, arg.Profiles, canAccess)
		results[i].Error = common.ServerError(setErr)
	}
	return params.ErrorResults{Results: results}, nil
}
|
go
|
{
"resource": ""
}
|
q4336
|
WatchMachines
|
train
|
// WatchMachines starts a StringsWatcher over machines in the model and
// returns its id plus the initial change set. Controllers only.
func (api *InstanceMutaterAPI) WatchMachines() (params.StringsWatchResult, error) {
	var result params.StringsWatchResult
	if !api.authorizer.AuthController() {
		return result, common.ErrPerm
	}
	watch, err := api.model.WatchMachines()
	if err != nil {
		return result, err
	}
	changes, ok := <-watch.Changes()
	if !ok {
		return result, errors.Errorf("cannot obtain initial model machines")
	}
	result.StringsWatcherId = api.resources.Register(watch)
	result.Changes = changes
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4337
|
WatchContainers
|
train
|
// WatchContainers starts a StringsWatcher over the containers hosted on the
// machine identified by arg, returning its id and initial change set.
func (api *InstanceMutaterAPI) WatchContainers(arg params.Entity) (params.StringsWatchResult, error) {
	var result params.StringsWatchResult
	canAccess, err := api.getAuthFunc()
	if err != nil {
		return result, errors.Trace(err)
	}
	tag, err := names.ParseMachineTag(arg.Tag)
	if err != nil {
		return result, errors.Trace(err)
	}
	machine, err := api.getCacheMachine(canAccess, tag)
	if err != nil {
		return result, err
	}
	watch, err := machine.WatchContainers()
	if err != nil {
		return result, err
	}
	changes, ok := <-watch.Changes()
	if !ok {
		return result, errors.Errorf("cannot obtain initial machine containers")
	}
	result.StringsWatcherId = api.resources.Register(watch)
	result.Changes = changes
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4338
|
WatchLXDProfileVerificationNeeded
|
train
|
// WatchLXDProfileVerificationNeeded returns a NotifyWatcher per requested
// machine; each fires when that machine's LXD profiles may need checking.
func (api *InstanceMutaterAPI) WatchLXDProfileVerificationNeeded(args params.Entities) (params.NotifyWatchResults, error) {
	result := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	canAccess, err := api.getAuthFunc()
	if err != nil {
		return result, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		// A bad tag is masked as a permission error.
		machineTag, parseErr := names.ParseMachineTag(entity.Tag)
		if parseErr != nil {
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		watchResult, watchErr := api.watchOneEntityApplication(canAccess, machineTag)
		result.Results[i] = watchResult
		result.Results[i].Error = common.ServerError(watchErr)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4339
|
newTimedStatusUpdater
|
train
|
// newTimedStatusUpdater returns a closure that waits for the requested
// duration (on the supplied clock) and then fetches a fresh controller and
// models status snapshot from the API. On fetch failure it logs and returns
// a zero-valued status.
func newTimedStatusUpdater(ctx *cmd.Context, api destroyControllerAPI, controllerModelUUID string, clock clock.Clock) func(time.Duration) environmentStatus {
	return func(wait time.Duration) environmentStatus {
		if wait > 0 {
			<-clock.After(wait)
		}
		// If we hit an error, status.HostedModelCount will be 0, the polling
		// loop will stop and we'll go directly to destroying the model.
		ctrStatus, modelsStatus, err := newData(api, controllerModelUUID)
		if err != nil {
			ctx.Infof("Unable to get the controller summary from the API: %s.", err)
		}
		return environmentStatus{
			controller: ctrStatus,
			models:     modelsStatus,
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4340
|
MakeCloudSpecGetter
|
train
|
// MakeCloudSpecGetter returns a function that fetches the cloud spec for
// the model identified by the supplied tag, using a state obtained from
// (and released back to) the given pool.
func MakeCloudSpecGetter(pool Pool) func(names.ModelTag) (environs.CloudSpec, error) {
	return func(tag names.ModelTag) (environs.CloudSpec, error) {
		st, err := pool.Get(tag.Id())
		if err != nil {
			return environs.CloudSpec{}, errors.Trace(err)
		}
		defer st.Release()
		m, err := st.Model()
		if err != nil {
			return environs.CloudSpec{}, errors.Trace(err)
		}
		// TODO - CAAS(externalreality): Once cloud methods are migrated
		// to model EnvironConfigGetter will no longer need to contain
		// both state and model but only model.
		// TODO (manadart 2018-02-15): This potentially frees the state from
		// the pool. Release is called, but the state reference survives.
		return stateenvirons.EnvironConfigGetter{State: st.State, Model: m}.CloudSpec()
	}
}
|
go
|
{
"resource": ""
}
|
q4341
|
MakeCloudSpecGetterForModel
|
train
|
// MakeCloudSpecGetterForModel returns a function that fetches the cloud
// spec for the model backed by st. Requests for any other model's tag are
// rejected.
func MakeCloudSpecGetterForModel(st *state.State) func(names.ModelTag) (environs.CloudSpec, error) {
	return func(tag names.ModelTag) (environs.CloudSpec, error) {
		// Reject mismatched tags before doing any state work; previously
		// the model was fetched even when the request would be refused.
		if tag.Id() != st.ModelUUID() {
			return environs.CloudSpec{}, errors.New("cannot get cloud spec for this model")
		}
		m, err := st.Model()
		if err != nil {
			return environs.CloudSpec{}, errors.Trace(err)
		}
		configGetter := stateenvirons.EnvironConfigGetter{State: st, Model: m}
		return configGetter.CloudSpec()
	}
}
|
go
|
{
"resource": ""
}
|
q4342
|
MakeCloudSpecWatcherForModel
|
train
|
// MakeCloudSpecWatcherForModel returns a function that starts a watcher for
// cloud-spec changes on the model backed by st. Requests for any other
// model's tag are rejected.
func MakeCloudSpecWatcherForModel(st *state.State) func(names.ModelTag) (state.NotifyWatcher, error) {
	return func(tag names.ModelTag) (state.NotifyWatcher, error) {
		// Reject mismatched tags before fetching the model; previously the
		// model lookup ran even for requests that would be refused.
		if tag.Id() != st.ModelUUID() {
			return nil, errors.New("cannot get cloud spec for this model")
		}
		m, err := st.Model()
		if err != nil {
			return nil, errors.Trace(err)
		}
		return m.WatchCloudSpecChanges(), nil
	}
}
|
go
|
{
"resource": ""
}
|
q4343
|
ValidateUpgradeSeriesStatus
|
train
|
// ValidateUpgradeSeriesStatus checks that status is a recognised
// upgrade-series status. It returns the status unchanged when valid, and
// UpgradeSeriesNotStarted with a NotValid error otherwise.
func ValidateUpgradeSeriesStatus(status UpgradeSeriesStatus) (UpgradeSeriesStatus, error) {
	_, known := UpgradeSeriesStatusOrder[status]
	if known {
		return status, nil
	}
	return UpgradeSeriesNotStarted, errors.NotValidf("upgrade series status of %q is", status)
}
|
go
|
{
"resource": ""
}
|
q4344
|
CompareUpgradeSeriesStatus
|
train
|
// CompareUpgradeSeriesStatus compares two upgrade-series statuses by their
// position in UpgradeSeriesStatusOrder, returning -1, 0 or 1 in the usual
// comparator convention. An error is returned if either status is invalid.
func CompareUpgradeSeriesStatus(status1 UpgradeSeriesStatus, status2 UpgradeSeriesStatus) (int, error) {
	// Check each validation result individually; previously the error from
	// validating status1 was overwritten by the second call before it was
	// ever inspected, silently accepting an invalid first status.
	st1, err := ValidateUpgradeSeriesStatus(status1)
	if err != nil {
		return 0, err
	}
	st2, err := ValidateUpgradeSeriesStatus(status2)
	if err != nil {
		return 0, err
	}
	switch {
	case UpgradeSeriesStatusOrder[st1] == UpgradeSeriesStatusOrder[st2]:
		return 0, nil
	case UpgradeSeriesStatusOrder[st1] < UpgradeSeriesStatusOrder[st2]:
		return -1, nil
	default:
		return 1, nil
	}
}
|
go
|
{
"resource": ""
}
|
q4345
|
NewAuthenticator
|
train
|
// NewAuthenticator returns an HTTP request authenticator backed by the
// controller (system) state from the given pool. The clock is used for
// time-based behaviour such as expiring local login interactions.
func NewAuthenticator(statePool *state.StatePool, clock clock.Clock) (*Authenticator, error) {
	authContext, err := newAuthContext(statePool.SystemState(), clock)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &Authenticator{
		statePool: statePool,
		authContext: authContext,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4346
|
Maintain
|
train
|
// Maintain periodically expires outstanding local-user login interactions
// until the done channel is closed. It blocks, so callers should run it in
// its own goroutine.
func (a *Authenticator) Maintain(done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		case <-a.authContext.clock.After(authentication.LocalLoginInteractionTimeout):
			// Drop any interactions that have outlived the timeout.
			now := a.authContext.clock.Now()
			a.authContext.localUserInteractions.Expire(now)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4347
|
CreateLocalLoginMacaroon
|
train
|
// CreateLocalLoginMacaroon delegates to the underlying auth context to mint
// a macaroon that the given local user can present to log in.
func (a *Authenticator) CreateLocalLoginMacaroon(tag names.UserTag) (*macaroon.Macaroon, error) {
	return a.authContext.CreateLocalLoginMacaroon(tag)
}
|
go
|
{
"resource": ""
}
|
q4348
|
AddHandlers
|
train
|
// AddHandlers registers the local-login HTTP handlers on the given mux,
// using the controller (system) state as the entity finder.
func (a *Authenticator) AddHandlers(mux *apiserverhttp.Mux) {
	h := &localLoginHandlers{
		authCtxt: a.authContext,
		finder: a.statePool.SystemState(),
	}
	h.AddHandlers(mux)
}
|
go
|
{
"resource": ""
}
|
q4349
|
Authenticate
|
train
|
// Authenticate extracts the model UUID and login request from the incoming
// HTTP request and authenticates the login against that model.
func (a *Authenticator) Authenticate(req *http.Request) (httpcontext.AuthInfo, error) {
	var none httpcontext.AuthInfo
	uuid := httpcontext.RequestModelUUID(req)
	if uuid == "" {
		return none, errors.New("model UUID not found")
	}
	login, err := LoginRequest(req)
	if err != nil {
		return none, errors.Trace(err)
	}
	return a.AuthenticateLoginRequest(req.Host, uuid, login)
}
|
go
|
{
"resource": ""
}
|
q4350
|
mibToGb
|
train
|
// mibToGb converts a size in mebibytes (MiB) to gigabytes.
// NOTE(review): with integer division humanize.GiByte/humanize.GByte == 1
// (1073741824/1000000000), so this currently reduces to common.MiBToGiB —
// confirm whether the extra factor was meant to be a float conversion.
func mibToGb(m uint64) uint64 {
	return common.MiBToGiB(m) * (humanize.GiByte / humanize.GByte)
}
|
go
|
{
"resource": ""
}
|
q4351
|
buildMAASVolumeParameters
|
train
|
// buildMAASVolumeParameters returns the volumes to create for the given
// requested volume parameters and machine constraints. The root disk is
// always the first entry; its size comes from the constraints when set.
// It returns nil when there is nothing to request.
func buildMAASVolumeParameters(args []storage.VolumeParams, cons constraints.Value) ([]volumeInfo, error) {
	if len(args) == 0 && cons.RootDisk == nil {
		return nil, nil
	}
	root := volumeInfo{name: rootDiskLabel}
	if cons.RootDisk != nil {
		root.sizeInGB = mibToGb(*cons.RootDisk)
	}
	volumes := make([]volumeInfo, 0, len(args)+1)
	volumes = append(volumes, root)
	for _, v := range args {
		cfg, err := newStorageConfig(v.Attributes)
		if err != nil {
			return nil, errors.Trace(err)
		}
		volumes = append(volumes, volumeInfo{
			name:     v.Tag.Id(),
			sizeInGB: mibToGb(v.Size),
			tags:     cfg.tags,
		})
	}
	return volumes, nil
}
|
go
|
{
"resource": ""
}
|
q4352
|
One
|
train
|
// One looks up the single document with the given ID in the named
// collection and deserializes it into doc. A NotFound error is returned
// when no document with that ID exists.
func (sp statePersistence) One(collName, id string, doc interface{}) error {
	coll, closer := sp.st.db().GetCollection(collName)
	defer closer()
	switch err := coll.FindId(id).One(doc); {
	case err == mgo.ErrNotFound:
		return errors.NotFoundf(id)
	case err != nil:
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4353
|
All
|
train
|
// All runs the given query against the named collection and deserializes
// every matching document into docs.
func (sp statePersistence) All(collName string, query, docs interface{}) error {
	coll, closer := sp.st.db().GetCollection(collName)
	defer closer()
	err := coll.Find(query).All(docs)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4354
|
Run
|
train
|
// Run executes the given transaction source against the database,
// annotating any failure with a trace.
func (sp statePersistence) Run(transactions jujutxn.TransactionSource) error {
	err := sp.st.db().Run(transactions)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4355
|
NewStorage
|
train
|
// NewStorage returns a storage facade scoped to this model, backed by the
// state's mongo session.
func (sp *statePersistence) NewStorage() storage.Storage {
	modelUUID := sp.st.ModelUUID()
	// TODO(ericsnow) Copy the session?
	session := sp.st.session
	store := storage.NewStorage(modelUUID, session)
	return store
}
|
go
|
{
"resource": ""
}
|
q4356
|
ApplicationExistsOps
|
train
|
// ApplicationExistsOps returns the transaction operations that assert the
// identified application exists and is alive.
func (sp *statePersistence) ApplicationExistsOps(applicationID string) []txn.Op {
	return []txn.Op{{
		C: applicationsC,
		Id: applicationID,
		Assert: isAliveDoc,
	}}
}
|
go
|
{
"resource": ""
}
|
q4357
|
ClientStore
|
train
|
// ClientStore returns the client store used by this command. It panics if
// called before the store has been initialised (i.e. before Run started),
// since that indicates a programming error rather than a runtime failure.
func (c *ModelCommandBase) ClientStore() jujuclient.ClientStore {
	// c.store is set in maybeInitModel() below.
	if c.store == nil && !c.runStarted {
		panic("inappropriate method called before init finished")
	}
	return c.store
}
|
go
|
{
"resource": ""
}
|
q4358
|
SetModelName
|
train
|
// SetModelName records the model name for this command and whether the
// name may fall back to the current default model, then ensures the
// remaining model details are initialised where necessary.
func (c *ModelCommandBase) SetModelName(modelName string, allowDefault bool) error {
	c._modelName = modelName
	c.allowDefaultModel = allowDefault
	// Having recorded the name, make sure the rest of the model details
	// are populated if they have not been already.
	err := c.maybeInitModel()
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4359
|
ModelName
|
train
|
// ModelName returns the name of the model this command operates on,
// initialising the model details first if required.
func (c *ModelCommandBase) ModelName() (string, error) {
	c.assertRunStarted()
	err := c.maybeInitModel()
	if err != nil {
		return "", errors.Trace(err)
	}
	return c._modelName, nil
}
|
go
|
{
"resource": ""
}
|
q4360
|
ModelType
|
train
|
// ModelType returns the type (e.g. IAAS or CAAS) of the model this command
// operates on. The value is cached on the command after the first lookup.
func (c *ModelCommandBase) ModelType() (model.ModelType, error) {
	if c._modelType != "" {
		return c._modelType, nil
	}
	// If we need to look up the model type, we need to ensure we
	// have access to the model details.
	if err := c.maybeInitModel(); err != nil {
		return "", errors.Trace(err)
	}
	details, err := c.store.ModelByName(c._controllerName, c._modelName)
	if err != nil {
		if !c.runStarted {
			return "", errors.Trace(err)
		}
		// Run() has begun, so fall back to fetching the details via the
		// command's modelDetails rather than the local client store.
		details, err = c.modelDetails(c._controllerName, c._modelName)
		if err != nil {
			return "", errors.Trace(err)
		}
	}
	c._modelType = details.ModelType
	return c._modelType, nil
}
|
go
|
{
"resource": ""
}
|
q4361
|
SetActiveBranch
|
train
|
// SetActiveBranch records branchName as the active branch, both in the
// client store and on this command.
func (c *ModelCommandBase) SetActiveBranch(branchName string) error {
	_, details, err := c.ModelDetails()
	if err != nil {
		return errors.Annotate(err, "getting model details")
	}
	details.ActiveBranch = branchName
	err = c.store.UpdateModel(c._controllerName, c._modelName, *details)
	if err != nil {
		return err
	}
	c._activeBranch = branchName
	return nil
}
|
go
|
{
"resource": ""
}
|
q4362
|
ActiveBranch
|
train
|
// ActiveBranch returns the active branch of the model this command
// operates on. The value is cached on the command after the first lookup.
func (c *ModelCommandBase) ActiveBranch() (string, error) {
	if c._activeBranch != "" {
		return c._activeBranch, nil
	}
	// If we need to look up the model generation, we need to ensure we
	// have access to the model details.
	if err := c.maybeInitModel(); err != nil {
		return "", errors.Trace(err)
	}
	details, err := c.store.ModelByName(c._controllerName, c._modelName)
	if err != nil {
		if !c.runStarted {
			return "", errors.Trace(err)
		}
		// Run() has begun, so fall back to fetching the details via the
		// command's modelDetails rather than the local client store.
		details, err = c.modelDetails(c._controllerName, c._modelName)
		if err != nil {
			return "", errors.Trace(err)
		}
	}
	c._activeBranch = details.ActiveBranch
	return c._activeBranch, nil
}
|
go
|
{
"resource": ""
}
|
q4363
|
ControllerName
|
train
|
// ControllerName returns the name of the controller hosting the model this
// command acts on, initialising model details first if required.
func (c *ModelCommandBase) ControllerName() (string, error) {
	c.assertRunStarted()
	err := c.maybeInitModel()
	if err != nil {
		return "", errors.Trace(err)
	}
	return c._controllerName, nil
}
|
go
|
{
"resource": ""
}
|
q4364
|
NewAPIRoot
|
train
|
// NewAPIRoot returns an API connection to the model this command acts on.
func (c *ModelCommandBase) NewAPIRoot() (api.Connection, error) {
	// We need to call ModelDetails() here and not just ModelName() to force
	// a refresh of the internal model details if those are not yet stored locally.
	modelName, _, err := c.ModelDetails()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return c.newAPIRoot(modelName)
}
|
go
|
{
"resource": ""
}
|
q4365
|
newAPIRoot
|
train
|
// newAPIRoot opens an API connection to the named model on the command's
// current controller.
func (c *ModelCommandBase) newAPIRoot(modelName string) (api.Connection, error) {
	controller, err := c.ControllerName()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return c.CommandBase.NewAPIRoot(c.store, controller, modelName)
}
|
go
|
{
"resource": ""
}
|
q4366
|
Wrap
|
train
|
// Wrap returns the given ModelCommand wrapped with the standard model flag
// handling and base-command behaviour. The supplied options may adjust the
// wrapper (e.g. whether model flags are added or a default model is used).
func Wrap(c ModelCommand, options ...WrapOption) ModelCommand {
	wrapper := &modelCommandWrapper{
		ModelCommand: c,
		skipModelFlags: false,
		useDefaultModel: true,
	}
	for _, option := range options {
		option(wrapper)
	}
	// Define a new type so that we can embed the ModelCommand
	// interface one level deeper than cmd.Command, so that
	// we'll get the Command methods from WrapBase
	// and all the ModelCommand methods not in cmd.Command
	// from modelCommandWrapper.
	type embed struct {
		*modelCommandWrapper
	}
	return struct {
		embed
		cmd.Command
	}{
		Command: WrapBase(wrapper),
		embed: embed{wrapper},
	}
}
|
go
|
{
"resource": ""
}
|
q4367
|
validateCommandForModelType
|
train
|
// validateCommandForModelType returns an error when the wrapped command is
// marked IAAS-only or CAAS-only and the current model is of the other
// type. Commands with no such restriction always validate. Commands that
// implement modelSpecificCommand get a chance to customise the error.
func (w *modelCommandWrapper) validateCommandForModelType(runStarted bool) error {
	_, iaasOnly := w.inner().(IAASOnlyCommand)
	_, caasOnly := w.inner().(CAASOnlyCommand)
	if !caasOnly && !iaasOnly {
		return nil
	}
	modelType, err := w.ModelCommand.ModelType()
	if err != nil {
		err = errors.Cause(err)
		// We need to error if Run() has been invoked the model is known and there was
		// some other error. If the model is not yet known, we'll grab the details
		// during the Run() API call later.
		if runStarted || (err != ErrNoModelSpecified && !errors.IsNotFound(err)) {
			return errors.Trace(err)
		}
		return nil
	}
	if modelType == model.CAAS && iaasOnly {
		err = errors.Errorf("Juju command %q not supported on kubernetes models", w.Info().Name)
	}
	if modelType == model.IAAS && caasOnly {
		err = errors.Errorf("Juju command %q not supported on non-container models", w.Info().Name)
	}
	if c, ok := w.inner().(modelSpecificCommand); ok {
		return c.IncompatibleModel(err)
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q4368
|
BootstrapContext
|
train
|
// BootstrapContext returns a bootstrap context that wraps the given
// command context and verifies cloud credentials.
func BootstrapContext(cmdContext *cmd.Context) environs.BootstrapContext {
	return &bootstrapContext{
		Context: cmdContext,
		verifyCredentials: true,
	}
}
|
go
|
{
"resource": ""
}
|
q4369
|
BootstrapContextNoVerify
|
train
|
// BootstrapContextNoVerify returns a bootstrap context that wraps the
// given command context without verifying cloud credentials.
func BootstrapContextNoVerify(cmdContext *cmd.Context) environs.BootstrapContext {
	return &bootstrapContext{
		Context: cmdContext,
		verifyCredentials: false,
	}
}
|
go
|
{
"resource": ""
}
|
q4370
|
SplitModelName
|
train
|
// SplitModelName splits a model name that may be qualified with a
// controller ("controller:model") into its controller and model parts.
// An unqualified name yields an empty controller.
func SplitModelName(name string) (controller, model string) {
	controller, model, qualified := strings.Cut(name, ":")
	if !qualified {
		return "", name
	}
	return controller, model
}
|
go
|
{
"resource": ""
}
|
q4371
|
vmExtensionProperties
|
train
|
// vmExtensionProperties returns the Azure VM extension properties needed
// to run the appropriate CustomScript command on first boot for the given
// OS. Only Windows and CentOS are supported; other OSes (notably Ubuntu)
// are handled via cloud-init instead and yield a NotSupported error.
func vmExtensionProperties(os jujuos.OSType) (*compute.VirtualMachineExtensionProperties, error) {
	var commandToExecute, extensionPublisher, extensionType, extensionVersion string
	switch os {
	case jujuos.Windows:
		commandToExecute = windowsExecuteCustomScriptCommand
		extensionPublisher = windowsCustomScriptPublisher
		extensionType = windowsCustomScriptType
		extensionVersion = windowsCustomScriptVersion
	case jujuos.CentOS:
		commandToExecute = linuxExecuteCustomScriptCommand
		extensionPublisher = linuxCustomScriptPublisher
		extensionType = linuxCustomScriptType
		extensionVersion = linuxCustomScriptVersion
	default:
		// Ubuntu renders CustomData as cloud-config, and interprets
		// it with cloud-init. Windows and CentOS do not use cloud-init
		// on Azure.
		return nil, errors.NotSupportedf("CustomScript extension for OS %q", os)
	}
	extensionSettings := map[string]interface{}{
		"commandToExecute": commandToExecute,
	}
	return &compute.VirtualMachineExtensionProperties{
		Publisher: to.StringPtr(extensionPublisher),
		Type: to.StringPtr(extensionType),
		TypeHandlerVersion: to.StringPtr(extensionVersion),
		AutoUpgradeMinorVersion: to.BoolPtr(true),
		Settings: &extensionSettings,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4372
|
NewUpgradeCharmCommand
|
train
|
// NewUpgradeCharmCommand returns a command that upgrades an application's
// charm, wired up with the default API client factories and charm store
// URL getter.
func NewUpgradeCharmCommand() cmd.Command {
	cmd := &upgradeCharmCommand{
		DeployResources: resourceadapters.DeployResources,
		ResolveCharm: resolveCharm,
		NewCharmAdder: newCharmAdder,
		NewCharmClient: func(conn base.APICallCloser) CharmClient {
			return charms.NewClient(conn)
		},
		NewCharmUpgradeClient: func(conn base.APICallCloser) CharmAPIClient {
			return application.NewClient(conn)
		},
		NewModelConfigGetter: func(conn base.APICallCloser) ModelConfigGetter {
			return modelconfig.NewClient(conn)
		},
		NewResourceLister: func(conn base.APICallCloser) (ResourceLister, error) {
			resclient, err := resourceadapters.NewAPIClient(conn)
			if err != nil {
				return nil, err
			}
			return resclient, nil
		},
		CharmStoreURLGetter: getCharmStoreAPIURL,
	}
	return modelcmd.Wrap(cmd)
}
|
go
|
{
"resource": ""
}
|
q4373
|
addCharm
|
train
|
// addCharm interprets charmRef as either a local charm path or a charm
// store URL, adds the resolved charm to the controller via charmAdder, and
// returns its charm ID along with any charm store macaroon. Upgrades to a
// charm with a different name, to an unsupported series (without
// --force-series), or to the revision already running are rejected.
func (c *upgradeCharmCommand) addCharm(
	charmAdder CharmAdder,
	charmRepo *charmrepo.CharmStore,
	config *config.Config,
	oldURL *charm.URL,
	charmRef string,
	deployedSeries string,
	force bool,
) (charmstore.CharmID, *macaroon.Macaroon, error) {
	var id charmstore.CharmID
	// Charm may have been supplied via a path reference. If so, build a
	// local charm URL from the deployed series.
	ch, newURL, err := charmrepo.NewCharmAtPathForceSeries(charmRef, deployedSeries, c.ForceSeries)
	if err == nil {
		newName := ch.Meta().Name
		if newName != oldURL.Name {
			return id, nil, errors.Errorf("cannot upgrade %q to %q", oldURL.Name, newName)
		}
		addedURL, err := charmAdder.AddLocalCharm(newURL, ch, force)
		id.URL = addedURL
		return id, nil, err
	}
	if _, ok := err.(*charmrepo.NotFoundError); ok {
		return id, nil, errors.Errorf("no charm found at %q", charmRef)
	}
	// If we get a "not exists" or invalid path error then we attempt to interpret
	// the supplied charm reference as a URL below, otherwise we return the error.
	if err != os.ErrNotExist && !charmrepo.IsInvalidPathError(err) {
		return id, nil, err
	}
	refURL, err := charm.ParseURL(charmRef)
	if err != nil {
		return id, nil, errors.Trace(err)
	}
	// Charm has been supplied as a URL so we resolve and deploy using the store.
	newURL, channel, supportedSeries, err := c.ResolveCharm(charmRepo.ResolveWithChannel, refURL)
	if err != nil {
		return id, nil, errors.Trace(err)
	}
	id.Channel = channel
	_, seriesSupportedErr := charm.SeriesForCharm(deployedSeries, supportedSeries)
	if !c.ForceSeries && deployedSeries != "" && newURL.Series == "" && seriesSupportedErr != nil {
		series := []string{"no series"}
		if len(supportedSeries) > 0 {
			series = supportedSeries
		}
		return id, nil, errors.Errorf(
			"cannot upgrade from single series %q charm to a charm supporting %q. Use --force-series to override.",
			deployedSeries, series,
		)
	}
	// If no explicit revision was set with either SwitchURL
	// or Revision flags, discover the latest.
	if *newURL == *oldURL {
		if refURL.Revision != -1 {
			return id, nil, errors.Errorf("already running specified charm %q", newURL)
		}
		// No point in trying to upgrade a charm store charm when
		// we just determined that's the latest revision
		// available.
		return id, nil, errors.Errorf("already running latest charm %q", newURL)
	}
	curl, csMac, err := addCharmFromURL(charmAdder, newURL, channel, force)
	if err != nil {
		return id, nil, errors.Trace(err)
	}
	id.URL = curl
	return id, csMac, nil
}
|
go
|
{
"resource": ""
}
|
q4374
|
FormatOneline
|
train
|
// FormatOneline writes a one-line-per-unit summary of the status value to
// writer, showing each unit's name, public address, and its agent and
// workload statuses.
func FormatOneline(writer io.Writer, value interface{}) error {
	return formatOneline(writer, value, func(out io.Writer, format, uName string, u unitStatus, level int) {
		status := fmt.Sprintf(
			"agent:%s, workload:%s",
			u.JujuStatusInfo.Current,
			u.WorkloadStatusInfo.Current,
		)
		fmt.Fprintf(out, format,
			uName,
			u.PublicAddress,
			status,
		)
	})
}
|
go
|
{
"resource": ""
}
|
q4375
|
EnsureCachedImage
|
train
|
// EnsureCachedImage synchronises the local image cache so that a backing
// image matching the start parameters (arch, series, stream) is available,
// downloading it from ImageDownloadURL when one is set. ARM64 guests use a
// UEFI firmware image; all other architectures use BIOS. An image that is
// already cached is not treated as an error.
func (c *kvmContainer) EnsureCachedImage(params StartParams) error {
	var srcFunc func() simplestreams.DataSource
	if params.ImageDownloadURL != "" {
		srcFunc = func() simplestreams.DataSource {
			return imagedownloads.NewDataSource(params.ImageDownloadURL)
		}
	}
	var fType = BIOSFType
	if params.Arch == arch.ARM64 {
		fType = UEFIFType
	}
	sp := syncParams{
		arch: params.Arch,
		series: params.Series,
		stream: params.Stream,
		fType: fType,
		srcFunc: srcFunc,
	}
	logger.Debugf("synchronise images for %s %s %s %s", sp.arch, sp.series, sp.stream, params.ImageDownloadURL)
	var callback ProgressCallback
	if params.StatusCallback != nil {
		callback = func(msg string) {
			_ = params.StatusCallback(status.Provisioning, msg, nil)
		}
	}
	if err := Sync(sp, nil, callback); err != nil {
		if !errors.IsAlreadyExists(err) {
			return errors.Trace(err)
		}
		logger.Debugf("image already cached %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4376
|
Start
|
train
|
// Start creates the KVM guest domain for this container from the given
// start parameters and then marks it to autostart with the host. Only
// bridge networking is currently supported; any other network type is an
// error.
func (c *kvmContainer) Start(params StartParams) error {
	var bridge string
	var interfaces []libvirt.InterfaceInfo
	if params.Network != nil {
		if params.Network.NetworkType == container.BridgeNetwork {
			bridge = params.Network.Device
			for _, iface := range params.Network.Interfaces {
				interfaces = append(interfaces, interfaceInfo{config: iface})
			}
		} else {
			err := errors.New("Non-bridge network devices not yet supported")
			logger.Infof(err.Error())
			return err
		}
	}
	logger.Debugf("create the machine %s", c.name)
	if params.StatusCallback != nil {
		_ = params.StatusCallback(status.Provisioning, "Creating instance", nil)
	}
	if err := CreateMachine(CreateMachineParams{
		Hostname: c.name,
		Series: params.Series,
		UserDataFile: params.UserDataFile,
		NetworkConfigData: params.NetworkConfigData,
		NetworkBridge: bridge,
		Memory: params.Memory,
		CpuCores: params.CpuCores,
		RootDisk: params.RootDisk,
		Interfaces: interfaces,
	}); err != nil {
		return err
	}
	logger.Debugf("Set machine %s to autostart", c.name)
	if params.StatusCallback != nil {
		_ = params.StatusCallback(status.Provisioning, "Starting instance", nil)
	}
	return AutostartMachine(c)
}
|
go
|
{
"resource": ""
}
|
q4377
|
InstanceAvailabilityZoneNames
|
train
|
// InstanceAvailabilityZoneNames returns the availability zone of each of
// the given instance ids. The Oracle provider exposes only a single
// "default" zone, so every known instance maps to it.
func (o *OracleEnviron) InstanceAvailabilityZoneNames(ctx context.ProviderCallContext, ids []instance.Id) ([]string, error) {
	instances, err := o.Instances(ctx, ids)
	if err != nil && err != environs.ErrPartialInstances {
		return nil, err
	}
	zones := make([]string, len(instances))
	for i := range zones {
		zones[i] = "default"
	}
	return zones, nil
}
|
go
|
{
"resource": ""
}
|
q4378
|
NewOracleEnviron
|
train
|
// NewOracleEnviron returns a new OracleEnviron composed from the given
// provider, open parameters, API client and clock, wiring up the environ's
// instance namespace, firewaller, networking and a local random source.
func NewOracleEnviron(p *EnvironProvider, args environs.OpenParams, client EnvironAPI, c clock.Clock) (env *OracleEnviron, err error) {
	if client == nil {
		return nil, errors.NotFoundf("oracle client")
	}
	if p == nil {
		// Fixed typo in the error message ("proivder" -> "provider").
		return nil, errors.NotFoundf("environ provider")
	}
	env = &OracleEnviron{
		p:      p,
		spec:   args.Cloud,
		cfg:    args.Config,
		mutex:  &sync.Mutex{},
		client: client,
		clock:  c,
	}
	// The namespace derives unique instance names for this model.
	env.namespace, err = instance.NewNamespace(env.cfg.UUID())
	if err != nil {
		return nil, errors.Trace(err)
	}
	env.Firewaller = oraclenet.NewFirewall(env, client, c)
	env.Networking = oraclenet.NewEnviron(client, env)
	// Seed a local random source from the clock so this environ does not
	// depend on the shared global math/rand state.
	env.rand = rand.New(rand.NewSource(env.clock.Now().UTC().UnixNano()))
	return env, nil
}
|
go
|
{
"resource": ""
}
|
q4379
|
buildSpacesMap
|
train
|
// buildSpacesMap returns a map of juju-sanitised space names to provider
// space info, plus a map of provider space IDs to provider space names.
func (e *OracleEnviron) buildSpacesMap(ctx context.ProviderCallContext) (map[string]network.SpaceInfo, map[string]string, error) {
	empty := set.Strings{}
	providerIdMap := map[string]string{}
	// NOTE (gsamfira): This seems brittle to me, and I would much rather get this
	// from state, as that information should already be there from the discovered spaces
	// and that is the information that gets presented to the user when running:
	// juju spaces
	// However I have not found a clean way to access that info from the provider,
	// without creating a facade. Someone with more knowledge on this might be able to chip in.
	spaces, err := e.Spaces(ctx)
	if err != nil {
		return nil, providerIdMap, errors.Trace(err)
	}
	spaceMap := make(map[string]network.SpaceInfo)
	for _, space := range spaces {
		// Track already-used names so converted names stay unique.
		jujuName := network.ConvertSpaceName(space.Name, empty)
		spaceMap[jujuName] = space
		empty.Add(jujuName)
		providerIdMap[string(space.ProviderId)] = space.Name
	}
	return spaceMap, providerIdMap, nil
}
|
go
|
{
"resource": ""
}
|
q4380
|
StopInstances
|
train
|
// StopInstances terminates the instances with the given ids. It is not an
// error when none of the ids correspond to an existing instance.
func (o *OracleEnviron) StopInstances(ctx context.ProviderCallContext, ids ...instance.Id) error {
	oracleInstances, err := o.getOracleInstances(ids...)
	switch {
	case err == environs.ErrNoInstances:
		return nil
	case err != nil:
		return err
	}
	logger.Debugf("terminating instances %v", ids)
	return o.terminateInstances(oracleInstances...)
}
|
go
|
{
"resource": ""
}
|
q4381
|
getOracleInstances
|
train
|
// getOracleInstances returns the oracle instances matching the given ids.
// It returns ErrNoInstances when the provider reports no instances at all,
// and ErrPartialInstances (with the partial result) when only some ids
// could be matched.
func (o *OracleEnviron) getOracleInstances(ids ...instance.Id) ([]*oracleInstance, error) {
	ret := make([]*oracleInstance, 0, len(ids))
	resp, err := o.client.AllInstances(nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(resp.Result) == 0 {
		return nil, environs.ErrNoInstances
	}
	for _, val := range resp.Result {
		// Preserve original behaviour: with no ids requested, never
		// construct instances (and never surface construction errors).
		if len(ids) == 0 {
			break
		}
		// Construct the instance once per result, rather than once per
		// (result, id) pair as before — newInstance does not depend on id.
		oInst, err := newInstance(val, o)
		if err != nil {
			return nil, errors.Trace(err)
		}
		for _, id := range ids {
			if oInst.Id() == id {
				ret = append(ret, oInst)
				break
			}
		}
	}
	if len(ret) < len(ids) {
		return ret, environs.ErrPartialInstances
	}
	return ret, nil
}
|
go
|
{
"resource": ""
}
|
q4382
|
AllInstances
|
train
|
// AllInstances returns every instance in this environ that is tagged as
// belonging to the current model.
func (o *OracleEnviron) AllInstances(ctx context.ProviderCallContext) ([]envinstance.Instance, error) {
	filter := tagValue{tags.JujuModel, o.Config().UUID()}
	instances, err := o.allInstances(filter)
	if err != nil {
		return nil, err
	}
	ret := make([]envinstance.Instance, 0, len(instances))
	for _, inst := range instances {
		ret = append(ret, inst)
	}
	return ret, nil
}
|
go
|
{
"resource": ""
}
|
q4383
|
MaintainInstance
|
train
|
// MaintainInstance is a no-op for the Oracle provider; it exists
// (presumably to satisfy the environs instance broker interface — confirm
// against the interface definition) and always succeeds.
func (o *OracleEnviron) MaintainInstance(ctx context.ProviderCallContext, args environs.StartInstanceParams) error {
	return nil
}
|
go
|
{
"resource": ""
}
|
q4384
|
Config
|
train
|
// Config returns the current configuration of the environ. Access is
// serialised with the environ mutex since the config may be replaced
// concurrently (presumably by a SetConfig elsewhere — confirm).
func (o *OracleEnviron) Config() *config.Config {
	o.mutex.Lock()
	defer o.mutex.Unlock()
	return o.cfg
}
|
go
|
{
"resource": ""
}
|
q4385
|
ConstraintsValidator
|
train
|
// ConstraintsValidator returns a constraints validator configured for the
// Oracle provider: container, cpu-power, root-disk and virt-type
// constraints are unsupported, and only the i386/amd64 architectures are
// recognised.
func (o *OracleEnviron) ConstraintsValidator(ctx context.ProviderCallContext) (constraints.Validator, error) {
	validator := constraints.NewValidator()
	// Constraints the Oracle provider cannot honour.
	validator.RegisterUnsupported([]string{
		constraints.Container,
		constraints.CpuPower,
		constraints.RootDisk,
		constraints.VirtType,
	})
	validator.RegisterVocabulary(constraints.Arch, []string{arch.I386, arch.AMD64})
	logger.Infof("Returning constraints validator: %v", validator)
	return validator, nil
}
|
go
|
{
"resource": ""
}
|
q4386
|
DestroyController
|
train
|
// DestroyController destroys this environ and then terminates every
// instance managed by the given controller. A failure to destroy the
// environ itself is logged but does not abort the instance cleanup.
func (o *OracleEnviron) DestroyController(ctx context.ProviderCallContext, controllerUUID string) error {
	err := o.Destroy(ctx)
	if err != nil {
		logger.Errorf("Failed to destroy environment through controller: %s", errors.Trace(err))
	}
	instances, err := o.allControllerManagedInstances(controllerUUID)
	if err != nil {
		// Nothing left to stop.
		if err == environs.ErrNoInstances {
			return nil
		}
		return errors.Trace(err)
	}
	ids := make([]instance.Id, len(instances))
	for i, val := range instances {
		ids[i] = val.Id()
	}
	return o.StopInstances(ctx, ids...)
}
|
go
|
{
"resource": ""
}
|
q4387
|
InstanceTypes
|
train
|
// InstanceTypes is not implemented for the Oracle provider; it always
// returns an empty result and no error.
func (o *OracleEnviron) InstanceTypes(context.ProviderCallContext, constraints.Value) (envinstance.InstanceTypesWithCostMetadata, error) {
	var i envinstance.InstanceTypesWithCostMetadata
	return i, nil
}
|
go
|
{
"resource": ""
}
|
q4388
|
createInstance
|
train
|
// createInstance launches a single instance with the given parameters and
// wraps the API response in an oracleInstance. Launching more than one
// instance per call is not supported.
func (e *OracleEnviron) createInstance(params oci.InstanceParams) (*oracleInstance, error) {
	if len(params.Instances) > 1 {
		return nil, errors.NotSupportedf("launching multiple instances")
	}
	logger.Debugf("running createInstance")
	resp, err := e.client.CreateInstance(params)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Use "inst" rather than shadowing the "instance" package name.
	inst, err := newInstance(resp.Instances[0], e)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return inst, nil
}
|
go
|
{
"resource": ""
}
|
q4389
|
IsAvailabilityZoneIndependent
|
train
|
// IsAvailabilityZoneIndependent reports whether the cause of err
// implements AvailabilityZoneError and declares itself independent of the
// chosen availability zone.
func IsAvailabilityZoneIndependent(err error) bool {
	zerr, ok := errors.Cause(err).(AvailabilityZoneError)
	return ok && zerr.AvailabilityZoneIndependent()
}
|
go
|
{
"resource": ""
}
|
q4390
|
OSDependentEnvVars
|
train
|
func OSDependentEnvVars(paths Paths) []string {
switch jujuos.HostOS() {
case jujuos.Windows:
return windowsEnv(paths)
case jujuos.Ubuntu:
return ubuntuEnv(paths)
case jujuos.CentOS:
return centosEnv(paths)
case jujuos.OpenSUSE:
return opensuseEnv(paths)
}
return nil
}
|
go
|
{
"resource": ""
}
|
q4391
|
windowsEnv
|
train
|
// windowsEnv returns the environment variables needed on Windows: the
// tools directory is prepended to Path, and the charm's PowerShell modules
// directory is appended to PSModulePath.
func windowsEnv(paths Paths) []string {
	modules := filepath.Join(paths.GetCharmDir(), "lib", "Modules")
	pathVar := paths.GetToolsDir() + ";" + os.Getenv("Path")
	psModulePath := os.Getenv("PSModulePath") + ";" + modules
	return []string{
		"Path=" + pathVar,
		"PSModulePath=" + psModulePath,
	}
}
|
go
|
{
"resource": ""
}
|
q4392
|
stateStepsFor223
|
train
|
// stateStepsFor223 returns the database upgrade steps for Juju 2.2.3,
// which run against the database master.
func stateStepsFor223() []Step {
	return []Step{
		&upgradeStep{
			description: "add max-action-age and max-action-size config settings",
			targets: []Target{DatabaseMaster},
			run: func(context Context) error {
				return context.State().AddActionPruneSettings()
			},
		},
	}
}
|
go
|
{
"resource": ""
}
|
q4393
|
DestroyController
|
train
|
// DestroyController destroys the controller. At this API version storage
// is always destroyed, and supplying DestroyStorage explicitly is an
// error.
func (c *ControllerAPIv3) DestroyController(args params.DestroyControllerArgs) error {
	if args.DestroyStorage != nil {
		return errors.New("destroy-storage unexpected on the v3 API")
	}
	// Force the pre-v4 behaviour: storage is always destroyed.
	destroyStorage := true
	args.DestroyStorage = &destroyStorage
	return destroyController(c.state, c.statePool, c.authorizer, args)
}
|
go
|
{
"resource": ""
}
|
q4394
|
DestroyController
|
train
|
// DestroyController destroys the controller according to the supplied
// arguments.
func (c *ControllerAPI) DestroyController(args params.DestroyControllerArgs) error {
	return destroyController(c.state, c.statePool, c.authorizer, args)
}
|
go
|
{
"resource": ""
}
|
q4395
|
Resource2API
|
train
|
// Resource2API converts a resource.Resource into its API (params)
// representation.
func Resource2API(res resource.Resource) params.Resource {
	return params.Resource{
		CharmResource: CharmResource2API(res.Resource),
		ID: res.ID,
		PendingID: res.PendingID,
		ApplicationID: res.ApplicationID,
		Username: res.Username,
		Timestamp: res.Timestamp,
	}
}
|
go
|
{
"resource": ""
}
|
q4396
|
APIResult2ApplicationResources
|
train
|
// APIResult2ApplicationResources converts a ResourcesResult API value into
// resource.ApplicationResources, converting application resources, unit
// resources (with per-resource download progress) and charm store
// resources. Malformed data from the server is surfaced as an error.
func APIResult2ApplicationResources(apiResult params.ResourcesResult) (resource.ApplicationResources, error) {
	var result resource.ApplicationResources
	if apiResult.Error != nil {
		// TODO(ericsnow) Return the resources too?
		err := common.RestoreError(apiResult.Error)
		return resource.ApplicationResources{}, errors.Trace(err)
	}
	for _, apiRes := range apiResult.Resources {
		res, err := API2Resource(apiRes)
		if err != nil {
			// This could happen if the server is misbehaving
			// or non-conforming.
			// TODO(ericsnow) Aggregate errors?
			return resource.ApplicationResources{}, errors.Annotate(err, "got bad data from server")
		}
		result.Resources = append(result.Resources, res)
	}
	for _, unitRes := range apiResult.UnitResources {
		tag, err := names.ParseUnitTag(unitRes.Tag)
		if err != nil {
			return resource.ApplicationResources{}, errors.Annotate(err, "got bad data from server")
		}
		// Record the resource names seen for this unit so download
		// progress can be validated against them below.
		resNames := map[string]bool{}
		unitResources := resource.UnitResources{Tag: tag}
		for _, apiRes := range unitRes.Resources {
			res, err := API2Resource(apiRes)
			if err != nil {
				return resource.ApplicationResources{}, errors.Annotate(err, "got bad data from server")
			}
			resNames[res.Name] = true
			unitResources.Resources = append(unitResources.Resources, res)
		}
		if len(unitRes.DownloadProgress) > 0 {
			unitResources.DownloadProgress = make(map[string]int64)
			for resName, progress := range unitRes.DownloadProgress {
				// Progress for a resource the unit doesn't have is bogus.
				if _, ok := resNames[resName]; !ok {
					err := errors.Errorf("got progress from unrecognized resource %q", resName)
					return resource.ApplicationResources{}, errors.Annotate(err, "got bad data from server")
				}
				unitResources.DownloadProgress[resName] = progress
			}
		}
		result.UnitResources = append(result.UnitResources, unitResources)
	}
	for _, chRes := range apiResult.CharmStoreResources {
		res, err := API2CharmResource(chRes)
		if err != nil {
			return resource.ApplicationResources{}, errors.Annotate(err, "got bad data from server")
		}
		result.CharmStoreResources = append(result.CharmStoreResources, res)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4397
|
API2Resource
|
train
|
// API2Resource converts an API Resource struct into the corresponding
// resource.Resource, validating the result before returning it.
func API2Resource(apiRes params.Resource) (resource.Resource, error) {
	charmRes, err := API2CharmResource(apiRes.CharmResource)
	if err != nil {
		return resource.Resource{}, errors.Trace(err)
	}
	res := resource.Resource{
		Resource:      charmRes,
		ID:            apiRes.ID,
		PendingID:     apiRes.PendingID,
		ApplicationID: apiRes.ApplicationID,
		Username:      apiRes.Username,
		Timestamp:     apiRes.Timestamp,
	}
	if err := res.Validate(); err != nil {
		return res, errors.Trace(err)
	}
	return res, nil
}
|
go
|
{
"resource": ""
}
|
q4398
|
CharmResource2API
|
train
|
// CharmResource2API converts a charmresource.Resource into its API
// (params) representation, stringifying the type and origin and flattening
// the fingerprint to bytes.
func CharmResource2API(res charmresource.Resource) params.CharmResource {
	return params.CharmResource{
		Name: res.Name,
		Type: res.Type.String(),
		Path: res.Path,
		Description: res.Description,
		Origin: res.Origin.String(),
		Revision: res.Revision,
		Fingerprint: res.Fingerprint.Bytes(),
		Size: res.Size,
	}
}
|
go
|
{
"resource": ""
}
|
q4399
|
API2CharmResource
|
train
|
// API2CharmResource converts an API CharmResource struct into a
// charmresource.Resource, parsing the type, origin and fingerprint and
// validating the assembled result.
func API2CharmResource(apiInfo params.CharmResource) (charmresource.Resource, error) {
	var res charmresource.Resource
	rtype, err := charmresource.ParseType(apiInfo.Type)
	if err != nil {
		return res, errors.Trace(err)
	}
	origin, err := charmresource.ParseOrigin(apiInfo.Origin)
	if err != nil {
		return res, errors.Trace(err)
	}
	fp, err := resource.DeserializeFingerprint(apiInfo.Fingerprint)
	if err != nil {
		return res, errors.Trace(err)
	}
	res = charmresource.Resource{
		Meta: charmresource.Meta{
			Name: apiInfo.Name,
			Type: rtype,
			Path: apiInfo.Path,
			Description: apiInfo.Description,
		},
		Origin: origin,
		Revision: apiInfo.Revision,
		Fingerprint: fp,
		Size: apiInfo.Size,
	}
	if err := res.Validate(); err != nil {
		return res, errors.Trace(err)
	}
	return res, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.