_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4100
|
unpinLeaders
|
train
|
// unpinLeaders requests that all application leaders pinned for this
// machine's series upgrade be released, logging the outcome for each
// application. The worker's leadersPinned flag is only cleared when
// every unpin succeeds; otherwise the last failure is returned.
func (w *upgradeSeriesWorker) unpinLeaders() error {
	results, err := w.UnpinMachineApplications()
	if err != nil {
		return errors.Trace(err)
	}
	var failure error
	for app, appErr := range results {
		if appErr != nil {
			w.logger.Errorf("failed to unpin leader for application %q: %s", app, appErr.Error())
			failure = appErr
			continue
		}
		w.logger.Infof("unpinned leader for application %q", app)
	}
	if failure != nil {
		return errors.Trace(failure)
	}
	w.leadersPinned = false
	return nil
}
|
go
|
{
"resource": ""
}
|
q4101
|
unitServices
|
train
|
// unitServices lists the services known to the init system and returns
// those that correspond to unit agents, as determined by
// service.FindUnitServiceNames.
func (w *upgradeSeriesWorker) unitServices() (map[string]string, error) {
	listed, err := w.service.ListServices()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return service.FindUnitServiceNames(listed), nil
}
|
go
|
{
"resource": ""
}
|
q4102
|
unitNames
|
train
|
// unitNames returns the keys of units joined with ", " for display.
// Note that map iteration order is not deterministic, so neither is the
// ordering of the joined names.
func unitNames(units map[string]string) string {
	names := make([]string, 0, len(units))
	for unit := range units {
		names = append(names, unit)
	}
	return strings.Join(names, ", ")
}
|
go
|
{
"resource": ""
}
|
q4103
|
NewEndpointPath
|
train
|
// NewEndpointPath returns the HTTP endpoint path for the named resource
// of the given application, built from the HTTPEndpointPath format.
func NewEndpointPath(application, name string) string {
	return fmt.Sprintf(HTTPEndpointPath, application, name)
}
|
go
|
{
"resource": ""
}
|
q4104
|
ExtractEndpointDetails
|
train
|
func ExtractEndpointDetails(url *url.URL) (application, name string) {
application = url.Query().Get(":application")
name = url.Query().Get(":resource")
return application, name
}
|
go
|
{
"resource": ""
}
|
q4105
|
SendHTTPStatusAndJSON
|
train
|
// SendHTTPStatusAndJSON marshals response to JSON and writes it to w
// with the given HTTP status code, setting Content-Type and
// Content-Length. A 401 response additionally carries a
// WWW-Authenticate header advertising basic auth for the "juju" realm.
func SendHTTPStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) {
	body, err := json.Marshal(response)
	if err != nil {
		// A marshalling failure is a fault in this server, so report
		// 500 Internal Server Error rather than the previous 504
		// (Gateway Timeout), which wrongly implied an upstream failure.
		http.Error(w, errors.Annotatef(err, "cannot marshal JSON result %#v", response).Error(), http.StatusInternalServerError)
		return
	}
	if statusCode == http.StatusUnauthorized {
		w.Header().Set("WWW-Authenticate", `Basic realm="juju"`)
	}
	w.Header().Set("Content-Type", params.ContentTypeJSON)
	w.Header().Set("Content-Length", fmt.Sprint(len(body)))
	w.WriteHeader(statusCode)
	w.Write(body)
}
|
go
|
{
"resource": ""
}
|
q4106
|
NewWorker
|
train
|
// NewWorker validates the supplied config and returns a simple worker
// that marks the CAAS model as available and unlocks the upgrade gate.
// There are no upgrade steps to run for a CAAS model.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	work := func(<-chan struct{}) error {
		err := config.Facade.SetModelStatus(config.ModelTag, status.Available, "", nil)
		if err != nil {
			return errors.Trace(err)
		}
		config.GateUnlocker.Unlock()
		return nil
	}
	return jujuworker.NewSimpleWorker(work), nil
}
|
go
|
{
"resource": ""
}
|
q4107
|
NewAPI
|
train
|
// NewAPI creates a new server-side annotations API facade. Callers that
// are not authenticated clients are rejected with ErrPerm.
func NewAPI(
	st *state.State,
	resources facade.Resources,
	authorizer facade.Authorizer,
) (*API, error) {
	if !authorizer.AuthClient() {
		return nil, common.ErrPerm
	}
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	api := &API{
		access: getState(st, model),
		authorizer: authorizer,
	}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q4108
|
Get
|
train
|
// Get returns the annotations for each requested entity. If the caller
// lacks read permission, every result slot carries that error;
// otherwise per-entity lookup failures are reported in their slots.
func (api *API) Get(args params.Entities) params.AnnotationsGetResults {
	if err := api.checkCanRead(); err != nil {
		denied := make([]params.AnnotationsGetResult, len(args.Entities))
		for i := range denied {
			denied[i].Error = params.ErrorResult{Error: common.ServerError(err)}
		}
		return params.AnnotationsGetResults{Results: denied}
	}
	results := make([]params.AnnotationsGetResult, 0, len(args.Entities))
	for _, entity := range args.Entities {
		result := params.AnnotationsGetResult{EntityTag: entity.Tag}
		annotations, err := api.getEntityAnnotations(entity.Tag)
		if err != nil {
			result.Error = params.ErrorResult{Error: annotateError(err, entity.Tag, "getting")}
		} else {
			result.Annotations = annotations
		}
		results = append(results, result)
	}
	return params.AnnotationsGetResults{Results: results}
}
|
go
|
{
"resource": ""
}
|
q4109
|
Set
|
train
|
// Set records the supplied annotations against their entities. If the
// caller lacks write permission, one error per requested entity is
// returned; otherwise only failed sets contribute error results.
func (api *API) Set(args params.AnnotationsSet) params.ErrorResults {
	if err := api.checkCanWrite(); err != nil {
		denied := make([]params.ErrorResult, len(args.Annotations))
		for i := range denied {
			denied[i].Error = common.ServerError(err)
		}
		return params.ErrorResults{Results: denied}
	}
	results := []params.ErrorResult{}
	for _, annotation := range args.Annotations {
		if err := api.setEntityAnnotations(annotation.EntityTag, annotation.Annotations); err != nil {
			results = append(results, params.ErrorResult{
				Error: annotateError(err, annotation.EntityTag, "setting"),
			})
		}
	}
	return params.ErrorResults{Results: results}
}
|
go
|
{
"resource": ""
}
|
q4110
|
UpgradeSeriesPrepare
|
train
|
// UpgradeSeriesPrepare runs the preparation phase of a machine series
// upgrade: validate the requested upgrade, confirm with the user, issue
// the prepare request, then stream progress notifications until the
// phase finishes.
func (c *upgradeSeriesCommand) UpgradeSeriesPrepare(ctx *cmd.Context) (err error) {
	apiRoot, err := c.ensureAPIClient()
	if err != nil {
		return errors.Trace(err)
	}
	if apiRoot != nil {
		defer apiRoot.Close()
	}
	units, err := c.upgradeMachineSeriesClient.UpgradeSeriesValidate(c.machineNumber, c.series)
	if err != nil {
		return errors.Trace(err)
	}
	if err = c.promptConfirmation(ctx, units); err != nil {
		return errors.Trace(err)
	}
	if err = c.upgradeMachineSeriesClient.UpgradeSeriesPrepare(c.machineNumber, c.series, c.force); err != nil {
		return errors.Trace(err)
	}
	if err = c.handleNotifications(ctx); err != nil {
		return errors.Trace(err)
	}
	ctx.Infof(UpgradeSeriesPrepareFinishedMessage+"\n", c.machineNumber)
	return nil
}
|
go
|
{
"resource": ""
}
|
q4111
|
displayNotifications
|
train
|
// displayNotifications returns a worker function suitable for a
// catacomb plan. The function watches upgrade-series notifications for
// the machine and relays each change to the command context until the
// catacomb is killed.
func (c *upgradeSeriesCommand) displayNotifications(ctx *cmd.Context) func() error {
	// We return an anonymous function here to satisfy the catacomb plan's
	// need for a work function and to close over the command's context.
	return func() error {
		uw, wid, err := c.upgradeMachineSeriesClient.WatchUpgradeSeriesNotifications(c.machineNumber)
		if err != nil {
			return errors.Trace(err)
		}
		// Adding the watcher to the catacomb ties its lifetime to the
		// command's, so it is stopped when the catacomb dies.
		err = c.catacomb.Add(uw)
		if err != nil {
			return errors.Trace(err)
		}
		for {
			select {
			case <-c.catacomb.Dying():
				return c.catacomb.ErrDying()
			case <-uw.Changes():
				err = c.handleUpgradeSeriesChange(ctx, wid)
				if err != nil {
					return errors.Trace(err)
				}
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4112
|
UpgradeSeriesComplete
|
train
|
// UpgradeSeriesComplete runs the completion phase of a machine series
// upgrade, then streams progress notifications until the phase is done.
func (c *upgradeSeriesCommand) UpgradeSeriesComplete(ctx *cmd.Context) error {
	apiRoot, err := c.ensureAPIClient()
	if err != nil {
		return errors.Trace(err)
	}
	if apiRoot != nil {
		defer apiRoot.Close()
	}
	if err = c.upgradeMachineSeriesClient.UpgradeSeriesComplete(c.machineNumber); err != nil {
		return errors.Trace(err)
	}
	if err = c.handleNotifications(ctx); err != nil {
		return errors.Trace(err)
	}
	ctx.Infof(UpgradeSeriesCompleteFinishedMessage+"\n", c.machineNumber)
	return nil
}
|
go
|
{
"resource": ""
}
|
q4113
|
Status
|
train
|
// Status returns the status of the unit agent. A recorded Error status
// is reported as Idle here, because hook errors are surfaced against
// the workload rather than the agent (see the comment below).
func (u *UnitAgent) Status() (status.StatusInfo, error) {
	info, err := getStatus(u.st.db(), u.globalKey(), "agent")
	if err != nil {
		return status.StatusInfo{}, errors.Trace(err)
	}
	// The current health spec says when a hook error occurs, the workload should
	// be in error state, but the state model more correctly records the agent
	// itself as being in error. So we'll do that model translation here.
	// TODO(fwereade): this should absolutely not be happpening in the model.
	// TODO: when fixed, also fix code in status.go for UnitAgent.
	if info.Status == status.Error {
		return status.StatusInfo{
			Status: status.Idle,
			Message: "",
			Data: map[string]interface{}{},
			Since: info.Since,
		}, nil
	}
	return info, nil
}
|
go
|
{
"resource": ""
}
|
q4114
|
SetStatus
|
train
|
// SetStatus validates and records a new agent status for the unit. The
// set of permitted statuses depends on whether the unit is assigned to
// a machine, whether it is a principal, and whether its model type
// requires assignment at all.
func (u *UnitAgent) SetStatus(unitAgentStatus status.StatusInfo) (err error) {
	unit, err := u.st.Unit(u.name)
	if errors.IsNotFound(err) {
		return errors.Annotate(errors.NotFoundf("agent"), "cannot set status")
	}
	if err != nil {
		return errors.Trace(err)
	}
	// These three facts drive the validation switch below.
	isAssigned := unit.doc.MachineId != ""
	shouldBeAssigned := unit.ShouldBeAssigned()
	isPrincipal := unit.doc.Principal == ""
	switch unitAgentStatus.Status {
	case status.Idle, status.Executing, status.Rebooting, status.Failed:
		// An active agent status is invalid for a principal unit that
		// requires machine assignment but has not yet been assigned.
		if !isAssigned && isPrincipal && shouldBeAssigned {
			return errors.Errorf("cannot set status %q until unit is assigned", unitAgentStatus.Status)
		}
	case status.Error:
		// An error status must carry an explanatory message.
		if unitAgentStatus.Message == "" {
			return errors.Errorf("cannot set status %q without info", unitAgentStatus.Status)
		}
	case status.Allocating:
		// Allocating only makes sense before assignment.
		if isAssigned {
			return errors.Errorf("cannot set status %q as unit is already assigned", unitAgentStatus.Status)
		}
	case status.Running:
		// Only CAAS units (those that require assignment) can have a status of running.
		if shouldBeAssigned {
			return errors.Errorf("cannot set invalid status %q", unitAgentStatus.Status)
		}
	case status.Lost:
		// Lost is never settable through this path.
		return errors.Errorf("cannot set status %q", unitAgentStatus.Status)
	default:
		return errors.Errorf("cannot set invalid status %q", unitAgentStatus.Status)
	}
	return setStatus(u.st.db(), setStatusParams{
		badge: "agent",
		globalKey: u.globalKey(),
		status: unitAgentStatus.Status,
		message: unitAgentStatus.Message,
		rawData: unitAgentStatus.Data,
		updated: timeOrNow(unitAgentStatus.Since, u.st.clock()),
	})
}
|
go
|
{
"resource": ""
}
|
q4115
|
Path
|
train
|
// Path returns the resource path of the network, substituting the
// default network name when the spec does not name one.
func (ns *NetworkSpec) Path() string {
	if ns.Name == "" {
		return networkPathRoot + networkDefaultName
	}
	return networkPathRoot + ns.Name
}
|
go
|
{
"resource": ""
}
|
q4116
|
newInterface
|
train
|
// newInterface builds a GCE network interface for this network spec.
// A non-empty name attaches a one-to-one NAT access config, which gives
// the interface an internet connection.
func (ns *NetworkSpec) newInterface(name string) *compute.NetworkInterface {
	iface := &compute.NetworkInterface{
		Network: ns.Path(),
	}
	if name != "" {
		// This interface has an internet connection.
		// TODO(ericsnow) Will we need to support more access configs?
		iface.AccessConfigs = []*compute.AccessConfig{{
			Name: name,
			Type: NetworkAccessOneToOneNAT,
			// NatIP (only set if using a reserved public IP)
		}}
	}
	return iface
}
|
go
|
{
"resource": ""
}
|
q4117
|
firewallSpec
|
train
|
// firewallSpec returns a GCE firewall named name that allows the given
// ports to instances tagged with target, from sourceCIDRs (or from
// anywhere when no CIDRs are given). Protocols are emitted in sorted
// order so the result is deterministic.
func firewallSpec(name, target string, sourceCIDRs []string, ports protocolPorts) *compute.Firewall {
	if len(sourceCIDRs) == 0 {
		sourceCIDRs = []string{"0.0.0.0/0"}
	}
	fw := &compute.Firewall{
		// Allowed is set below.
		// Description is not set.
		Name: name,
		// Network: (defaults to global)
		// SourceTags is not set.
		TargetTags: []string{target},
		SourceRanges: sourceCIDRs,
	}
	protocols := make([]string, 0, len(ports))
	for protocol := range ports {
		protocols = append(protocols, protocol)
	}
	sort.Strings(protocols)
	for _, protocol := range protocols {
		fw.Allowed = append(fw.Allowed, &compute.FirewallAllowed{
			IPProtocol: protocol,
			Ports: ports.portStrings(protocol),
		})
	}
	return fw
}
|
go
|
{
"resource": ""
}
|
q4118
|
ModelStatus
|
train
|
// ModelStatus returns the status of each requested model. Failures are
// reported in the corresponding result slot rather than aborting the
// whole call.
func (c *ModelStatusAPI) ModelStatus(req params.Entities) (params.ModelStatusResults, error) {
	results := make([]params.ModelStatus, len(req.Entities))
	for i, model := range req.Entities {
		modelStatus, err := c.modelStatus(model.Tag)
		if err != nil {
			results[i].Error = ServerError(err)
			continue
		}
		results[i] = modelStatus
	}
	return params.ModelStatusResults{Results: results}, nil
}
|
go
|
{
"resource": ""
}
|
q4119
|
ModelFilesystemInfo
|
train
|
// ModelFilesystemInfo converts state filesystems into their API
// representation. A failure to read a filesystem's status is rendered
// as the status string itself; a missing provider id is left empty.
func ModelFilesystemInfo(in []state.Filesystem) []params.ModelFilesystemInfo {
	out := make([]params.ModelFilesystemInfo, len(in))
	for i, fs := range in {
		fsStatus, err := fs.Status()
		statusString := string(fsStatus.Status)
		if err != nil {
			statusString = err.Error()
		}
		var providerId string
		if info, infoErr := fs.Info(); infoErr == nil {
			providerId = info.FilesystemId
		}
		out[i] = params.ModelFilesystemInfo{
			Id: fs.Tag().Id(),
			ProviderId: providerId,
			Status: statusString,
			Message: fsStatus.Message,
			Detachable: fs.Detachable(),
		}
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q4120
|
ModelVolumeInfo
|
train
|
// ModelVolumeInfo converts state volumes into their API representation.
// A failure to read a volume's status is rendered as the status string
// itself; a missing provider id is left empty.
func ModelVolumeInfo(in []state.Volume) []params.ModelVolumeInfo {
	out := make([]params.ModelVolumeInfo, len(in))
	for i, vol := range in {
		volStatus, err := vol.Status()
		statusString := string(volStatus.Status)
		if err != nil {
			statusString = err.Error()
		}
		var providerId string
		if info, infoErr := vol.Info(); infoErr == nil {
			providerId = info.VolumeId
		}
		out[i] = params.ModelVolumeInfo{
			Id: vol.Tag().Id(),
			ProviderId: providerId,
			Status: statusString,
			Message: volStatus.Message,
			Detachable: vol.Detachable(),
		}
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q4121
|
NewRelationIdValue
|
train
|
// NewRelationIdValue returns a flag value for relation ids, seeded from
// the context's hook relation when one exists. result is set to the
// current relation id, or -1 when the hook has no relation.
func NewRelationIdValue(ctx Context, result *int) (*relationIdValue, error) {
	v := &relationIdValue{result: result, ctx: ctx}
	current := -1
	r, err := ctx.HookRelation()
	switch {
	case err == nil:
		current = r.Id()
		v.value = r.FakeId()
	case !errors.IsNotFound(err):
		return nil, errors.Trace(err)
	}
	*result = current
	return v, nil
}
|
go
|
{
"resource": ""
}
|
q4122
|
Set
|
train
|
// Set parses value as a relation id — accepting "name:id" forms by
// taking the text after the last colon — verifies the relation exists
// in the context, and records both the id and the raw value.
func (v *relationIdValue) Set(value string) error {
	candidate := value
	if idx := strings.LastIndex(candidate, ":"); idx != -1 {
		candidate = candidate[idx+1:]
	}
	id, err := strconv.Atoi(candidate)
	if err != nil {
		return fmt.Errorf("invalid relation id")
	}
	if _, err := v.ctx.Relation(id); err != nil {
		return errors.Trace(err)
	}
	*v.result = id
	v.value = value
	return nil
}
|
go
|
{
"resource": ""
}
|
q4123
|
FriendlyDuration
|
train
|
// FriendlyDuration returns a user-friendly rendering of the time
// elapsed between *when and now, or the empty string when when is nil.
func FriendlyDuration(when *time.Time, now time.Time) string {
	if when == nil {
		return ""
	}
	return UserFriendlyDuration(*when, now)
}
|
go
|
{
"resource": ""
}
|
q4124
|
ModelInfoFromParams
|
train
|
// ModelInfoFromParams translates an API params.ModelInfo into the
// client-side ModelInfo, formatting timestamps relative to now.
// Optional sections (agent version, status, migration, provider type,
// users, machines, SLA, credential) are populated only when present in
// the input.
func ModelInfoFromParams(info params.ModelInfo, now time.Time) (ModelInfo, error) {
	ownerTag, err := names.ParseUserTag(info.OwnerTag)
	if err != nil {
		return ModelInfo{}, errors.Trace(err)
	}
	cloudTag, err := names.ParseCloudTag(info.CloudTag)
	if err != nil {
		return ModelInfo{}, errors.Trace(err)
	}
	modelInfo := ModelInfo{
		ShortName: info.Name,
		Name: jujuclient.JoinOwnerModelName(ownerTag, info.Name),
		Type: model.ModelType(info.Type),
		UUID: info.UUID,
		ControllerUUID: info.ControllerUUID,
		IsController: info.IsController,
		Owner: ownerTag.Id(),
		Life: string(info.Life),
		Cloud: cloudTag.Id(),
		CloudRegion: info.CloudRegion,
	}
	if info.AgentVersion != nil {
		modelInfo.AgentVersion = info.AgentVersion.String()
	}
	// Although this may be more performance intensive, we have to use reflection
	// since structs containing map[string]interface {} cannot be compared, i.e
	// cannot use simple '==' here.
	if !reflect.DeepEqual(info.Status, params.EntityStatus{}) {
		modelInfo.Status = &ModelStatus{
			Current: info.Status.Status,
			Message: info.Status.Info,
			Since: FriendlyDuration(info.Status.Since, now),
		}
	}
	if info.Migration != nil {
		// Migration details piggyback on the status; create an empty
		// status to hang them on when none was set above.
		status := modelInfo.Status
		if status == nil {
			status = &ModelStatus{}
			modelInfo.Status = status
		}
		status.Migration = info.Migration.Status
		status.MigrationStart = FriendlyDuration(info.Migration.Start, now)
		status.MigrationEnd = FriendlyDuration(info.Migration.End, now)
	}
	if info.ProviderType != "" {
		modelInfo.ProviderType = info.ProviderType
	}
	if len(info.Users) != 0 {
		modelInfo.Users = ModelUserInfoFromParams(info.Users, now)
	}
	if len(info.Machines) != 0 {
		modelInfo.Machines = ModelMachineInfoFromParams(info.Machines)
	}
	if info.SLA != nil {
		modelInfo.SLA = ModelSLAFromParams(info.SLA)
		modelInfo.SLAOwner = ModelSLAOwnerFromParams(info.SLA)
	}
	if info.CloudCredentialTag != "" {
		credTag, err := names.ParseCloudCredentialTag(info.CloudCredentialTag)
		if err != nil {
			return ModelInfo{}, errors.Trace(err)
		}
		modelInfo.Credential = &ModelCredential{
			Name: credTag.Name(),
			Owner: credTag.Owner().Id(),
			Cloud: credTag.Cloud().Id(),
		}
	}
	return modelInfo, nil
}
|
go
|
{
"resource": ""
}
|
q4125
|
OwnerQualifiedModelName
|
train
|
// OwnerQualifiedModelName returns the model name prefixed by its owner
// unless the model belongs to the given user, in which case the bare
// name suffices.
func OwnerQualifiedModelName(modelName string, owner, user names.UserTag) string {
	if owner.Id() != user.Id() {
		return jujuclient.JoinOwnerModelName(owner, modelName)
	}
	return modelName
}
|
go
|
{
"resource": ""
}
|
q4126
|
NewCharmResourcesCommand
|
train
|
// NewCharmResourcesCommand returns a new charm resources command using
// the supplied resource lister.
func NewCharmResourcesCommand(resourceLister ResourceLister) modelcmd.ModelCommand {
	command := CharmResourcesCommand{}
	command.setResourceLister(resourceLister)
	return modelcmd.Wrap(&command)
}
|
go
|
{
"resource": ""
}
|
q4127
|
ListResources
|
train
|
// ListResources looks up resource metadata for the given charm ids,
// using a charm store client constructed from the controller
// connection's charm store URL and a bakery client for auth.
func (c *baseCharmResourcesCommand) ListResources(ids []charmstore.CharmID) ([][]charmresource.Resource, error) {
	bakeryClient, err := c.BakeryClient()
	if err != nil {
		return nil, errors.Trace(err)
	}
	apiRoot, err := c.NewControllerAPIRoot()
	if err != nil {
		return nil, errors.Trace(err)
	}
	storeURL, err := getCharmStoreAPIURL(apiRoot)
	if err != nil {
		return nil, errors.Trace(err)
	}
	client, err := charmstore.NewCustomClient(bakeryClient, storeURL)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return client.ListResources(ids)
}
|
go
|
{
"resource": ""
}
|
q4128
|
Work
|
train
|
// Work runs the pruner's main loop. It watches for model config changes
// to pick up the pruning parameters supplied by getPrunerConfig, and
// once the first config event has been seen, prunes on each tick of the
// configured interval. It returns when the catacomb is killed.
func (w *PrunerWorker) Work(getPrunerConfig func(*config.Config) (time.Duration, uint)) error {
	modelConfigWatcher, err := w.config.Facade.WatchForModelConfigChanges()
	if err != nil {
		return errors.Trace(err)
	}
	err = w.catacomb.Add(modelConfigWatcher)
	if err != nil {
		return errors.Trace(err)
	}
	var (
		maxAge time.Duration
		maxCollectionMB uint
		modelConfigChanges = modelConfigWatcher.Changes()
		// We will also get an initial event, but need to ensure that event is
		// received before doing any pruning.
	)
	// The prune timer stays nil until the first config event arrives,
	// so pruning never runs with zero-valued settings.
	var timer clock.Timer
	var timerCh <-chan time.Time
	for {
		select {
		case <-w.catacomb.Dying():
			return w.catacomb.ErrDying()
		case _, ok := <-modelConfigChanges:
			if !ok {
				return errors.New("model configuration watcher closed")
			}
			modelConfig, err := w.config.Facade.ModelConfig()
			if err != nil {
				return errors.Annotate(err, "cannot load model configuration")
			}
			newMaxAge, newMaxCollectionMB := getPrunerConfig(modelConfig)
			if newMaxAge != maxAge || newMaxCollectionMB != maxCollectionMB {
				logger.Infof("status history config: max age: %v, max collection size %dM for %s (%s)",
					newMaxAge, newMaxCollectionMB, modelConfig.Name(), modelConfig.UUID())
				maxAge = newMaxAge
				maxCollectionMB = newMaxCollectionMB
			}
			if timer == nil {
				timer = w.config.Clock.NewTimer(w.config.PruneInterval)
				timerCh = timer.Chan()
			}
		case <-timerCh:
			err := w.config.Facade.Prune(maxAge, int(maxCollectionMB))
			if err != nil {
				return errors.Trace(err)
			}
			timer.Reset(w.config.PruneInterval)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q4129
|
finalizeConfig
|
train
|
// finalizeConfig builds a model config from attrs (applying defaults)
// and runs it through the provider's preparation and validation steps,
// returning the fully-prepared config.
func finalizeConfig(
	provider environs.EnvironProvider,
	cloud environs.CloudSpec,
	attrs map[string]interface{},
) (*config.Config, error) {
	cfg, err := config.New(config.UseDefaults, attrs)
	if err != nil {
		return nil, errors.Annotate(err, "creating config from values failed")
	}
	prepared, err := provider.PrepareConfig(environs.PrepareConfigParams{
		Cloud: cloud,
		Config: cfg,
	})
	if err != nil {
		return nil, errors.Annotate(err, "provider config preparation failed")
	}
	validated, err := provider.Validate(prepared, nil)
	if err != nil {
		return nil, errors.Annotate(err, "provider config validation failed")
	}
	return validated, nil
}
|
go
|
{
"resource": ""
}
|
q4130
|
ProvisionMachine
|
train
|
// ProvisionMachine manually provisions the host described in args as a
// Juju machine: it sets up the "ubuntu" user on the host, records the
// machine in state, then runs the provisioning script over SSH. If any
// step fails after the machine has been recorded, the deferred cleanup
// force-destroys it so state is not left with a half-provisioned
// machine, and an empty machine id is returned with the error.
func ProvisionMachine(args manual.ProvisionMachineArgs) (machineId string, err error) {
	defer func() {
		// Only fires when the machine was recorded (machineId set)
		// but a later step failed.
		if machineId != "" && err != nil {
			logger.Errorf("provisioning failed, removing machine %v: %v", machineId, err)
			if cleanupErr := args.Client.ForceDestroyMachines(machineId); cleanupErr != nil {
				logger.Errorf("error cleaning up machine: %s", cleanupErr)
			}
			machineId = ""
		}
	}()
	// Create the "ubuntu" user and initialise passwordless sudo. We populate
	// the ubuntu user's authorized_keys file with the public keys in the current
	// user's ~/.ssh directory. The authenticationworker will later update the
	// ubuntu user's authorized_keys.
	if err = InitUbuntuUser(args.Host, args.User,
		args.AuthorizedKeys, args.Stdin, args.Stdout); err != nil {
		return "", err
	}
	machineParams, err := gatherMachineParams(args.Host)
	if err != nil {
		return "", err
	}
	// Inform Juju that the machine exists.
	machineId, err = manual.RecordMachineInState(args.Client, *machineParams)
	if err != nil {
		return "", err
	}
	provisioningScript, err := args.Client.ProvisioningScript(params.ProvisioningScriptParams{
		MachineId: machineId,
		Nonce: machineParams.Nonce,
		DisablePackageCommands: !args.EnableOSRefreshUpdate && !args.EnableOSUpgrade,
	})
	if err != nil {
		logger.Errorf("cannot obtain provisioning script")
		return "", err
	}
	// Finally, provision the machine agent.
	err = runProvisionScript(provisioningScript, args.Host, args.Stderr)
	if err != nil {
		return machineId, err
	}
	logger.Infof("Provisioned machine %v", machineId)
	return machineId, nil
}
|
go
|
{
"resource": ""
}
|
q4131
|
NewCredentialInvalidatorFacade
|
train
|
// NewCredentialInvalidatorFacade returns a credential API backed by the
// credentialvalidator facade on the given API connection. The error
// return is always nil; it exists to satisfy the expected constructor
// signature.
func NewCredentialInvalidatorFacade(apiCaller base.APICaller) (CredentialAPI, error) {
	return credentialvalidator.NewFacade(apiCaller), nil
}
|
go
|
{
"resource": ""
}
|
q4132
|
NewCloudCallContext
|
train
|
// NewCloudCallContext returns a provider call context whose credential
// invalidation is delegated to the given credential API and whose
// lifetime is bounded by dying.
func NewCloudCallContext(c CredentialAPI, dying context.Dying) context.ProviderCallContext {
	callCtx := &context.CloudCallContext{
		DyingFunc: dying,
		InvalidateCredentialFunc: c.InvalidateModelCredential,
	}
	return callCtx
}
|
go
|
{
"resource": ""
}
|
q4133
|
NewDisableCommand
|
train
|
// NewDisableCommand returns a disable command wrapped for model use,
// with its block API resolved via getBlockAPI at run time.
func NewDisableCommand() cmd.Command {
	apiFunc := func(c newAPIRoot) (blockClientAPI, error) {
		return getBlockAPI(c)
	}
	return modelcmd.Wrap(&disableCommand{apiFunc: apiFunc})
}
|
go
|
{
"resource": ""
}
|
q4134
|
NewDeployerAPI
|
train
|
// NewDeployerAPI creates a new server-side DeployerAPI facade. Only
// machine agents may use it; the auth function it builds grants access
// solely to the units currently deployed to the authenticated machine.
func NewDeployerAPI(
	st *state.State,
	resources facade.Resources,
	authorizer facade.Authorizer,
) (*DeployerAPI, error) {
	if !authorizer.AuthMachineAgent() {
		return nil, common.ErrPerm
	}
	getAuthFunc := func() (common.AuthFunc, error) {
		// Get all units of the machine and cache them.
		thisMachineTag := authorizer.GetAuthTag()
		units, err := getAllUnits(st, thisMachineTag)
		if err != nil {
			return nil, err
		}
		// Then we just check if the unit is already known.
		return func(tag names.Tag) bool {
			for _, unit := range units {
				// TODO (thumper): remove the names.Tag conversion when gccgo
				// implements concrete-type-to-interface comparison correctly.
				if names.Tag(names.NewUnitTag(unit)) == tag {
					return true
				}
			}
			return false
		}, nil
	}
	// Watching is authorized by entity ownership rather than the
	// cached unit list.
	getCanWatch := func() (common.AuthFunc, error) {
		return authorizer.AuthOwner, nil
	}
	return &DeployerAPI{
		Remover: common.NewRemover(st, true, getAuthFunc),
		PasswordChanger: common.NewPasswordChanger(st, getAuthFunc),
		LifeGetter: common.NewLifeGetter(st, getAuthFunc),
		APIAddresser: common.NewAPIAddresser(st, resources),
		UnitsWatcher: common.NewUnitsWatcher(st, resources, getCanWatch),
		StatusSetter: common.NewStatusSetter(st, getAuthFunc),
		st: st,
		resources: resources,
		authorizer: authorizer,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4135
|
SetStatus
|
train
|
// SetStatus sets the status of the specified entities, delegating to
// the embedded common.StatusSetter.
func (d *DeployerAPI) SetStatus(args params.SetStatus) (params.ErrorResults, error) {
	return d.StatusSetter.SetStatus(args)
}
|
go
|
{
"resource": ""
}
|
q4136
|
getAllUnits
|
train
|
// getAllUnits returns the names of all units deployed to the machine
// identified by tag, obtained from the initial event of a fresh unit
// watcher (which carries the complete current set).
func getAllUnits(st *state.State, tag names.Tag) ([]string, error) {
	machine, err := st.Machine(tag.Id())
	if err != nil {
		return nil, err
	}
	// Start a watcher on machine's units, read the initial event and stop it.
	watch := machine.WatchUnits()
	defer watch.Stop()
	units, ok := <-watch.Changes()
	if !ok {
		return nil, fmt.Errorf("cannot obtain units of machine %q: %v", tag, watch.Err())
	}
	return units, nil
}
|
go
|
{
"resource": ""
}
|
q4137
|
NewRestoreCommand
|
train
|
// NewRestoreCommand returns a command used to restore a backup, with
// its model-status API resolved lazily from the command itself.
func NewRestoreCommand() cmd.Command {
	command := &restoreCommand{}
	command.getModelStatusAPI = func() (ModelStatusAPI, error) {
		return command.NewModelManagerAPIClient()
	}
	return modelcmd.Wrap(command)
}
|
go
|
{
"resource": ""
}
|
q4138
|
SetFlags
|
train
|
// SetFlags implements cmd.Command, registering the --file and --id
// flags used to select which backup to restore.
func (c *restoreCommand) SetFlags(f *gnuflag.FlagSet) {
	c.CommandBase.SetFlags(f)
	f.StringVar(&c.Filename, "file", "", "Provide a file to be used as the backup")
	f.StringVar(&c.BackupId, "id", "", "Provide the name of the backup to be restored")
}
|
go
|
{
"resource": ""
}
|
q4139
|
Init
|
train
|
// Init validates the restore command's arguments: exactly one of
// --file or --id must be supplied. A provided filename is normalised to
// an absolute path.
func (c *restoreCommand) Init(args []string) error {
	if c.Filename == "" && c.BackupId == "" {
		// Error strings carry no trailing punctuation, per Go
		// convention (previously these ended with a period).
		return errors.Errorf("you must specify either a file or a backup id")
	}
	if c.Filename != "" && c.BackupId != "" {
		return errors.Errorf("you must specify either a file or a backup id but not both")
	}
	if c.Filename != "" {
		var err error
		c.Filename, err = filepath.Abs(c.Filename)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4140
|
Run
|
train
|
// Run restores a backup to the controller. It refuses to run against a
// controller in HA configuration, reads the archive either from a local
// file or by backup id, then invokes the matching restore client call.
func (c *restoreCommand) Run(ctx *cmd.Context) error {
	if err := c.validateIaasController(c.Info().Name); err != nil {
		return errors.Trace(err)
	}
	if c.Log != nil {
		if err := c.Log.Start(ctx); err != nil {
			return err
		}
	}
	// Don't allow restore in an HA environment
	controllerModelUUID, modelStatus, err := c.modelStatus()
	if err != nil {
		return errors.Trace(err)
	}
	activeCount, _ := controller.ControllerMachineCounts(controllerModelUUID, modelStatus)
	if activeCount > 1 {
		return errors.Errorf("unable to restore backup in HA configuration. For help see https://docs.jujucharms.com/stable/controllers-backup")
	}
	var archive ArchiveReader
	var meta *params.BackupsMetadataResult
	// target is only used for the final user-facing message.
	target := c.BackupId
	if c.Filename != "" {
		// Read archive specified by the Filename
		target = c.Filename
		var err error
		archive, meta, err = getArchive(c.Filename)
		if err != nil {
			return errors.Trace(err)
		}
		defer archive.Close()
	}
	client, err := c.NewAPIClient()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()
	// We have a backup client, now use the relevant method
	// to restore the backup.
	if c.Filename != "" {
		err = client.RestoreReader(archive, meta, c.newClient)
	} else {
		err = client.Restore(c.BackupId, c.newClient)
	}
	if err != nil {
		return errors.Trace(err)
	}
	fmt.Fprintf(ctx.Stdout, "restore from %q completed\n", target)
	return nil
}
|
go
|
{
"resource": ""
}
|
q4141
|
NewWorker
|
train
|
// NewWorker validates the config and starts an agentConfigUpdater whose
// loop signals readiness on the started channel. If the loop does not
// signal within ten seconds an error is returned.
// NOTE(review): on timeout the already-started goroutine is abandoned
// without its tomb being killed — confirm whether it should be stopped
// here.
func NewWorker(config WorkerConfig) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	started := make(chan struct{})
	w := &agentConfigUpdater{
		config: config,
		mongoProfile: config.MongoProfile,
	}
	w.tomb.Go(func() error {
		return w.loop(started)
	})
	select {
	case <-started:
	case <-time.After(10 * time.Second):
		return nil, errors.New("worker failed to start properly")
	}
	return w, nil
}
|
go
|
{
"resource": ""
}
|
q4142
|
SingularClaimer
|
train
|
// SingularClaimer returns a lease.Claimer for the singular-controller
// namespace of this model, resolved lazily from the state workers.
func (st *State) SingularClaimer() lease.Claimer {
	getClaimer := func() (lease.Claimer, error) {
		return st.workers.singularManager().Claimer(singularControllerNamespace, st.modelUUID())
	}
	return lazyLeaseClaimer{getClaimer}
}
|
go
|
{
"resource": ""
}
|
q4143
|
counterpartRole
|
train
|
// counterpartRole returns the role that can relate to the given one:
// provider pairs with requirer (and vice versa), and peer with itself.
// It panics on an unrecognised role, which indicates a programming
// error.
func counterpartRole(r charm.RelationRole) charm.RelationRole {
	switch r {
	case charm.RoleProvider:
		return charm.RoleRequirer
	case charm.RoleRequirer:
		return charm.RoleProvider
	case charm.RolePeer:
		return charm.RolePeer
	default:
		panic(fmt.Errorf("unknown relation role %q", r))
	}
}
|
go
|
{
"resource": ""
}
|
q4144
|
CanRelateTo
|
train
|
// CanRelateTo reports whether this endpoint can form a relation with
// other: they must belong to different applications, share an
// interface, not be peers, and have counterpart roles.
func (ep Endpoint) CanRelateTo(other Endpoint) bool {
	if ep.ApplicationName == other.ApplicationName {
		return false
	}
	if ep.Interface != other.Interface {
		return false
	}
	if ep.Role == charm.RolePeer {
		return false
	}
	return counterpartRole(ep.Role) == other.Role
}
|
go
|
{
"resource": ""
}
|
q4145
|
NewStateFirewallerAPIV3
|
train
|
// NewStateFirewallerAPIV3 creates a new server-side FirewallerAPIV3
// facade, wiring a cloud-spec API scoped to the current model.
func NewStateFirewallerAPIV3(context facade.Context) (*FirewallerAPIV3, error) {
	st := context.State()
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	cloudSpecAPI := cloudspec.NewCloudSpec(
		context.Resources(),
		cloudspec.MakeCloudSpecGetterForModel(st),
		cloudspec.MakeCloudSpecWatcherForModel(st),
		common.AuthFuncForTag(model.ModelTag()),
	)
	shim := stateShim{st: st, State: firewall.StateShim(st, model)}
	return NewFirewallerAPI(shim, context.Resources(), context.Auth(), cloudSpecAPI)
}
|
go
|
{
"resource": ""
}
|
q4146
|
NewStateFirewallerAPIV4
|
train
|
// NewStateFirewallerAPIV4 creates a new server-side FirewallerAPIV4
// facade, layering controller-config access over the V3 facade.
func NewStateFirewallerAPIV4(context facade.Context) (*FirewallerAPIV4, error) {
	v3, err := NewStateFirewallerAPIV3(context)
	if err != nil {
		return nil, err
	}
	v4 := &FirewallerAPIV4{
		ControllerConfigAPI: common.NewStateControllerConfig(context.State()),
		FirewallerAPIV3: v3,
	}
	return v4, nil
}
|
go
|
{
"resource": ""
}
|
q4147
|
NewStateFirewallerAPIV5
|
train
|
// NewStateFirewallerAPIV5 creates a new server-side FirewallerAPIV5
// facade on top of the V4 facade.
func NewStateFirewallerAPIV5(context facade.Context) (*FirewallerAPIV5, error) {
	v4, err := NewStateFirewallerAPIV4(context)
	if err != nil {
		return nil, err
	}
	return &FirewallerAPIV5{FirewallerAPIV4: v4}, nil
}
|
go
|
{
"resource": ""
}
|
q4148
|
NewFirewallerAPI
|
train
|
// NewFirewallerAPI creates a new server-side FirewallerAPIV3 facade.
// The firewaller runs only in the controller, so non-controller callers
// are rejected with ErrPerm. Access checkers are built per tag kind and
// shared across the embedded common facades.
func NewFirewallerAPI(
	st State,
	resources facade.Resources,
	authorizer facade.Authorizer,
	cloudSpecAPI cloudspec.CloudSpecAPI,
) (*FirewallerAPIV3, error) {
	if !authorizer.AuthController() {
		// Firewaller must run as a controller.
		return nil, common.ErrPerm
	}
	// Set up the various authorization checkers.
	accessModel := common.AuthFuncForTagKind(names.ModelTagKind)
	accessUnit := common.AuthFuncForTagKind(names.UnitTagKind)
	accessApplication := common.AuthFuncForTagKind(names.ApplicationTagKind)
	accessMachine := common.AuthFuncForTagKind(names.MachineTagKind)
	accessRelation := common.AuthFuncForTagKind(names.RelationTagKind)
	accessUnitApplicationOrMachineOrRelation := common.AuthAny(accessUnit, accessApplication, accessMachine, accessRelation)
	// Life() is supported for units, applications or machines.
	lifeGetter := common.NewLifeGetter(
		st,
		accessUnitApplicationOrMachineOrRelation,
	)
	// ModelConfig() and WatchForModelConfigChanges() are allowed
	// with unrestricted access.
	modelWatcher := common.NewModelWatcher(
		st,
		resources,
		authorizer,
	)
	// Watch() is supported for applications only.
	entityWatcher := common.NewAgentEntityWatcher(
		st,
		resources,
		accessApplication,
	)
	// WatchUnits() is supported for machines.
	unitsWatcher := common.NewUnitsWatcher(st,
		resources,
		accessMachine,
	)
	// WatchModelMachines() is allowed with unrestricted access.
	machinesWatcher := common.NewModelMachinesWatcher(
		st,
		resources,
		authorizer,
	)
	// InstanceId() is supported for machines.
	instanceIdGetter := common.NewInstanceIdGetter(
		st,
		accessMachine,
	)
	return &FirewallerAPIV3{
		LifeGetter: lifeGetter,
		ModelWatcher: modelWatcher,
		AgentEntityWatcher: entityWatcher,
		UnitsWatcher: unitsWatcher,
		ModelMachinesWatcher: machinesWatcher,
		InstanceIdGetter: instanceIdGetter,
		CloudSpecAPI: cloudSpecAPI,
		st: st,
		resources: resources,
		authorizer: authorizer,
		accessUnit: accessUnit,
		accessApplication: accessApplication,
		accessMachine: accessMachine,
		accessModel: accessModel,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4149
|
WatchOpenedPorts
|
train
|
// WatchOpenedPorts starts a strings watcher on opened ports for each
// requested model entity, returning the watcher id and initial changes
// per slot. Unparseable or unauthorized tags yield ErrPerm in their
// result slot.
func (f *FirewallerAPIV3) WatchOpenedPorts(args params.Entities) (params.StringsWatchResults, error) {
	results := params.StringsWatchResults{
		Results: make([]params.StringsWatchResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return results, nil
	}
	canWatch, err := f.accessModel()
	if err != nil {
		return params.StringsWatchResults{}, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		tag, tagErr := names.ParseTag(entity.Tag)
		if tagErr != nil || !canWatch(tag) {
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		watcherID, initial, watchErr := f.watchOneModelOpenedPorts(tag)
		if watchErr != nil {
			results.Results[i].Error = common.ServerError(watchErr)
			continue
		}
		results.Results[i].StringsWatcherId = watcherID
		results.Results[i].Changes = initial
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4150
|
GetMachinePorts
|
train
|
// GetMachinePorts returns the port ranges opened on the requested
// machines, optionally restricted to a subnet, as unit-tag/port-range
// pairs sorted by port range. Per-machine failures are reported in the
// corresponding result slot.
func (f *FirewallerAPIV3) GetMachinePorts(args params.MachinePortsParams) (params.MachinePortsResults, error) {
	result := params.MachinePortsResults{
		Results: make([]params.MachinePortsResult, len(args.Params)),
	}
	canAccess, err := f.accessMachine()
	if err != nil {
		return params.MachinePortsResults{}, err
	}
	for i, param := range args.Params {
		machineTag, err := names.ParseMachineTag(param.MachineTag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		// An empty subnet tag means "all subnets" further down.
		var subnetTag names.SubnetTag
		if param.SubnetTag != "" {
			subnetTag, err = names.ParseSubnetTag(param.SubnetTag)
			if err != nil {
				result.Results[i].Error = common.ServerError(err)
				continue
			}
		}
		machine, err := f.getMachine(canAccess, machineTag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		ports, err := machine.OpenedPorts(subnetTag.Id())
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		if ports != nil {
			// Sort the ranges so the response is deterministic.
			portRangeMap := ports.AllPortRanges()
			var portRanges []network.PortRange
			for portRange := range portRangeMap {
				portRanges = append(portRanges, portRange)
			}
			network.SortPortRanges(portRanges)
			for _, portRange := range portRanges {
				unitTag := names.NewUnitTag(portRangeMap[portRange]).String()
				result.Results[i].Ports = append(result.Results[i].Ports,
					params.MachinePortRange{
						UnitTag: unitTag,
						PortRange: params.FromNetworkPortRange(portRange),
					})
			}
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4151
|
GetExposed
|
train
|
// GetExposed reports, per requested application, whether it is exposed.
// Unparseable tags are reported as permission errors.
func (f *FirewallerAPIV3) GetExposed(args params.Entities) (params.BoolResults, error) {
	out := params.BoolResults{
		Results: make([]params.BoolResult, len(args.Entities)),
	}
	auth, err := f.accessApplication()
	if err != nil {
		return params.BoolResults{}, err
	}
	for i, e := range args.Entities {
		res := &out.Results[i]
		appTag, parseErr := names.ParseApplicationTag(e.Tag)
		if parseErr != nil {
			// Mask the parse failure as a permission error.
			res.Error = common.ServerError(common.ErrPerm)
			continue
		}
		app, getErr := f.getApplication(auth, appTag)
		if getErr == nil {
			res.Result = app.IsExposed()
		}
		// ServerError(nil) leaves Error unset on success.
		res.Error = common.ServerError(getErr)
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q4152
|
WatchIngressAddressesForRelations
|
train
|
// WatchIngressAddressesForRelations starts, for each relation, a
// StringsWatcher reporting changes to the relation's ingress networks.
// The watcher's initial event is returned inline alongside its id.
func (f *FirewallerAPIV4) WatchIngressAddressesForRelations(relations params.Entities) (params.StringsWatchResults, error) {
results := params.StringsWatchResults{
make([]params.StringsWatchResult, len(relations.Entities)),
}
// one sets up the watcher for a single relation tag and consumes the
// initial event before registering the watcher as a resource.
one := func(tag string) (id string, changes []string, _ error) {
logger.Debugf("Watching ingress addresses for %+v from model %v", tag, f.st.ModelUUID())
relationTag, err := names.ParseRelationTag(tag)
if err != nil {
return "", nil, errors.Trace(err)
}
rel, err := f.st.KeyRelation(relationTag.Id())
if err != nil {
return "", nil, errors.Trace(err)
}
w := rel.WatchRelationIngressNetworks()
// A closed Changes channel means the watcher died; surface its error.
changes, ok := <-w.Changes()
if !ok {
return "", nil, common.ServerError(watcher.EnsureErr(w))
}
return f.resources.Register(w), changes, nil
}
for i, e := range relations.Entities {
watcherId, changes, err := one(e.Tag)
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
results.Results[i].StringsWatcherId = watcherId
results.Results[i].Changes = changes
}
return results, nil
}
|
go
|
{
"resource": ""
}
|
q4153
|
MacaroonForRelations
|
train
|
// MacaroonForRelations returns the macaroon stored for each requested
// relation.
func (f *FirewallerAPIV4) MacaroonForRelations(args params.Entities) (params.MacaroonResults, error) {
	results := params.MacaroonResults{
		Results: make([]params.MacaroonResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		res := &results.Results[i]
		tag, err := names.ParseRelationTag(arg.Tag)
		if err != nil {
			res.Error = common.ServerError(err)
			continue
		}
		mac, err := f.st.GetMacaroon(tag)
		if err != nil {
			res.Error = common.ServerError(err)
			continue
		}
		res.Result = mac
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4154
|
SetRelationsStatus
|
train
|
// SetRelationsStatus updates the status of each relation named in args.
func (f *FirewallerAPIV4) SetRelationsStatus(args params.SetStatus) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseRelationTag(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		rel, err := f.st.KeyRelation(tag.Id())
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		info := status.StatusInfo{
			Status:  status.Status(arg.Status),
			Message: arg.Info,
		}
		// ServerError(nil) leaves Error unset on success.
		results.Results[i].Error = common.ServerError(rel.SetStatus(info))
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4155
|
FirewallRules
|
train
|
// FirewallRules returns the firewall rules configured for the given
// well-known services; services with no configured rule are omitted.
func (f *FirewallerAPIV4) FirewallRules(args params.KnownServiceArgs) (params.ListFirewallRulesResults, error) {
	var out params.ListFirewallRulesResults
	for _, svc := range args.KnownServices {
		rule, err := f.st.FirewallRule(state.WellKnownServiceType(svc))
		switch {
		case errors.IsNotFound(err):
			// No rule for this service; skip it silently.
			continue
		case err != nil:
			return out, common.ServerError(err)
		}
		out.Rules = append(out.Rules, params.FirewallRule{
			KnownService:   svc,
			WhitelistCIDRS: rule.WhitelistCIDRs,
		})
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q4156
|
NewDBInfo
|
train
|
// NewDBInfo returns the database information backups needs to dump the
// juju state database, derived from the given Mongo connection info,
// session, and server version.
func NewDBInfo(mgoInfo *mongo.MongoInfo, session DBSession, version mongo.Version) (*DBInfo, error) {
	targets, err := getBackupTargetDatabases(session)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Guard against malformed connection info: blindly indexing
	// Addrs[0] would panic on an empty slice.
	if len(mgoInfo.Addrs) == 0 {
		return nil, errors.New("missing mongo address")
	}
	info := DBInfo{
		Address:      mgoInfo.Addrs[0],
		Password:     mgoInfo.Password,
		Targets:      targets,
		MongoVersion: version,
	}
	// TODO(dfc) Backup should take a Tag.
	if mgoInfo.Tag != nil {
		info.Username = mgoInfo.Tag.String()
	}
	return &info, nil
}
|
go
|
{
"resource": ""
}
|
q4157
|
NewDBDumper
|
train
|
// NewDBDumper returns a DBDumper that shells out to mongodump, or an
// error if the mongodump binary cannot be located.
func NewDBDumper(info *DBInfo) (DBDumper, error) {
	binPath, err := getMongodumpPath()
	if err != nil {
		return nil, errors.Annotate(err, "mongodump not available")
	}
	return &mongoDumper{
		DBInfo:  info,
		binPath: binPath,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4158
|
Dump
|
train
|
// Dump writes a mongodump of the database under baseDumpDir, then
// removes the per-database dump directories for any database that is
// not in md.Targets.
func (md *mongoDumper) Dump(baseDumpDir string) error {
if err := md.dump(baseDumpDir); err != nil {
return errors.Trace(err)
}
// Discover which databases actually ended up in the dump.
found, err := listDatabases(baseDumpDir)
if err != nil {
return errors.Trace(err)
}
// Strip the ignored database from the dump dir.
ignored := found.Difference(md.Targets)
// Admin must be removed only if the mongo version is 3.x or
// above, since 2.x will not restore properly without admin.
if md.DBInfo.MongoVersion.NewerThan(mongo.Mongo26) == -1 {
ignored.Remove("admin")
}
err = stripIgnored(ignored, baseDumpDir)
return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4159
|
listDatabases
|
train
|
// listDatabases returns the names of the databases dumped under
// dumpDir. Each database dump is a directory; plain files (notably
// oplog.bson) are skipped.
func listDatabases(dumpDir string) (set.Strings, error) {
	entries, err := ioutil.ReadDir(dumpDir)
	if err != nil {
		return set.Strings{}, errors.Trace(err)
	}
	found := make(set.Strings)
	for _, entry := range entries {
		if entry.IsDir() {
			found.Add(entry.Name())
		}
	}
	return found, nil
}
|
go
|
{
"resource": ""
}
|
q4160
|
NewDBRestorer
|
train
|
// NewDBRestorer returns a DBRestorer appropriate for the mongo major
// version being restored, refusing to restore across mismatched
// major/minor versions.
func NewDBRestorer(args RestorerArgs) (DBRestorer, error) {
mongorestorePath, err := getMongorestorePath()
if err != nil {
return nil, errors.Annotate(err, "mongorestore not available")
}
installedMongo := mongoInstalledVersion()
logger.Debugf("args: is %#v", args)
logger.Infof("installed mongo is %s", installedMongo)
// NewerThan will check Major and Minor so migration between micro versions
// will work, before changing this beware, Mongo has been known to break
// compatibility between minors.
if args.Version.NewerThan(installedMongo) != 0 {
return nil, errors.NotSupportedf("restore mongo version %s into version %s", args.Version.String(), installedMongo.String())
}
var restorer DBRestorer
// Shared configuration for both version-specific restorers.
mgoRestorer := mongoRestorer{
DialInfo: args.DialInfo,
binPath: mongorestorePath,
tagUser: args.TagUser,
tagUserPassword: args.TagUserPassword,
runCommandFn: args.RunCommandFn,
}
// Pick the restorer implementation by mongo major version.
switch args.Version.Major {
case 2:
restorer = &mongoRestorer24{
mongoRestorer: mgoRestorer,
startMongo: args.StartMongo,
stopMongo: args.StopMongo,
}
case 3:
restorer = &mongoRestorer32{
mongoRestorer: mgoRestorer,
getDB: args.GetDB,
newMongoSession: args.NewMongoSession,
}
default:
return nil, errors.Errorf("cannot restore from mongo version %q", args.Version.String())
}
return restorer, nil
}
|
go
|
{
"resource": ""
}
|
q4161
|
ensureOplogPermissions
|
train
|
// ensureOplogPermissions creates an "oploger" role with blanket
// privileges, grants it to the restore user and to "admin", and resets
// the admin credentials, so the oplog can be replayed during restore.
func (md *mongoRestorer32) ensureOplogPermissions(dialInfo *mgo.DialInfo) error {
s, err := md.newMongoSession(dialInfo)
if err != nil {
return errors.Trace(err)
}
defer s.Close()
// Role allowing any action on any resource; required to replay the oplog.
roles := bson.D{
{"createRole", "oploger"},
{"privileges", []bson.D{
{
{"resource", bson.M{"anyResource": true}},
{"actions", []string{"anyAction"}},
},
}},
{"roles", []string{}},
}
var mgoErr bson.M
err = s.Run(roles, &mgoErr)
// A duplicate error means the role already exists, which is fine.
if err != nil && !mgo.IsDup(err) {
return errors.Trace(err)
}
// Mongo signals success via an "ok": 1 field in the reply document.
result, ok := mgoErr["ok"]
success, isFloat := result.(float64)
if (!ok || !isFloat || success != 1) && mgoErr != nil && !mgo.IsDup(err) {
return errors.Errorf("could not create special role to replay oplog, result was: %#v", mgoErr)
}
// This will replace old user with the new credentials
admin := md.getDB("admin", s)
grant := bson.D{
{"grantRolesToUser", md.DialInfo.Username},
{"roles", []string{"oploger"}},
}
err = s.Run(grant, &mgoErr)
if err != nil {
return errors.Trace(err)
}
result, ok = mgoErr["ok"]
success, isFloat = result.(float64)
if (!ok || !isFloat || success != 1) && mgoErr != nil {
return errors.Errorf("could not grant special role to %q, result was: %#v", md.DialInfo.Username, mgoErr)
}
// Grant the same role to the "admin" user as well.
grant = bson.D{
{"grantRolesToUser", "admin"},
{"roles", []string{"oploger"}},
}
err = s.Run(grant, &mgoErr)
if err != nil {
return errors.Trace(err)
}
result, ok = mgoErr["ok"]
success, isFloat = result.(float64)
if (!ok || !isFloat || success != 1) && mgoErr != nil {
return errors.Errorf("could not grant special role to \"admin\", result was: %#v", mgoErr)
}
if err := admin.UpsertUser(&mgo.User{
Username: md.DialInfo.Username,
Password: md.DialInfo.Password,
}); err != nil {
return errors.Errorf("cannot set new admin credentials: %v", err)
}
return nil
}
|
go
|
{
"resource": ""
}
|
q4162
|
NewUniter
|
train
|
// NewUniter creates a new Uniter which will install, run, and upgrade
// a charm on behalf of the unit named in uniterParams.
func NewUniter(uniterParams *UniterParams) (*Uniter, error) {
	startFunc := newUniter(uniterParams)
	w, err := startFunc()
	// Check the error before the type assertion: asserting on a nil
	// interface value would panic when startFunc fails.
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w.(*Uniter), nil
}
|
go
|
{
"resource": ""
}
|
q4163
|
StartUniter
|
train
|
// StartUniter registers a uniter worker for the unit with the runner.
func StartUniter(runner *worker.Runner, params *UniterParams) error {
	startFunc := newUniter(params)
	logger.Debugf("starting uniter for %q", params.UnitTag.Id())
	startErr := runner.StartWorker(params.UnitTag.Id(), startFunc)
	return errors.Annotate(startErr, "error starting uniter worker")
}
|
go
|
{
"resource": ""
}
|
q4164
|
stopUnitError
|
train
|
// stopUnitError returns the error used to stop this uniter's unit,
// which differs between CAAS and machine (IAAS) models.
func (u *Uniter) stopUnitError() error {
	logger.Debugf("u.modelType: %s", u.modelType)
	switch u.modelType {
	case model.CAAS:
		return ErrCAASUnitDead
	default:
		return jworker.ErrTerminateAgent
	}
}
|
go
|
{
"resource": ""
}
|
q4165
|
acquireExecutionLock
|
train
|
// acquireExecutionLock takes the machine hook execution lock, tagging
// the acquisition with action for diagnostics. The acquisition is
// cancelled if the uniter's catacomb starts dying, so we never block
// forever. The returned function releases the lock.
func (u *Uniter) acquireExecutionLock(action string) (func(), error) {
	spec := machinelock.Spec{
		Cancel:  u.catacomb.Dying(),
		Worker:  "uniter",
		Comment: action,
	}
	release, acquireErr := u.hookLock.Acquire(spec)
	if acquireErr != nil {
		return nil, errors.Trace(acquireErr)
	}
	return release, nil
}
|
go
|
{
"resource": ""
}
|
q4166
|
ReadLegacyCloudCredentials
|
train
|
// ReadLegacyCloudCredentials reads the pre-existing LXD client/server
// certificate files from the juju conf directory (via readFile) and
// packages them as a certificate-type cloud credential. Missing files
// are reported as not-found errors.
func ReadLegacyCloudCredentials(readFile func(string) ([]byte, error)) (cloud.Credential, error) {
	confDir := jujupaths.MustSucceed(jujupaths.ConfDir(version.SupportedLTS()))
	// load reads one credential file, annotating failures with what.
	load := func(file, what string) (string, error) {
		fullPath := path.Join(confDir, file)
		data, err := readFile(fullPath)
		if err != nil {
			if os.IsNotExist(err) {
				err = errors.NotFoundf("%s", fullPath)
			}
			return "", errors.Annotate(errors.Trace(err), what)
		}
		return string(data), nil
	}
	clientCert, err := load("lxd-client.crt", "reading client certificate")
	if err != nil {
		return cloud.Credential{}, err
	}
	clientKey, err := load("lxd-client.key", "reading client key")
	if err != nil {
		return cloud.Credential{}, err
	}
	serverCert, err := load("lxd-server.crt", "reading server certificate")
	if err != nil {
		return cloud.Credential{}, err
	}
	return cloud.NewCredential(cloud.CertificateAuthType, map[string]string{
		credAttrServerCert: serverCert,
		credAttrClientCert: clientCert,
		credAttrClientKey:  clientKey,
	}), nil
}
|
go
|
{
"resource": ""
}
|
q4167
|
ValidateExistingModelCredential
|
train
|
// ValidateExistingModelCredential checks that the credential currently
// set on the model is valid and usable against the cloud. A model with
// no credential set validates trivially.
func ValidateExistingModelCredential(backend PersistentBackend, callCtx context.ProviderCallContext) (params.ErrorResults, error) {
model, err := backend.Model()
if err != nil {
return params.ErrorResults{}, errors.Trace(err)
}
credentialTag, isSet := model.CloudCredential()
if !isSet {
// No credential to validate; nothing to report.
return params.ErrorResults{}, nil
}
storedCredential, err := backend.CloudCredential(credentialTag)
if err != nil {
return params.ErrorResults{}, errors.Trace(err)
}
if !storedCredential.IsValid() {
return params.ErrorResults{}, errors.NotValidf("credential %q", storedCredential.Name)
}
// Re-run the full new-credential validation with the stored values.
credential := cloud.NewCredential(cloud.AuthType(storedCredential.AuthType), storedCredential.Attributes)
return ValidateNewModelCredential(backend, callCtx, credentialTag, &credential)
}
|
go
|
{
"resource": ""
}
|
q4168
|
ValidateNewModelCredential
|
train
|
// ValidateNewModelCredential checks that the given credential can open
// the model's cloud, dispatching to the CAAS or IAAS check depending on
// the model type.
func ValidateNewModelCredential(backend PersistentBackend, callCtx context.ProviderCallContext, credentialTag names.CloudCredentialTag, credential *cloud.Credential) (params.ErrorResults, error) {
	openParams, err := buildOpenParams(backend, credentialTag, credential)
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	model, err := backend.Model()
	if err != nil {
		return params.ErrorResults{}, errors.Trace(err)
	}
	modelType := model.Type()
	if modelType == state.ModelTypeCAAS {
		return checkCAASModelCredential(openParams)
	}
	if modelType == state.ModelTypeIAAS {
		return checkIAASModelCredential(openParams, backend, callCtx)
	}
	return params.ErrorResults{}, errors.NotSupportedf("model type %q", modelType)
}
|
go
|
{
"resource": ""
}
|
q4169
|
checkMachineInstances
|
train
|
// checkMachineInstances cross-checks the machines recorded in state
// against the instances visible in the cloud, reporting (as individual
// results) instances with no machine and machines with no instance.
func checkMachineInstances(backend PersistentBackend, provider CloudProvider, callCtx context.ProviderCallContext) (params.ErrorResults, error) {
fail := func(original error) (params.ErrorResults, error) {
return params.ErrorResults{}, original
}
// Get machines from state
machines, err := backend.AllMachines()
if err != nil {
return fail(errors.Trace(err))
}
var results []params.ErrorResult
serverError := func(received error) params.ErrorResult {
return params.ErrorResult{Error: common.ServerError(received)}
}
// Map instance id -> machine id for every cloud-backed machine.
machinesByInstance := make(map[string]string)
for _, machine := range machines {
if machine.IsContainer() {
// Containers don't correspond to instances at the
// provider level.
continue
}
if manual, err := machine.IsManual(); err != nil {
return fail(errors.Trace(err))
} else if manual {
// Manually provisioned machines are unknown to the cloud.
continue
}
instanceId, err := machine.InstanceId()
if errors.IsNotProvisioned(err) {
// Skip over this machine; we wouldn't expect the cloud
// to know about it.
continue
} else if err != nil {
results = append(results, serverError(errors.Annotatef(err, "getting instance id for machine %s", machine.Id())))
continue
}
machinesByInstance[string(instanceId)] = machine.Id()
}
// Check can see all machines' instances
instances, err := provider.AllInstances(callCtx)
if err != nil {
return fail(errors.Trace(err))
}
// First pass: instances the cloud knows but state does not.
instanceIds := set.NewStrings()
for _, instance := range instances {
id := string(instance.Id())
instanceIds.Add(id)
if _, found := machinesByInstance[id]; !found {
results = append(results, serverError(errors.Errorf("no machine with instance %q", id)))
}
}
// Second pass: machines state knows but the cloud does not.
for instanceId, name := range machinesByInstance {
if !instanceIds.Contains(instanceId) {
results = append(results, serverError(errors.Errorf("couldn't find instance %q for machine %s", instanceId, name)))
}
}
return params.ErrorResults{Results: results}, nil
}
|
go
|
{
"resource": ""
}
|
q4170
|
BootstrapEnv
|
train
|
// BootstrapEnv bootstraps the wrapped environment with the given args.
func (dp DefaultProvider) BootstrapEnv(ctx environs.BootstrapContext, callCtx context.ProviderCallContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	res, err := Bootstrap(ctx, dp.Env, callCtx, args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return res, nil
}
|
go
|
{
"resource": ""
}
|
q4171
|
DestroyEnv
|
train
|
// DestroyEnv tears down the wrapped environment.
func (dp DefaultProvider) DestroyEnv(ctx context.ProviderCallContext) error {
	// errors.Trace returns nil for a nil error, so this preserves the
	// success path unchanged.
	return errors.Trace(Destroy(dp.Env, ctx))
}
|
go
|
{
"resource": ""
}
|
q4172
|
addPackageCommandsCommon
|
train
|
// addPackageCommandsCommon applies the distribution-independent package
// configuration (mirror, update/upgrade policy, required packages,
// proxy settings) to the cloud-init config.
func addPackageCommandsCommon(
	cfg CloudConfig,
	packageProxySettings proxy.Settings,
	packageMirror string,
	addUpdateScripts bool,
	addUpgradeScripts bool,
	series string,
) {
	cfg.SetPackageMirror(packageMirror)
	// LTS series that rely on the cloud-tools archive cannot bootstrap
	// or provision without a package-list refresh, regardless of the
	// environ setting.
	doUpdate := addUpdateScripts || config.SeriesRequiresCloudArchiveTools(series)
	cfg.SetSystemUpdate(doUpdate)
	cfg.SetSystemUpgrade(addUpgradeScripts)
	// Packages juju itself requires are installed unconditionally.
	cfg.addRequiredPackages()
	// TODO(bogdanteleaga): Deal with proxy settings on CentOS
	cfg.updateProxySettings(packageProxySettings)
}
|
go
|
{
"resource": ""
}
|
q4173
|
renderScriptCommon
|
train
|
// renderScriptCommon renders the cloud-init configuration as a single
// bash script: bootcmds, then package commands, then runcmds, with all
// output redirected per cfg's OutAll settings.
func renderScriptCommon(cfg CloudConfig) (string, error) {
// TODO(axw): 2013-08-23 bug 1215777
// Carry out configuration for ssh-keys-per-user,
// machine-updates-authkeys, using cloud-init config.
//
// We should work with smoser to get a supported
// command in (or next to) cloud-init for manually
// invoking cloud-config. This would address the
// above comment by removing the need to generate a
// script "by hand".
// Bootcmds must be run before anything else,
// as they may affect package installation.
bootcmds := cfg.BootCmds()
// Depending on cfg, potentially add package sources and packages.
pkgcmds, err := cfg.getCommandsForAddingPackages()
if err != nil {
return "", err
}
// Runcmds come last.
runcmds := cfg.RunCmds()
// We prepend "set -xe". This is already in runcmds,
// but added here to avoid relying on that to be
// invariant.
script := []string{"#!/bin/bash", "set -e"}
// We must initialise progress reporting before entering
// the subshell and redirecting stderr.
script = append(script, InitProgressCmd())
stdout, stderr := cfg.Output(OutAll)
// Wrap the commands in subshells so stdout/stderr redirection applies
// to all of them at once; a second subshell is only needed when
// stderr has its own destination.
script = append(script, "(")
if stderr != "" {
script = append(script, "(")
}
script = append(script, bootcmds...)
script = append(script, pkgcmds...)
script = append(script, runcmds...)
if stderr != "" {
script = append(script, ") "+stdout)
script = append(script, ") "+stderr)
} else {
script = append(script, ") "+stdout+" 2>&1")
}
return strings.Join(script, "\n"), nil
}
|
go
|
{
"resource": ""
}
|
q4174
|
restrictRoot
|
train
|
// restrictRoot wraps root so every method lookup is vetted by check.
func restrictRoot(root rpc.Root, check func(string, string) error) *restrictedRoot {
	wrapped := restrictedRoot{
		Root:  root,
		check: check,
	}
	return &wrapped
}
|
go
|
{
"resource": ""
}
|
q4175
|
FindMethod
|
train
|
// FindMethod vets the requested facade/method via r.check, then
// delegates to the wrapped root.
func (r *restrictedRoot) FindMethod(facadeName string, version int, methodName string) (rpcreflect.MethodCaller, error) {
	checkErr := r.check(facadeName, methodName)
	if checkErr != nil {
		return nil, checkErr
	}
	return r.Root.FindMethod(facadeName, version, methodName)
}
|
go
|
{
"resource": ""
}
|
q4176
|
restrictAll
|
train
|
// restrictAll returns a root that rejects every method call with err.
func restrictAll(root rpc.Root, err error) *restrictedRoot {
	denyAll := func(string, string) error { return err }
	return restrictRoot(root, denyAll)
}
|
go
|
{
"resource": ""
}
|
q4177
|
osVal
|
train
|
// osVal looks up the value of valname appropriate to the OS of the
// given series: Windows values for Windows, *nix values otherwise.
func osVal(ser string, valname osVarType) (string, error) {
	osType, err := series.GetOSFromSeries(ser)
	if err != nil {
		return "", err
	}
	if osType == jujuos.Windows {
		return winVals[valname], nil
	}
	return nixVals[valname], nil
}
|
go
|
{
"resource": ""
}
|
q4178
|
makeWatcherAPICaller
|
train
|
// makeWatcherAPICaller returns a closure that invokes requests against
// the named watcher facade at the best version the server supports,
// resolved once up front.
func makeWatcherAPICaller(caller base.APICaller, facadeName, watcherId string) watcherAPICall {
	version := caller.BestFacadeVersion(facadeName)
	return func(request string, result interface{}) error {
		// NOTE: &result (pointer to the interface) matches the original
		// call shape exactly.
		return caller.APICall(facadeName, version, watcherId, request, nil, &result)
	}
}
|
go
|
{
"resource": ""
}
|
q4179
|
init
|
train
|
// init prepares the watcher's input channel and panics if the required
// newResult/call hooks were not configured — both are programmer errors.
func (w *commonWatcher) init() {
	w.in = make(chan interface{})
	switch {
	case w.newResult == nil:
		panic("newResult must be set")
	case w.call == nil:
		panic("call must be set")
	}
}
|
go
|
{
"resource": ""
}
|
q4180
|
commonLoop
|
train
|
// commonLoop runs the two goroutines shared by all API watchers: one
// that issues a Stop request when the tomb starts dying, and one that
// repeatedly calls Next and forwards results on w.in. It returns once
// both goroutines have finished, then closes w.in.
func (w *commonWatcher) commonLoop() {
defer close(w.in)
var wg sync.WaitGroup
wg.Add(1)
go func() {
// When the watcher has been stopped, we send a Stop request
// to the server, which will remove the watcher and return a
// CodeStopped error to any currently outstanding call to
// Next. If a call to Next happens just after the watcher has
// been stopped, we'll get a CodeNotFound error; Either way
// we'll return, wait for the stop request to complete, and
// the watcher will die with all resources cleaned up.
defer wg.Done()
<-w.tomb.Dying()
if err := w.call("Stop", nil); err != nil {
// Don't log an error if a watcher is stopped due to an agent restart.
if err.Error() != worker.ErrRestartAgent.Error() && err.Error() != rpc.ErrShutdown.Error() {
logger.Errorf("error trying to stop watcher: %v", err)
}
}
}()
wg.Add(1)
go func() {
// Because Next blocks until there are changes, we need to
// call it in a separate goroutine, so the watcher can be
// stopped normally.
defer wg.Done()
for {
result := w.newResult()
err := w.call("Next", &result)
if err != nil {
if params.IsCodeStopped(err) || params.IsCodeNotFound(err) {
if w.tomb.Err() != tomb.ErrStillAlive {
// The watcher has been stopped at the client end, so we're
// expecting one of the above two kinds of error.
// We might see the same errors if the server itself
// has been shut down, in which case we leave them
// untouched.
err = tomb.ErrDying
}
}
// Something went wrong, just report the error and bail out.
w.tomb.Kill(err)
return
}
select {
case <-w.tomb.Dying():
return
case w.in <- result:
// Report back the result we just got.
}
}
}()
wg.Wait()
}
|
go
|
{
"resource": ""
}
|
q4181
|
NewNotifyWatcher
|
train
|
func NewNotifyWatcher(caller base.APICaller, result params.NotifyWatchResult) watcher.NotifyWatcher {
w := ¬ifyWatcher{
caller: caller,
notifyWatcherId: result.NotifyWatcherId,
out: make(chan struct{}),
}
w.tomb.Go(w.loop)
return w
}
|
go
|
{
"resource": ""
}
|
q4182
|
NewRelationStatusWatcher
|
train
|
// NewRelationStatusWatcher returns a client-side RelationStatusWatcher
// driven by the server-side watcher identified in result; the initial
// changes are replayed to the loop.
func NewRelationStatusWatcher(
	caller base.APICaller, result params.RelationLifeSuspendedStatusWatchResult,
) watcher.RelationStatusWatcher {
	w := &relationStatusWatcher{
		caller:                  caller,
		relationStatusWatcherId: result.RelationStatusWatcherId,
		out:                     make(chan []watcher.RelationStatusChange),
	}
	initial := result.Changes
	w.tomb.Go(func() error {
		return w.loop(initial)
	})
	return w
}
|
go
|
{
"resource": ""
}
|
q4183
|
NewOfferStatusWatcher
|
train
|
// NewOfferStatusWatcher returns a client-side OfferStatusWatcher driven
// by the server-side watcher identified in result; the initial changes
// are replayed to the loop.
func NewOfferStatusWatcher(
	caller base.APICaller, result params.OfferStatusWatchResult,
) watcher.OfferStatusWatcher {
	w := &offerStatusWatcher{
		caller:               caller,
		offerStatusWatcherId: result.OfferStatusWatcherId,
		out:                  make(chan []watcher.OfferStatusChange),
	}
	initial := result.Changes
	w.tomb.Go(func() error {
		return w.loop(initial)
	})
	return w
}
|
go
|
{
"resource": ""
}
|
q4184
|
NewVolumeAttachmentsWatcher
|
train
|
// NewVolumeAttachmentsWatcher wraps the server-side watcher result in a
// client-side MachineStorageIdsWatcher for volume attachments.
func NewVolumeAttachmentsWatcher(caller base.APICaller, result params.MachineStorageIdsWatchResult) watcher.MachineStorageIdsWatcher {
return newMachineStorageIdsWatcher("VolumeAttachmentsWatcher", caller, result)
}
|
go
|
{
"resource": ""
}
|
q4185
|
NewMigrationStatusWatcher
|
train
|
// NewMigrationStatusWatcher returns a client-side
// MigrationStatusWatcher driven by the server-side watcher with the
// given id.
func NewMigrationStatusWatcher(caller base.APICaller, watcherId string) watcher.MigrationStatusWatcher {
	mw := &migrationStatusWatcher{
		caller: caller,
		id:     watcherId,
		out:    make(chan watcher.MigrationStatus),
	}
	mw.tomb.Go(mw.loop)
	return mw
}
|
go
|
{
"resource": ""
}
|
q4186
|
BridgeAndActivate
|
train
|
// BridgeAndActivate rewrites the netplan configuration in
// params.Directory to bridge each requested device, then runs
// "netplan generate && netplan apply" to activate it. On failure the
// original yaml files are rolled back and the (possibly partial)
// activation result is returned with the error. On success it returns
// (nil, nil) — there is no result to report.
func BridgeAndActivate(params ActivationParams) (*ActivationResult, error) {
	if len(params.Devices) == 0 {
		return nil, errors.Errorf("no devices specified")
	}
	netplan, err := ReadDirectory(params.Directory)
	if err != nil {
		return nil, err
	}
	for _, device := range params.Devices {
		deviceId, deviceType, err := netplan.FindDeviceByNameOrMAC(device.DeviceName, device.MACAddress)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// Dispatch on the device type; each branch sets err, which is
		// checked once below (the original repeated the check per case
		// and carried a redundant `var deviceId string`).
		switch deviceType {
		case TypeEthernet:
			err = netplan.BridgeEthernetById(deviceId, device.BridgeName)
		case TypeBond:
			err = netplan.BridgeBondById(deviceId, device.BridgeName)
		case TypeVLAN:
			err = netplan.BridgeVLANById(deviceId, device.BridgeName)
		default:
			return nil, errors.Errorf("unable to create bridge for %q, unknown device type %q", deviceId, deviceType)
		}
		if err != nil {
			return nil, err
		}
	}
	_, err = netplan.Write("")
	if err != nil {
		return nil, err
	}
	// Move the old yamls aside; roll back if that fails part-way.
	err = netplan.MoveYamlsToBak()
	if err != nil {
		netplan.Rollback()
		return nil, err
	}
	environ := os.Environ()
	// TODO(wpk) 2017-06-21 Is there a way to verify that apply is finished?
	// https://bugs.launchpad.net/netplan/+bug/1701436
	command := fmt.Sprintf("%snetplan generate && netplan apply && sleep 10", params.RunPrefix)
	result, err := scriptrunner.RunCommand(command, environ, params.Clock, params.Timeout)
	activationResult := ActivationResult{
		Stderr: string(result.Stderr),
		Stdout: string(result.Stdout),
		Code:   result.Code,
	}
	logger.Debugf("Netplan activation result %q %q %d", result.Stderr, result.Stdout, result.Code)
	if err != nil {
		netplan.Rollback()
		return &activationResult, errors.Errorf("bridge activation error: %s", err)
	}
	if result.Code != 0 {
		netplan.Rollback()
		return &activationResult, errors.Errorf("bridge activation error code %d", result.Code)
	}
	return nil, nil
}
|
go
|
{
"resource": ""
}
|
q4187
|
New
|
train
|
// New returns a Pollster that reads answers from in and writes prompts
// to out and validation messages to errOut.
func New(in io.Reader, out, errOut io.Writer) *Pollster {
	p := Pollster{
		scanner: bufio.NewScanner(byteAtATimeReader{in}),
		out:     out,
		errOut:  errOut,
		in:      in,
	}
	return &p
}
|
go
|
{
"resource": ""
}
|
q4188
|
Select
|
train
|
// Select prompts the user to pick one of l.Options, accepting the
// default (if any) on empty input.
func (p *Pollster) Select(l List) (string, error) {
	verify := VerifyOptions(l.Singular, l.Options, l.Default != "")
	return p.SelectVerify(l, verify)
}
|
go
|
{
"resource": ""
}
|
q4189
|
SelectVerify
|
train
|
// SelectVerify prints the option list, prompts the user, and validates
// the answer with verify; empty input yields l.Default.
func (p *Pollster) SelectVerify(l List, verify VerifyFunc) (string, error) {
	if err := listTmpl.Execute(p.out, l); err != nil {
		return "", err
	}
	question, err := sprint(selectTmpl, l)
	if err != nil {
		return "", errors.Trace(err)
	}
	answer, err := QueryVerify(question, p.scanner, p.out, p.errOut, verify)
	switch {
	case err != nil:
		return "", errors.Trace(err)
	case answer == "":
		return l.Default, nil
	}
	return answer, nil
}
|
go
|
{
"resource": ""
}
|
q4190
|
Enter
|
train
|
// Enter prompts for valueName, rejecting empty input.
func (p *Pollster) Enter(valueName string) (string, error) {
	nonEmpty := func(s string) (ok bool, msg string, err error) {
		return s != "", "", nil
	}
	return p.EnterVerify(valueName, nonEmpty)
}
|
go
|
{
"resource": ""
}
|
q4191
|
EnterPassword
|
train
|
// EnterPassword prompts for valueName without echoing when stdin is a
// terminal; otherwise it falls back to a plain Enter prompt.
func (p *Pollster) EnterPassword(valueName string) (string, error) {
	f, isFile := p.in.(*os.File)
	if !isFile || !terminal.IsTerminal(int(f.Fd())) {
		// Not an interactive terminal: read the value normally.
		return p.Enter(valueName)
	}
	// ReadPassword suppresses the newline echo; restore spacing on exit.
	defer fmt.Fprint(p.out, "\n\n")
	if _, err := fmt.Fprintf(p.out, "Enter "+valueName+": "); err != nil {
		return "", errors.Trace(err)
	}
	secret, err := terminal.ReadPassword(int(f.Fd()))
	if err != nil {
		return "", errors.Trace(err)
	}
	return string(secret), nil
}
|
go
|
{
"resource": ""
}
|
q4192
|
EnterDefault
|
train
|
// EnterDefault prompts for valueName, returning defVal when the user
// enters nothing; no additional validation is applied.
func (p *Pollster) EnterDefault(valueName, defVal string) (string, error) {
return p.EnterVerifyDefault(valueName, nil, defVal)
}
|
go
|
{
"resource": ""
}
|
q4193
|
EnterOptional
|
train
|
// EnterOptional prompts for valueName, accepting empty input with no
// validation.
func (p *Pollster) EnterOptional(valueName string) (string, error) {
	prompt := "Enter " + valueName + " (optional): "
	return QueryVerify(prompt, p.scanner, p.out, p.errOut, nil)
}
|
go
|
{
"resource": ""
}
|
q4194
|
EnterVerifyDefault
|
train
|
// EnterVerifyDefault prompts for valueName showing defVal as the
// default. Empty input returns defVal; non-empty input is checked by
// verify when verify is non-nil.
func (p *Pollster) EnterVerifyDefault(valueName string, verify VerifyFunc, defVal string) (string, error) {
	var check VerifyFunc
	if verify != nil {
		// Treat empty input as valid so the default can be taken.
		check = func(s string) (ok bool, errmsg string, err error) {
			if s == "" {
				return true, "", nil
			}
			return verify(s)
		}
	}
	answer, err := QueryVerify("Enter "+valueName+" ["+defVal+"]: ", p.scanner, p.out, p.errOut, check)
	if err != nil {
		return "", errors.Trace(err)
	}
	if answer == "" {
		return defVal, nil
	}
	return answer, nil
}
|
go
|
{
"resource": ""
}
|
q4195
|
VerifyOptions
|
train
|
// VerifyOptions returns a VerifyFunc that accepts, case-insensitively,
// any of the given options. The empty string is accepted iff hasDefault
// is true. singular names the kind of value for the error message.
func VerifyOptions(singular string, options []string, hasDefault bool) VerifyFunc {
	return func(s string) (ok bool, errmsg string, err error) {
		if s == "" {
			return hasDefault, "", nil
		}
		for _, opt := range options {
			// EqualFold compares case-insensitively without allocating
			// two lowered copies per comparison.
			if strings.EqualFold(opt, s) {
				return true, "", nil
			}
		}
		return false, fmt.Sprintf("Invalid %s: %q", singular, s), nil
	}
}
|
go
|
{
"resource": ""
}
|
q4196
|
names
|
train
|
// names returns the keys of m in sorted order.
func names(m map[string]*jsonschema.Schema) []string {
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	return keys
}
|
go
|
{
"resource": ""
}
|
q4197
|
convert
|
train
|
// convert parses s into the Go value corresponding to the jsonschema
// type t. Booleans accept y/yes/true/t and n/no/false/f in any case.
func convert(s string, t jsonschema.Type) (interface{}, error) {
	switch t {
	case jsonschema.StringType:
		return s, nil
	case jsonschema.IntegerType:
		return strconv.Atoi(s)
	case jsonschema.NumberType:
		return strconv.ParseFloat(s, 64)
	case jsonschema.BooleanType:
		switch strings.ToLower(s) {
		case "y", "yes", "true", "t":
			return true, nil
		case "n", "no", "false", "f":
			return false, nil
		}
		return nil, errors.Errorf("unknown value for boolean type: %q", s)
	}
	return nil, errors.Errorf("don't know how to convert value %q of type %q", s, t)
}
|
go
|
{
"resource": ""
}
|
q4198
|
NewUnitAgent
|
train
|
// NewUnitAgent creates a new UnitAgent value properly initialized, with
// a fresh prometheus registry and the default pre-upgrade steps.
func NewUnitAgent(ctx *cmd.Context, bufferedLogger *logsender.BufferedLogWriter) (*UnitAgent, error) {
prometheusRegistry, err := newPrometheusRegistry()
if err != nil {
return nil, errors.Trace(err)
}
return &UnitAgent{
AgentConf: NewAgentConf(""),
configChangedVal: voyeur.NewValue(true),
ctx: ctx,
// dead is closed by Done to signal agent completion.
dead: make(chan struct{}),
initialUpgradeCheckComplete: gate.NewLock(),
bufferedLogger: bufferedLogger,
prometheusRegistry: prometheusRegistry,
preUpgradeSteps: upgrades.PreUpgradeSteps,
}, nil
}
|
go
|
{
"resource": ""
}
|
q4199
|
Done
|
train
|
// Done records err as the agent's exit reason and closes the dead
// channel to unblock anything waiting on it.
// NOTE(review): a second call would panic on the double close —
// presumably callers guarantee Done runs exactly once; verify.
func (a *UnitAgent) Done(err error) {
a.errReason = err
close(a.dead)
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.