_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4200
|
Run
|
train
|
// Run runs a unit agent: it reads the agent configuration, sets up
// logging, starts the API workers and blocks until the runner stops,
// reporting the final error through a.Done.
func (a *UnitAgent) Run(ctx *cmd.Context) (err error) {
	// Defer a closure so the *final* value of the named return err is
	// reported. The original `defer a.Done(err)` evaluated err at the
	// point of the defer statement, so Done always received nil.
	defer func() { a.Done(err) }()
	if err := a.ReadConfig(a.Tag().String()); err != nil {
		return err
	}
	setupAgentLogging(a.CurrentConfig())
	a.runner.StartWorker("api", a.APIWorkers)
	err = cmdutil.AgentDone(logger, a.runner.Wait())
	return err
}
|
go
|
{
"resource": ""
}
|
q4201
|
APIWorkers
|
train
|
// APIWorkers builds the unit agent's API-side workers: it assembles the
// unit manifolds, installs them into a fresh dependency engine, starts
// the introspection worker alongside, and returns the engine as the
// top-level worker.
func (a *UnitAgent) APIWorkers() (worker.Worker, error) {
	// Callback used by the logging-config manifold to persist a new
	// logging config back into the agent's config file.
	updateAgentConfLogging := func(loggingConfig string) error {
		return a.AgentConf.ChangeConfig(func(setter agent.ConfigSetter) error {
			setter.SetLoggingConfig(loggingConfig)
			return nil
		})
	}
	agentConfig := a.AgentConf.CurrentConfig()
	a.upgradeComplete = upgradesteps.NewLock(agentConfig)
	machineLock, err := machinelock.New(machinelock.Config{
		AgentName:   a.Tag().String(),
		Clock:       clock.WallClock,
		Logger:      loggo.GetLogger("juju.machinelock"),
		LogFilename: agent.MachineLockLogFilename(agentConfig),
	})
	// There will only be an error if the required configuration
	// values are not passed in.
	if err != nil {
		return nil, errors.Trace(err)
	}
	manifolds := unitManifolds(unit.ManifoldsConfig{
		Agent:                agent.APIHostPortsSetter{a},
		LogSource:            a.bufferedLogger.Logs(),
		LeadershipGuarantee:  30 * time.Second,
		AgentConfigChanged:   a.configChangedVal,
		ValidateMigration:    a.validateMigration,
		PrometheusRegisterer: a.prometheusRegistry,
		UpdateLoggerConfig:   updateAgentConfLogging,
		PreviousAgentVersion: agentConfig.UpgradedToVersion(),
		PreUpgradeSteps:      a.preUpgradeSteps,
		UpgradeStepsLock:     a.upgradeComplete,
		UpgradeCheckLock:     a.initialUpgradeCheckComplete,
		MachineLock:          machineLock,
	})
	engine, err := dependency.NewEngine(dependencyEngineConfig())
	if err != nil {
		return nil, err
	}
	if err := dependency.Install(engine, manifolds); err != nil {
		// Best-effort teardown of the partially-installed engine
		// before reporting the install failure.
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, err
	}
	if err := startIntrospection(introspectionConfig{
		Agent:              a,
		Engine:             engine,
		NewSocketName:      DefaultIntrospectionSocketName,
		PrometheusGatherer: a.prometheusRegistry,
		MachineLock:        machineLock,
		WorkerFunc:         introspection.NewWorker,
	}); err != nil {
		// If the introspection worker failed to start, we just log error
		// but continue. It is very unlikely to happen in the real world
		// as the only issue is connecting to the abstract domain socket
		// and the agent is controlled by the OS to only have one.
		logger.Errorf("failed to start introspection worker: %v", err)
	}
	return engine, nil
}
|
go
|
{
"resource": ""
}
|
q4202
|
NewStateCrossControllerAPI
|
train
|
// NewStateCrossControllerAPI creates a new server-side CrossControllerAPI
// facade backed by global state.
func NewStateCrossControllerAPI(ctx facade.Context) (*CrossControllerAPI, error) {
	st := ctx.State()
	controllerInfo := func() ([]string, string, error) {
		return common.StateControllerInfo(st)
	}
	return NewCrossControllerAPI(
		ctx.Resources(),
		controllerInfo,
		st.WatchAPIHostPortsForClients,
	)
}
|
go
|
{
"resource": ""
}
|
q4203
|
NewCrossControllerAPI
|
train
|
// NewCrossControllerAPI returns a CrossControllerAPI that serves local
// controller information through the supplied accessor functions.
func NewCrossControllerAPI(
	resources facade.Resources,
	localControllerInfo localControllerInfoFunc,
	watchLocalControllerInfo watchLocalControllerInfoFunc,
) (*CrossControllerAPI, error) {
	api := &CrossControllerAPI{
		resources:                resources,
		localControllerInfo:      localControllerInfo,
		watchLocalControllerInfo: watchLocalControllerInfo,
	}
	return api, nil
}
|
go
|
{
"resource": ""
}
|
q4204
|
WatchControllerInfo
|
train
|
// WatchControllerInfo creates a watcher that notifies when the API info
// for the controller changes, registering it in the facade resources.
func (api *CrossControllerAPI) WatchControllerInfo() (params.NotifyWatchResults, error) {
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, 1),
	}
	w := api.watchLocalControllerInfo()
	// Consume the watcher's initial event; a closed channel means the
	// watcher has died, so surface its error instead of registering it.
	_, ok := <-w.Changes()
	if !ok {
		results.Results[0].Error = common.ServerError(watcher.EnsureErr(w))
		return results, nil
	}
	results.Results[0].NotifyWatcherId = api.resources.Register(w)
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4205
|
ControllerInfo
|
train
|
// ControllerInfo returns the API info (addresses and CA certificate) for
// the local controller; failures are reported in the result slot.
func (api *CrossControllerAPI) ControllerInfo() (params.ControllerAPIInfoResults, error) {
	results := params.ControllerAPIInfoResults{
		Results: make([]params.ControllerAPIInfoResult, 1),
	}
	addrs, caCert, err := api.localControllerInfo()
	if err == nil {
		results.Results[0].Addresses = addrs
		results.Results[0].CACert = caCert
	} else {
		results.Results[0].Error = common.ServerError(err)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4206
|
NewContextAPI
|
train
|
// NewContextAPI returns a new Context for the given API client and the
// agent's data directory.
func NewContextAPI(apiClient APIClient, dataDir string) *Context {
	c := Context{
		apiClient: apiClient,
		dataDir:   dataDir,
	}
	return &c
}
|
go
|
{
"resource": ""
}
|
q4207
|
Download
|
train
|
// Download fetches the named resource through the API client and returns
// the local path it was written to under the data directory.
func (c *Context) Download(name string) (string, error) {
	deps := &contextDeps{
		APIClient: c.apiClient,
		name:      name,
		dataDir:   c.dataDir,
	}
	localPath, err := internal.ContextDownload(deps)
	if err != nil {
		return "", errors.Trace(err)
	}
	return localPath, nil
}
|
go
|
{
"resource": ""
}
|
q4208
|
attachmentLife
|
train
|
// attachmentLife partitions the given machine-storage attachment IDs by
// their lifecycle state. An attachment whose life lookup reports
// not-found is treated as Dead.
func attachmentLife(ctx *context, ids []params.MachineStorageId) (
	alive, dying, dead []params.MachineStorageId, _ error,
) {
	lifeResults, err := ctx.config.Life.AttachmentLife(ids)
	if err != nil {
		return nil, nil, nil, errors.Annotate(err, "getting machine attachment life")
	}
	for i, res := range lifeResults {
		id := ids[i]
		life := res.Life
		if res.Error != nil {
			if !params.IsCodeNotFound(res.Error) {
				return nil, nil, nil, errors.Annotatef(
					res.Error, "getting life of %s attached to %s",
					id.AttachmentTag, id.MachineTag,
				)
			}
			// Attachment is gone entirely; classify it as dead.
			life = params.Dead
		}
		switch life {
		case params.Alive:
			alive = append(alive, id)
		case params.Dying:
			dying = append(dying, id)
		case params.Dead:
			dead = append(dead, id)
		}
	}
	return alive, dying, dead, nil
}
|
go
|
{
"resource": ""
}
|
q4209
|
removeEntities
|
train
|
// removeEntities removes the given storage entities from state. A nil
// error means every removal succeeded; the first failure aborts.
func removeEntities(ctx *context, tags []names.Tag) error {
	if len(tags) == 0 {
		return nil
	}
	logger.Debugf("removing entities: %v", tags)
	results, err := ctx.config.Life.Remove(tags)
	if err != nil {
		return errors.Annotate(err, "removing storage entities")
	}
	for i, res := range results {
		if res.Error == nil {
			continue
		}
		return errors.Annotatef(res.Error, "removing %s from state", names.ReadableString(tags[i]))
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4210
|
removeAttachments
|
train
|
// removeAttachments removes the given machine storage attachments from
// state. Individual not-found errors are ignored (already removed).
func removeAttachments(ctx *context, ids []params.MachineStorageId) error {
	if len(ids) == 0 {
		return nil
	}
	results, err := ctx.config.Life.RemoveAttachments(ids)
	if err != nil {
		return errors.Annotate(err, "removing attachments")
	}
	for i, res := range results {
		if res.Error == nil || params.IsCodeNotFound(res.Error) {
			// Already gone is fine.
			continue
		}
		return errors.Annotatef(
			res.Error, "removing attachment of %s to %s from state",
			ids[i].AttachmentTag, ids[i].MachineTag,
		)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4211
|
setStatus
|
train
|
// setStatus updates the given entity statuses; failures are logged
// rather than returned (best-effort).
func setStatus(ctx *context, statuses []params.EntityStatusArgs) {
	if len(statuses) == 0 {
		return
	}
	if err := ctx.config.Status.SetStatus(statuses); err != nil {
		logger.Errorf("failed to set status: %v", err)
	}
}
|
go
|
{
"resource": ""
}
|
q4212
|
NewNonCanonicalArchivePaths
|
train
|
// NewNonCanonicalArchivePaths composes the paths of the members of a
// backup archive rooted at rootDir.
func NewNonCanonicalArchivePaths(rootDir string) ArchivePaths {
	content := filepath.Join(rootDir, contentDir)
	return ArchivePaths{
		ContentDir:   content,
		FilesBundle:  filepath.Join(content, filesBundle),
		DBDumpDir:    filepath.Join(content, dbDumpDir),
		MetadataFile: filepath.Join(content, metadataFile),
	}
}
|
go
|
{
"resource": ""
}
|
q4213
|
NewArchiveWorkspaceReader
|
train
|
// NewArchiveWorkspaceReader creates a fresh workspace directory and
// unpacks the compressed backup archive into it.
func NewArchiveWorkspaceReader(archive io.Reader) (*ArchiveWorkspace, error) {
	ws, err := newArchiveWorkspace()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err := unpackCompressedReader(ws.RootDir, archive); err != nil {
		return ws, errors.Trace(err)
	}
	return ws, nil
}
|
go
|
{
"resource": ""
}
|
q4214
|
Close
|
train
|
// Close deletes the workspace directory and everything inside it.
func (ws *ArchiveWorkspace) Close() error {
	return errors.Trace(os.RemoveAll(ws.RootDir))
}
|
go
|
{
"resource": ""
}
|
q4215
|
UnpackFilesBundle
|
train
|
// UnpackFilesBundle unpacks the workspace's files bundle under targetRoot.
func (ws *ArchiveWorkspace) UnpackFilesBundle(targetRoot string) error {
	bundle, err := os.Open(ws.FilesBundle)
	if err != nil {
		return errors.Trace(err)
	}
	defer bundle.Close()
	return errors.Trace(tar.UntarFiles(bundle, targetRoot))
}
|
go
|
{
"resource": ""
}
|
q4216
|
OpenBundledFile
|
train
|
// OpenBundledFile returns a reader for the named file inside the files
// bundle. filename must be relative to the bundle root.
// NOTE(review): on success the underlying tar file is intentionally left
// open, as the returned reader appears to draw from it — confirm callers
// handle its lifetime.
func (ws *ArchiveWorkspace) OpenBundledFile(filename string) (io.Reader, error) {
	if filepath.IsAbs(filename) {
		return nil, errors.Errorf("filename must be relative, got %q", filename)
	}
	tarFile, err := os.Open(ws.FilesBundle)
	if err != nil {
		return nil, errors.Trace(err)
	}
	_, file, err := tar.FindFile(tarFile, filename)
	if err == nil {
		return file, nil
	}
	tarFile.Close()
	return nil, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4217
|
Metadata
|
train
|
// Metadata reads and returns the backup metadata stored in the workspace.
func (ws *ArchiveWorkspace) Metadata() (*Metadata, error) {
	f, err := os.Open(ws.MetadataFile)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer f.Close()
	meta, err := NewMetadataJSONReader(f)
	return meta, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4218
|
NewArchiveDataReader
|
train
|
// NewArchiveDataReader decompresses the gzip stream r fully into memory
// and returns an ArchiveData wrapping the raw bytes.
func NewArchiveDataReader(r io.Reader) (*ArchiveData, error) {
	gzr, err := gzip.NewReader(r)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer gzr.Close()
	raw, err := ioutil.ReadAll(gzr)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return NewArchiveData(raw), nil
}
|
go
|
{
"resource": ""
}
|
q4219
|
Metadata
|
train
|
// Metadata extracts and returns the backup metadata stored in the
// in-memory archive.
func (ad *ArchiveData) Metadata() (*Metadata, error) {
	_, metaFile, err := tar.FindFile(ad.NewBuffer(), ad.MetadataFile)
	if err != nil {
		return nil, errors.Trace(err)
	}
	meta, err := NewMetadataJSONReader(metaFile)
	return meta, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4220
|
Version
|
train
|
// Version returns the juju version recorded in the archive's metadata.
// Archives that predate stored metadata report legacyVersion.
func (ad *ArchiveData) Version() (*version.Number, error) {
	meta, err := ad.Metadata()
	switch {
	case errors.IsNotFound(err):
		return &legacyVersion, nil
	case err != nil:
		return nil, errors.Trace(err)
	}
	return &meta.Origin.Version, nil
}
|
go
|
{
"resource": ""
}
|
q4221
|
searchHook
|
train
|
// searchHook locates the hook executable under charmDir. On Windows it
// additionally probes the known suffixed variants in preference order.
func searchHook(charmDir, hook string) (string, error) {
	hookFile := filepath.Join(charmDir, hook)
	if jujuos.HostOS() != jujuos.Windows {
		// Non-Windows hosts never use suffixed hooks.
		return lookPath(hookFile)
	}
	for _, suffix := range windowsSuffixOrder {
		candidate := fmt.Sprintf("%s%s", hookFile, suffix)
		found, err := lookPath(candidate)
		switch {
		case err == nil:
			return found, nil
		case charmrunner.IsMissingHookError(err):
			// Try the next suffix.
			continue
		default:
			return "", err
		}
	}
	return "", charmrunner.NewMissingHookError(hook)
}
|
go
|
{
"resource": ""
}
|
q4222
|
ProvisioningInfo
|
train
|
// ProvisioningInfo returns the provisioning information for each given
// machine entity. Per-entity failures are recorded in the corresponding
// result slot rather than aborting the whole call.
func (p *ProvisionerAPI) ProvisioningInfo(args params.Entities) (params.ProvisioningInfoResults, error) {
	result := params.ProvisioningInfoResults{
		Results: make([]params.ProvisioningInfoResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, errors.Trace(err)
	}
	env, err := environs.GetEnviron(p.configGetter, environs.New)
	if err != nil {
		return result, errors.Annotate(err, "could not get environ")
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Unparseable tags are reported as a permission error.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			result.Results[i].Result, err = p.getProvisioningInfo(machine, env)
		}
		// err here comes from either getMachine or getProvisioningInfo;
		// ServerError(nil) is nil, so a success leaves Error unset.
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4223
|
machineTags
|
train
|
// machineTags returns the provider instance tags to apply when
// provisioning the machine: the standard model/controller instance tags
// plus the deployed principal unit names and a machine identifier.
func (p *ProvisionerAPI) machineTags(m *state.Machine, jobs []multiwatcher.MachineJob) (map[string]string, error) {
	// Names of all units deployed to the machine.
	//
	// TODO(axw) 2015-06-02 #1461358
	// We need a worker that periodically updates
	// instance tags with current deployment info.
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}
	unitNames := make([]string, 0, len(units))
	for _, unit := range units {
		// Only principal units are recorded in the tag value.
		if !unit.IsPrincipal() {
			continue
		}
		unitNames = append(unitNames, unit.Name())
	}
	// Sort for a deterministic tag value.
	sort.Strings(unitNames)
	cfg, err := p.m.ModelConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	controllerCfg, err := p.st.ControllerConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	machineTags := instancecfg.InstanceTags(cfg.UUID(), controllerCfg.ControllerUUID(), cfg, jobs)
	if len(unitNames) > 0 {
		machineTags[tags.JujuUnitsDeployed] = strings.Join(unitNames, " ")
	}
	// Identifier of the form "<model-name>-<machine-tag>".
	machineId := fmt.Sprintf("%s-%s", cfg.Name(), m.Tag().String())
	machineTags[tags.JujuMachine] = machineId
	return machineTags, nil
}
|
go
|
{
"resource": ""
}
|
q4224
|
machineSubnetsAndZones
|
train
|
// machineSubnetsAndZones maps provider subnet IDs to availability zones
// for the machine's space constraint. It returns nil, nil when the
// machine has no space constraints. Subnets lacking a provider ID or an
// availability zone are skipped with a warning.
func (p *ProvisionerAPI) machineSubnetsAndZones(m *state.Machine) (map[string][]string, error) {
	mcons, err := m.Constraints()
	if err != nil {
		return nil, errors.Annotate(err, "cannot get machine constraints")
	}
	includeSpaces := mcons.IncludeSpaces()
	if len(includeSpaces) < 1 {
		// Nothing to do.
		return nil, nil
	}
	// TODO(dimitern): For the network model MVP we only use the first
	// included space and ignore the rest.
	//
	// LKK Card: https://canonical.leankit.com/Boards/View/101652562/117352306
	// LP Bug: http://pad.lv/1498232
	spaceName := includeSpaces[0]
	if len(includeSpaces) > 1 {
		logger.Debugf(
			"using space %q from constraints for machine %q (ignoring remaining: %v)",
			spaceName, m.Id(), includeSpaces[1:],
		)
	}
	space, err := p.st.Space(spaceName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	subnets, err := space.Subnets()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(subnets) == 0 {
		return nil, errors.Errorf("cannot use space %q as deployment target: no subnets", spaceName)
	}
	subnetsToZones := make(map[string][]string, len(subnets))
	for _, subnet := range subnets {
		warningPrefix := fmt.Sprintf(
			"not using subnet %q in space %q for machine %q provisioning: ",
			subnet.CIDR(), spaceName, m.Id(),
		)
		providerId := subnet.ProviderId()
		if providerId == "" {
			logger.Warningf(warningPrefix + "no ProviderId set")
			continue
		}
		// TODO(dimitern): Once state.Subnet supports multiple zones,
		// use all of them below.
		//
		// LKK Card: https://canonical.leankit.com/Boards/View/101652562/119979611
		zone := subnet.AvailabilityZone()
		if zone == "" {
			logger.Warningf(warningPrefix + "no availability zone(s) set")
			continue
		}
		subnetsToZones[string(providerId)] = []string{zone}
	}
	return subnetsToZones, nil
}
|
go
|
{
"resource": ""
}
|
q4225
|
machineLXDProfileNames
|
train
|
// machineLXDProfileNames returns the LXD profile names for the charms of
// all units on the machine, writing each profile to the environ first.
// Environs that do not implement environs.LXDProfiler yield nil, nil.
func (p *ProvisionerAPI) machineLXDProfileNames(m *state.Machine, env environs.Environ) ([]string, error) {
	profileEnv, ok := env.(environs.LXDProfiler)
	if !ok {
		logger.Tracef("LXDProfiler not implemented by environ")
		return nil, nil
	}
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}
	var names []string
	for _, unit := range units {
		app, err := unit.Application()
		if err != nil {
			return nil, errors.Trace(err)
		}
		ch, _, err := app.Charm()
		if err != nil {
			return nil, errors.Trace(err)
		}
		profile := ch.LXDProfile()
		// Simplified from `profile == nil || (profile != nil && profile.Empty())`:
		// the inner nil check was redundant after short-circuiting on the first.
		if profile == nil || profile.Empty() {
			continue
		}
		pName := lxdprofile.Name(p.m.Name(), app.Name(), ch.Revision())
		// Lock here, we get a new env for every call to ProvisioningInfo().
		p.mu.Lock()
		err = profileEnv.MaybeWriteLXDProfile(pName, profile)
		p.mu.Unlock()
		if err != nil {
			return nil, errors.Trace(err)
		}
		names = append(names, pName)
	}
	return names, nil
}
|
go
|
{
"resource": ""
}
|
q4226
|
availableImageMetadata
|
train
|
// availableImageMetadata returns all image metadata usable for
// provisioning the given machine, sorted by priority.
func (p *ProvisionerAPI) availableImageMetadata(m *state.Machine, env environs.Environ) ([]params.CloudImageMetadata, error) {
	constraint, err := p.constructImageConstraint(m, env)
	if err != nil {
		return nil, errors.Annotate(err, "could not construct image constraint")
	}
	// Controller-stored metadata is preferred; data sources are fallback.
	data, err := p.findImageMetadata(constraint, env)
	if err != nil {
		return nil, errors.Trace(err)
	}
	sort.Sort(metadataList(data))
	logger.Debugf("available image metadata for provisioning: %v", data)
	return data, nil
}
|
go
|
{
"resource": ""
}
|
q4227
|
constructImageConstraint
|
train
|
// constructImageConstraint builds a simplestreams image lookup constraint
// for the machine, narrowed by series, image stream, any arch constraint,
// and — when the provider supports it — the current cloud region.
func (p *ProvisionerAPI) constructImageConstraint(m *state.Machine, env environs.Environ) (*imagemetadata.ImageConstraint, error) {
	lookup := simplestreams.LookupParams{
		Series: []string{m.Series()},
		Stream: env.Config().ImageStream(),
	}
	mcons, err := m.Constraints()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get machine constraints for machine %v", m.MachineTag().Id())
	}
	if arch := mcons.Arch; arch != nil {
		lookup.Arches = []string{*arch}
	}
	hasRegion, ok := env.(simplestreams.HasRegion)
	if ok {
		// We can determine the current region; restrict the metadata
		// search to it.
		spec, err := hasRegion.Region()
		if err != nil {
			// can't really find images if we cannot determine cloud region
			// TODO (anastasiamac 2015-12-03) or can we?
			return nil, errors.Annotate(err, "getting provider region information (cloud spec)")
		}
		lookup.CloudSpec = spec
	}
	return imagemetadata.NewImageConstraint(lookup), nil
}
|
go
|
{
"resource": ""
}
|
q4228
|
findImageMetadata
|
train
|
// findImageMetadata returns all matching image metadata. Metadata stored
// in the controller takes precedence; only when none is found there do we
// fall back to searching the simplestreams data sources.
func (p *ProvisionerAPI) findImageMetadata(imageConstraint *imagemetadata.ImageConstraint, env environs.Environ) ([]params.CloudImageMetadata, error) {
	// Look for image metadata in state.
	stateMetadata, err := p.imageMetadataFromState(imageConstraint)
	if err != nil && !errors.IsNotFound(err) {
		// Fall through to simplestreams if for some reason we can't get
		// metadata from the controller; do not exit on error.
		logger.Infof("could not get image metadata from controller: %v", err)
	}
	logger.Debugf("got from controller %d metadata", len(stateMetadata))
	// No need to look in data sources if found in state.
	if len(stateMetadata) != 0 {
		return stateMetadata, nil
	}
	// If no metadata is found in state, fall back to original simple stream search.
	// Currently, an image metadata worker picks up this metadata periodically (daily),
	// and stores it in state. So potentially, this collection could be different
	// to what is in state.
	dsMetadata, err := p.imageMetadataFromDataSources(env, imageConstraint)
	// Flattened from the original nested `if err != nil { if !IsNotFound ... }`.
	if err != nil && !errors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	logger.Debugf("got from data sources %d metadata", len(dsMetadata))
	return dsMetadata, nil
}
|
go
|
{
"resource": ""
}
|
q4229
|
imageMetadataFromState
|
train
|
// imageMetadataFromState queries the controller's cloud image metadata
// storage for entries matching the constraint, converting each stored
// record to its params representation.
func (p *ProvisionerAPI) imageMetadataFromState(constraint *imagemetadata.ImageConstraint) ([]params.CloudImageMetadata, error) {
	stored, err := p.st.CloudImageMetadataStorage.FindMetadata(cloudimagemetadata.MetadataFilter{
		Series: constraint.Series,
		Arches: constraint.Arches,
		Region: constraint.Region,
		Stream: constraint.Stream,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	var all []params.CloudImageMetadata
	for _, group := range stored {
		for _, m := range group {
			all = append(all, params.CloudImageMetadata{
				ImageId:         m.ImageId,
				Stream:          m.Stream,
				Region:          m.Region,
				Version:         m.Version,
				Series:          m.Series,
				Arch:            m.Arch,
				VirtType:        m.VirtType,
				RootStorageType: m.RootStorageType,
				RootStorageSize: m.RootStorageSize,
				Source:          m.Source,
				Priority:        m.Priority,
			})
		}
	}
	return all, nil
}
|
go
|
{
"resource": ""
}
|
q4230
|
imageMetadataFromDataSources
|
train
|
// imageMetadataFromDataSources searches the environ's simplestreams data
// sources for image metadata matching the constraint, saves anything
// found into the controller, then re-reads it from the controller so the
// conversion logic stays in one place. Failures against individual data
// sources are logged and skipped.
func (p *ProvisionerAPI) imageMetadataFromDataSources(env environs.Environ, constraint *imagemetadata.ImageConstraint) ([]params.CloudImageMetadata, error) {
	sources, err := environs.ImageMetadataSources(env)
	if err != nil {
		return nil, errors.Trace(err)
	}
	cfg := env.Config()
	// toModel converts one simplestreams result into the controller's
	// storage representation.
	toModel := func(m *imagemetadata.ImageMetadata, mSeries string, source string, priority int) cloudimagemetadata.Metadata {
		result := cloudimagemetadata.Metadata{
			MetadataAttributes: cloudimagemetadata.MetadataAttributes{
				Region:          m.RegionName,
				Arch:            m.Arch,
				VirtType:        m.VirtType,
				RootStorageType: m.Storage,
				Source:          source,
				Series:          mSeries,
				Stream:          m.Stream,
				Version:         m.Version,
			},
			Priority: priority,
			ImageId:  m.Id,
		}
		// TODO (anastasiamac 2016-08-24) This is a band-aid solution.
		// Once correct value is read from simplestreams, this needs to go.
		// Bug# 1616295
		// Fall back to the constraint's stream, then the model config's.
		if result.Stream == "" {
			result.Stream = constraint.Stream
		}
		if result.Stream == "" {
			result.Stream = cfg.ImageStream()
		}
		return result
	}
	var metadataState []cloudimagemetadata.Metadata
	for _, source := range sources {
		logger.Debugf("looking in data source %v", source.Description())
		found, info, err := imagemetadata.Fetch([]simplestreams.DataSource{source}, constraint)
		if err != nil {
			// Do not stop looking in other data sources if there is an issue here.
			logger.Warningf("encountered %v while getting published images metadata from %v", err, source.Description())
			continue
		}
		for _, m := range found {
			mSeries, err := series.VersionSeries(m.Version)
			if err != nil {
				logger.Warningf("could not determine series for image id %s: %v", m.Id, err)
				continue
			}
			metadataState = append(metadataState, toModel(m, mSeries, info.Source, source.Priority()))
		}
	}
	if len(metadataState) > 0 {
		if err := p.st.CloudImageMetadataStorage.SaveMetadata(metadataState); err != nil {
			// No need to react here, just take note
			logger.Warningf("failed to save published image metadata: %v", err)
		}
	}
	// Since we've fallen through to data sources search and have saved all needed images into controller,
	// let's try to get them from controller to avoid duplication of conversion logic here.
	all, err := p.imageMetadataFromState(constraint)
	if err != nil {
		return nil, errors.Annotate(err, "could not read metadata from controller after saving it there from data sources")
	}
	if len(all) == 0 {
		return nil, errors.NotFoundf("image metadata for series %v, arch %v", constraint.Series, constraint.Arches)
	}
	return all, nil
}
|
go
|
{
"resource": ""
}
|
q4231
|
Less
|
train
|
// Less orders metadata entries by ascending priority (sort.Interface).
func (m metadataList) Less(i, j int) bool {
	return m[j].Priority > m[i].Priority
}
|
go
|
{
"resource": ""
}
|
q4232
|
NewHTTPBlobOpener
|
train
|
// NewHTTPBlobOpener returns a blob opener that fetches the given URL via
// HTTP GET. The caller must close the returned reader.
func NewHTTPBlobOpener(hostnameVerification utils.SSLHostnameVerification) func(*url.URL) (io.ReadCloser, error) {
	return func(blobURL *url.URL) (io.ReadCloser, error) {
		// TODO(rog) make the download operation interruptible.
		client := utils.GetHTTPClient(hostnameVerification)
		resp, err := client.Get(blobURL.String())
		if err != nil {
			return nil, err
		}
		if resp.StatusCode == http.StatusOK {
			return resp.Body, nil
		}
		// resp.Body is always non-nil. (see https://golang.org/pkg/net/http/#Response)
		resp.Body.Close()
		return nil, errors.Errorf("bad http response: %v", resp.Status)
	}
}
|
go
|
{
"resource": ""
}
|
q4233
|
NewSha256Verifier
|
train
|
// NewSha256Verifier returns a verifier that reports NotValid unless the
// file's SHA-256 digest matches expected.
func NewSha256Verifier(expected string) func(*os.File) error {
	return func(file *os.File) error {
		actual, _, err := utils.ReadSHA256(file)
		if err != nil {
			return errors.Trace(err)
		}
		if actual == expected {
			return nil
		}
		mismatch := errors.Errorf("expected sha256 %q, got %q", expected, actual)
		return errors.NewNotValid(mismatch, "")
	}
}
|
go
|
{
"resource": ""
}
|
q4234
|
Refresh
|
train
|
// Refresh re-reads the application's life state from the API server.
func (s *Application) Refresh() error {
	life, err := s.st.life(s.tag)
	if err == nil {
		s.life = life
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q4235
|
CharmModifiedVersion
|
train
|
// CharmModifiedVersion fetches the application's charm-modified version
// from the API server; -1 accompanies any error.
func (s *Application) CharmModifiedVersion() (int, error) {
	args := params.Entities{
		Entities: []params.Entity{{Tag: s.tag.String()}},
	}
	var results params.IntResults
	if err := s.st.facade.FacadeCall("CharmModifiedVersion", args, &results); err != nil {
		return -1, err
	}
	if n := len(results.Results); n != 1 {
		return -1, fmt.Errorf("expected 1 result, got %d", n)
	}
	res := results.Results[0]
	if res.Error != nil {
		return -1, res.Error
	}
	return res.Result, nil
}
|
go
|
{
"resource": ""
}
|
q4236
|
SetStatus
|
train
|
// SetStatus sets the application status, attributed to the unit named
// unitName.
func (s *Application) SetStatus(unitName string, appStatus status.Status, info string, data map[string]interface{}) error {
	arg := params.EntityStatusArgs{
		Tag:    names.NewUnitTag(unitName).String(),
		Status: appStatus.String(),
		Info:   info,
		Data:   data,
	}
	args := params.SetStatus{Entities: []params.EntityStatusArgs{arg}}
	var result params.ErrorResults
	if err := s.st.facade.FacadeCall("SetApplicationStatus", args, &result); err != nil {
		return errors.Trace(err)
	}
	return result.OneError()
}
|
go
|
{
"resource": ""
}
|
q4237
|
Status
|
train
|
// Status returns the application status as reported through the unit
// named unitName.
func (s *Application) Status(unitName string) (params.ApplicationStatusResult, error) {
	tag := names.NewUnitTag(unitName)
	var results params.ApplicationStatusResults
	args := params.Entities{
		Entities: []params.Entity{
			{
				Tag: tag.String(),
			},
		},
	}
	err := s.st.facade.FacadeCall("ApplicationStatus", args, &results)
	if err != nil {
		return params.ApplicationStatusResult{}, errors.Trace(err)
	}
	// Guard against a malformed response before indexing, mirroring the
	// check done in CharmModifiedVersion; previously an empty Results
	// slice would have caused an index panic here.
	if len(results.Results) != 1 {
		return params.ApplicationStatusResult{}, errors.Errorf("expected 1 result, got %d", len(results.Results))
	}
	result := results.Results[0]
	if result.Error != nil {
		return params.ApplicationStatusResult{}, result.Error
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4238
|
WatchLeadershipSettings
|
train
|
// WatchLeadershipSettings returns a watcher notifying of changes to this
// application's leadership settings.
func (s *Application) WatchLeadershipSettings() (watcher.NotifyWatcher, error) {
	appID := s.tag.Id()
	return s.st.LeadershipSettings.WatchLeadershipSettings(appID)
}
|
go
|
{
"resource": ""
}
|
q4239
|
ParseIngressRules
|
train
|
// ParseIngressRules reads iptables output line by line and returns the
// ingress rules it describes. Unparseable lines are logged and skipped.
func ParseIngressRules(r io.Reader) ([]network.IngressRule, error) {
	var rules []network.IngressRule
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		rule, ok, err := parseIngressRule(strings.TrimSpace(line))
		switch {
		case err != nil:
			logger.Warningf("failed to parse iptables line %q: %v", line, err)
		case ok:
			rules = append(rules, rule)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, errors.Annotate(err, "reading iptables output")
	}
	return rules, nil
}
|
go
|
{
"resource": ""
}
|
q4240
|
popField
|
train
|
// popField splits off the first space-separated field of s, returning
// the field, the remainder with leading spaces trimmed, and whether a
// field was present (an empty string yields ok == false).
func popField(s string) (field, remainder string, ok bool) {
	i := strings.IndexByte(s, ' ')
	if i < 0 {
		// No separator: the whole string is the field (possibly empty).
		return s, "", len(s) > 0
	}
	field = s[:i]
	remainder = strings.TrimLeft(s[i+1:], " ")
	return field, remainder, true
}
|
go
|
{
"resource": ""
}
|
q4241
|
OAuthConfig
|
train
|
// OAuthConfig discovers the AD endpoint and tenant for the given
// subscription and returns the corresponding OAuth configuration
// together with the tenant ID.
func OAuthConfig(
	sdkCtx context.Context,
	client subscriptions.Client,
	resourceManagerEndpoint string,
	subscriptionId string,
) (*adal.OAuthConfig, string, error) {
	authURI, err := DiscoverAuthorizationURI(sdkCtx, client, subscriptionId)
	if err != nil {
		return nil, "", errors.Annotate(err, "detecting auth URI")
	}
	logger.Debugf("discovered auth URI: %s", authURI)
	// The authorization URI scheme and host identify the AD endpoint,
	// while its path identifies the AD tenant.
	tenantId, err := AuthorizationURITenantID(authURI)
	if err != nil {
		return nil, "", errors.Annotate(err, "getting tenant ID")
	}
	// Drop the path so only the AD endpoint remains.
	authURI.Path = ""
	oauthConfig, err := adal.NewOAuthConfig(authURI.String(), tenantId)
	if err != nil {
		return nil, "", errors.Annotate(err, "getting OAuth configuration")
	}
	return oauthConfig, tenantId, nil
}
|
go
|
{
"resource": ""
}
|
q4242
|
newEnviron
|
train
|
// newEnviron constructs a joyent environ from the given cloud spec and
// model config.
func newEnviron(cloud environs.CloudSpec, cfg *config.Config) (*joyentEnviron, error) {
	env := &joyentEnviron{
		name:  cfg.Name(),
		cloud: cloud,
	}
	if err := env.SetConfig(cfg); err != nil {
		return nil, err
	}
	compute, err := newCompute(cloud)
	if err != nil {
		return nil, err
	}
	env.compute = compute
	return env, nil
}
|
go
|
{
"resource": ""
}
|
q4243
|
GetLastSent
|
train
|
// GetLastSent fetches the last-forwarded record info for each of the
// given (model, sink) pairs, in the same order.
func (c LastSentClient) GetLastSent(ids []LastSentID) ([]LastSentResult, error) {
	args := params.LogForwardingGetLastSentParams{
		IDs: make([]params.LogForwardingID, len(ids)),
	}
	for i, id := range ids {
		args.IDs[i] = params.LogForwardingID{
			ModelTag: id.Model.String(),
			Sink:     id.Sink,
		}
	}
	var apiResults params.LogForwardingGetLastSentResults
	if err := c.caller.FacadeCall("GetLastSent", args, &apiResults); err != nil {
		return nil, errors.Trace(err)
	}
	results := make([]LastSentResult, len(ids))
	for i, res := range apiResults.Results {
		info := LastSentInfo{
			LastSentID: ids[i],
			RecordID:   res.RecordID,
		}
		// A zero/negative timestamp means "never sent"; leave it unset.
		if res.RecordTimestamp > 0 {
			info.RecordTimestamp = time.Unix(0, res.RecordTimestamp)
		}
		results[i] = LastSentResult{
			LastSentInfo: info,
			Error:        common.RestoreError(res.Error),
		}
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4244
|
SetLastSent
|
train
|
// SetLastSent records the last-forwarded log record for each of the
// given (model, sink) pairs, returning a result per request in order.
func (c LastSentClient) SetLastSent(reqs []LastSentInfo) ([]LastSentResult, error) {
	args := params.LogForwardingSetLastSentParams{
		Params: make([]params.LogForwardingSetLastSentParam, len(reqs)),
	}
	for i, req := range reqs {
		args.Params[i] = params.LogForwardingSetLastSentParam{
			LogForwardingID: params.LogForwardingID{
				ModelTag: req.Model.String(),
				Sink:     req.Sink,
			},
			RecordID:        req.RecordID,
			RecordTimestamp: req.RecordTimestamp.UnixNano(),
		}
	}
	var apiResults params.ErrorResults
	if err := c.caller.FacadeCall("SetLastSent", args, &apiResults); err != nil {
		return nil, errors.Trace(err)
	}
	results := make([]LastSentResult, len(reqs))
	for i, res := range apiResults.Results {
		results[i] = LastSentResult{
			LastSentInfo: reqs[i],
			Error:        common.RestoreError(res.Error),
		}
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4245
|
NewReader
|
train
|
// NewReader returns a global-clock Reader for the given, validated config.
func NewReader(config ReaderConfig) (*Reader, error) {
	if err := config.validate(); err != nil {
		return nil, errors.Trace(err)
	}
	return &Reader{config: config}, nil
}
|
go
|
{
"resource": ""
}
|
q4246
|
Now
|
train
|
// Now returns the current global virtual time. A missing clock document
// means time has never been written, which reads as the global epoch.
func (r *Reader) Now() (time.Time, error) {
	coll, closer := r.config.Mongo.GetCollection(r.config.Collection)
	defer closer()
	t, err := readClock(coll)
	switch {
	case errors.Cause(err) == mgo.ErrNotFound:
		// Not written yet; the first write will be globalEpoch.
		t = globalEpoch
	case err != nil:
		return time.Time{}, errors.Trace(err)
	}
	return t, nil
}
|
go
|
{
"resource": ""
}
|
q4247
|
ModelStatus
|
train
|
// ModelStatus returns a status summary for each of the requested models.
func (c *ModelStatusAPI) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) {
	entities := make([]params.Entity, len(tags))
	for i, tag := range tags {
		entities[i] = params.Entity{Tag: tag.String()}
	}
	req := params.Entities{Entities: entities}
	var result params.ModelStatusResults
	if err := c.facade.FacadeCall("ModelStatus", req, &result); err != nil {
		return nil, err
	}
	return c.processModelStatusResults(result.Results)
}
|
go
|
{
"resource": ""
}
|
q4248
|
OnlyConnect
|
train
|
// OnlyConnect opens an API connection using the agent's current config,
// falling back to the old password via connectFallback.
func OnlyConnect(a agent.Agent, apiOpen api.OpenFunc) (api.Connection, error) {
	cfg := a.CurrentConfig()
	info, ok := cfg.APIInfo()
	if !ok {
		return nil, errors.New("API info not available")
	}
	conn, _, err := connectFallback(apiOpen, info, cfg.OldPassword())
	if err != nil {
		return nil, errors.Trace(err)
	}
	return conn, nil
}
|
go
|
{
"resource": ""
}
|
q4249
|
NewExternalControllerConnection
|
train
|
// NewExternalControllerConnection opens an API connection to an external
// controller with short dial timeouts.
func NewExternalControllerConnection(apiInfo *api.Info) (api.Connection, error) {
	opts := api.DialOpts{
		Timeout:    2 * time.Second,
		RetryDelay: 500 * time.Millisecond,
	}
	return api.Open(apiInfo, opts)
}
|
go
|
{
"resource": ""
}
|
q4250
|
StoreCharmArchive
|
train
|
// StoreCharmArchive stores a charm archive blob in model storage and
// records it in state. If another client uploaded the same charm
// revision first, the duplicate blob is removed and the call succeeds.
func StoreCharmArchive(st State, archive CharmArchive) error {
	storage := newStateStorage(st.ModelUUID(), st.MongoSession())
	storagePath, err := charmArchiveStoragePath(archive.ID)
	if err != nil {
		return errors.Annotate(err, "cannot generate charm archive name")
	}
	if err := storage.Put(storagePath, archive.Data, archive.Size); err != nil {
		return errors.Annotate(err, "cannot add charm to storage")
	}
	info := state.CharmInfo{
		Charm:       archive.Charm,
		ID:          archive.ID,
		StoragePath: storagePath,
		SHA256:      archive.SHA256,
		Macaroon:    archive.Macaroon,
		Version:     archive.CharmVersion,
	}
	// Now update the charm data in state and mark it as no longer pending.
	_, err = st.UpdateUploadedCharm(info)
	if err != nil {
		// The revision-modified error may arrive bare or wrapped, so
		// check both, plus the already-uploaded case.
		alreadyUploaded := err == state.ErrCharmRevisionAlreadyModified ||
			errors.Cause(err) == state.ErrCharmRevisionAlreadyModified ||
			state.IsCharmAlreadyUploadedError(err)
		// Either way, the blob we just stored is unreferenced: clean it up.
		if err := storage.Remove(storagePath); err != nil {
			if alreadyUploaded {
				logger.Errorf("cannot remove duplicated charm archive from storage: %v", err)
			} else {
				logger.Errorf("cannot remove unsuccessfully recorded charm archive from storage: %v", err)
			}
		}
		if alreadyUploaded {
			// Somebody else managed to upload and update the charm in
			// state before us. This is not an error.
			return nil
		}
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4251
|
charmArchiveStoragePath
|
train
|
// charmArchiveStoragePath returns a storage path for the given charm
// URL, made unique with a freshly generated UUID so concurrent
// uploads of the same charm do not collide.
func charmArchiveStoragePath(curl *charm.URL) (string, error) {
	suffix, err := utils.NewUUID()
	if err != nil {
		return "", err
	}
	path := fmt.Sprintf("charms/%s-%s", curl.String(), suffix)
	return path, nil
}
|
go
|
{
"resource": ""
}
|
q4252
|
NewAPI
|
train
|
// NewAPI returns a subnets API facade client backed by the supplied
// caller. It panics if caller is nil.
func NewAPI(caller base.APICallCloser) *API {
	if caller == nil {
		panic("caller is nil")
	}
	clientFacade, facadeCaller := base.NewClientFacade(caller, subnetsFacade)
	api := &API{
		ClientFacade: clientFacade,
		facade:       facadeCaller,
	}
	return api
}
|
go
|
{
"resource": ""
}
|
q4253
|
AddSubnet
|
train
|
// AddSubnet adds an existing subnet, identified by its tag or by its
// provider ID, to the given space with the given availability zones.
func (api *API) AddSubnet(subnet names.SubnetTag, providerId network.Id, space names.SpaceTag, zones []string) error {
	// Prefer ProviderId when set over CIDR.
	subnetTag := subnet.String()
	if providerId != "" {
		subnetTag = ""
	}
	// Named "args" (not "params") to avoid shadowing the params package.
	args := params.AddSubnetsParams{
		Subnets: []params.AddSubnetParams{{
			SubnetTag:        subnetTag,
			SubnetProviderId: string(providerId),
			SpaceTag:         space.String(),
			Zones:            zones,
		}},
	}
	var response params.ErrorResults
	if err := api.facade.FacadeCall("AddSubnets", args, &response); err != nil {
		return errors.Trace(err)
	}
	return response.OneError()
}
|
go
|
{
"resource": ""
}
|
q4254
|
CreateSubnet
|
train
|
// CreateSubnet creates a new subnet with the given tag in the given
// space, with the given availability zones and visibility.
func (api *API) CreateSubnet(subnet names.SubnetTag, space names.SpaceTag, zones []string, isPublic bool) error {
	// Named "args" (not "params") to avoid shadowing the params package.
	args := params.CreateSubnetsParams{
		Subnets: []params.CreateSubnetParams{{
			SubnetTag: subnet.String(),
			SpaceTag:  space.String(),
			Zones:     zones,
			IsPublic:  isPublic,
		}},
	}
	var response params.ErrorResults
	if err := api.facade.FacadeCall("CreateSubnets", args, &response); err != nil {
		return errors.Trace(err)
	}
	return response.OneError()
}
|
go
|
{
"resource": ""
}
|
q4255
|
ListSubnets
|
train
|
// ListSubnets lists the subnets matching the optional space tag and
// zone filters; zero-valued filters match everything.
func (api *API) ListSubnets(spaceTag *names.SpaceTag, zone string) ([]params.Subnet, error) {
	filters := params.SubnetsFilters{Zone: zone}
	if spaceTag != nil {
		filters.SpaceTag = spaceTag.String()
	}
	var response params.ListSubnetsResults
	if err := api.facade.FacadeCall("ListSubnets", filters, &response); err != nil {
		return nil, errors.Trace(err)
	}
	return response.Results, nil
}
|
go
|
{
"resource": ""
}
|
q4256
|
MongoIndexes
|
train
|
// MongoIndexes returns the indexes to apply to the collection: a
// sparse TTL index that expires documents expiryTime after the value
// of their "expire-at" field.
func MongoIndexes() []mgo.Index {
	ttlIndex := mgo.Index{
		Key:         []string{"expire-at"},
		ExpireAfter: expiryTime,
		Sparse:      true,
	}
	return []mgo.Index{ttlIndex}
}
|
go
|
{
"resource": ""
}
|
q4257
|
SaveMetadataNoExpiry
|
train
|
// SaveMetadataNoExpiry saves the given metadata records without the
// expiry flag, so they are kept until explicitly deleted.
func (s *storage) SaveMetadataNoExpiry(metadata []Metadata) error {
	return s.saveMetadata(metadata, false)
}
|
go
|
{
"resource": ""
}
|
q4258
|
SaveMetadata
|
train
|
// SaveMetadata saves the given metadata records with the expiry flag
// set — presumably subject to the TTL index on "expire-at"; confirm
// against saveMetadata.
func (s *storage) SaveMetadata(metadata []Metadata) error {
	return s.saveMetadata(metadata, true)
}
|
go
|
{
"resource": ""
}
|
q4259
|
DeleteMetadata
|
train
|
// DeleteMetadata removes all metadata documents recorded for the
// given image ID. Deleting metadata for an unknown image is a no-op.
func (s *storage) DeleteMetadata(imageId string) error {
	// deleteOperation builds the remove op for one metadata document.
	deleteOperation := func(docId string) txn.Op {
		logger.Debugf("deleting metadata (ID=%v) for image (ID=%v)", docId, imageId)
		return txn.Op{
			C:      s.collection,
			Id:     docId,
			Assert: txn.DocExists,
			Remove: true,
		}
	}
	// noOp aborts the transaction cleanly when there is nothing to delete.
	noOp := func() ([]txn.Op, error) {
		logger.Debugf("no metadata for image ID %v to delete", imageId)
		return nil, jujutxn.ErrNoOperations
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// find all metadata docs with given image id
		imageMetadata, err := s.metadataForImageId(imageId)
		if err != nil {
			if err == mgo.ErrNotFound {
				return noOp()
			}
			return nil, err
		}
		if len(imageMetadata) == 0 {
			return noOp()
		}
		allTxn := make([]txn.Op, len(imageMetadata))
		for i, doc := range imageMetadata {
			allTxn[i] = deleteOperation(doc.Id)
		}
		return allTxn, nil
	}
	err := s.store.RunTransaction(buildTxn)
	if err != nil {
		return errors.Annotatef(err, "cannot delete metadata for cloud image %v", imageId)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4260
|
AllCloudImageMetadata
|
train
|
// AllCloudImageMetadata returns every cloud image metadata record in
// the collection.
func (s *storage) AllCloudImageMetadata() ([]Metadata, error) {
	coll, closer := s.store.GetCollection(s.collection)
	defer closer()
	var docs []imagesMetadataDoc
	if err := coll.Find(nil).All(&docs); err != nil {
		return nil, errors.Annotatef(err, "cannot get all image metadata")
	}
	// Pre-size; stays non-nil even when the collection is empty.
	results := make([]Metadata, 0, len(docs))
	for _, doc := range docs {
		results = append(results, doc.metadata())
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4261
|
FindMetadata
|
train
|
// FindMetadata returns all metadata records matching the given filter
// criteria, sorted by creation date and grouped by source. A NotFound
// error is returned when nothing matches.
func (s *storage) FindMetadata(criteria MetadataFilter) (map[string][]Metadata, error) {
	coll, closer := s.store.GetCollection(s.collection)
	defer closer()
	logger.Debugf("searching for image metadata %#v", criteria)
	searchCriteria := buildSearchClauses(criteria)
	var docs []imagesMetadataDoc
	// Sort by creation date so records within each source group come
	// back in chronological order.
	if err := coll.Find(searchCriteria).Sort("date_created").All(&docs); err != nil {
		return nil, errors.Trace(err)
	}
	if len(docs) == 0 {
		return nil, errors.NotFoundf("matching cloud image metadata")
	}
	metadata := make(map[string][]Metadata)
	for _, doc := range docs {
		one := doc.metadata()
		metadata[one.Source] = append(metadata[one.Source], one)
	}
	return metadata, nil
}
|
go
|
{
"resource": ""
}
|
q4262
|
SupportedArchitectures
|
train
|
// SupportedArchitectures returns the distinct architectures of the
// metadata records matching the given filter criteria.
func (s *storage) SupportedArchitectures(criteria MetadataFilter) ([]string, error) {
	coll, closer := s.store.GetCollection(s.collection)
	defer closer()
	query := coll.Find(buildSearchClauses(criteria))
	var arches []string
	err := query.Distinct("arch", &arches)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return arches, nil
}
|
go
|
{
"resource": ""
}
|
q4263
|
putState
|
train
|
// putState writes data to the well-known bootstrap state file on the
// given storage, overwriting any existing content.
func putState(stor storage.StorageWriter, data []byte) error {
	logger.Debugf("putting %q to bootstrap storage %T", StateFile, stor)
	return stor.Put(StateFile, bytes.NewBuffer(data), int64(len(data)))
}
|
go
|
{
"resource": ""
}
|
q4264
|
CreateStateFile
|
train
|
// CreateStateFile creates an empty state file on the given storage
// and returns its URL.
func CreateStateFile(stor storage.Storage) (string, error) {
	if err := putState(stor, []byte{}); err != nil {
		return "", fmt.Errorf("cannot create initial state file: %v", err)
	}
	return stor.URL(StateFile)
}
|
go
|
{
"resource": ""
}
|
q4265
|
SaveState
|
train
|
// SaveState serializes the given bootstrap state as YAML and writes
// it to the state file on the given storage.
func SaveState(storage storage.StorageWriter, state *BootstrapState) error {
	marshalled, err := goyaml.Marshal(state)
	if err != nil {
		return err
	}
	return putState(storage, marshalled)
}
|
go
|
{
"resource": ""
}
|
q4266
|
LoadState
|
train
|
// LoadState reads and parses the bootstrap state file from the given
// storage. A missing file is reported as environs.ErrNotBootstrapped.
func LoadState(stor storage.StorageReader) (*BootstrapState, error) {
	r, err := storage.Get(stor, StateFile)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, environs.ErrNotBootstrapped
		}
		return nil, err
	}
	return loadState(r)
}
|
go
|
{
"resource": ""
}
|
q4267
|
AddStateInstance
|
train
|
// AddStateInstance records a new state instance ID in the bootstrap
// state file, starting from fresh state when none exists yet.
func AddStateInstance(stor storage.Storage, id instance.Id) error {
	state, err := LoadState(stor)
	switch {
	case err == environs.ErrNotBootstrapped:
		state = &BootstrapState{}
	case err != nil:
		return errors.Annotate(err, "cannot record state instance-id")
	}
	state.StateInstances = append(state.StateInstances, id)
	return SaveState(stor, state)
}
|
go
|
{
"resource": ""
}
|
q4268
|
RemoveStateInstances
|
train
|
// RemoveStateInstances removes all of the given instance IDs from the
// bootstrap state file, saving the file only when something was
// actually removed. A storage that has not been bootstrapped yet is
// treated as a no-op.
func RemoveStateInstances(stor storage.Storage, ids ...instance.Id) error {
	state, err := LoadState(stor)
	if err == environs.ErrNotBootstrapped {
		return nil
	} else if err != nil {
		return errors.Annotate(err, "cannot remove recorded state instance-id")
	}
	var anyFound bool
	for i := 0; i < len(state.StateInstances); i++ {
		for _, id := range ids {
			if state.StateInstances[i] == id {
				// Splice the matching element out in place, then step i
				// back so the element shifted into position i is
				// re-examined on the next iteration.
				head := state.StateInstances[:i]
				tail := state.StateInstances[i+1:]
				state.StateInstances = append(head, tail...)
				anyFound = true
				i--
				break
			}
		}
	}
	if !anyFound {
		return nil
	}
	return SaveState(stor, state)
}
|
go
|
{
"resource": ""
}
|
q4269
|
ProviderStateInstances
|
train
|
// ProviderStateInstances extracts the state instance IDs from the
// bootstrap state held on the given storage.
func ProviderStateInstances(stor storage.StorageReader) ([]instance.Id, error) {
	state, err := LoadState(stor)
	if err != nil {
		return nil, err
	}
	return state.StateInstances, nil
}
|
go
|
{
"resource": ""
}
|
q4270
|
NewFacade
|
train
|
// NewFacade returns a new CAAS firewaller API facade, restricted to
// controller agents. It exposes life and watch functionality for
// applications (and life for units).
func NewFacade(
	resources facade.Resources,
	authorizer facade.Authorizer,
	st CAASFirewallerState,
) (*Facade, error) {
	if !authorizer.AuthController() {
		return nil, common.ErrPerm
	}
	// Build each auth func once and reuse it — the original constructed
	// the application auth func twice.
	accessApplication := common.AuthFuncForTagKind(names.ApplicationTagKind)
	accessUnit := common.AuthFuncForTagKind(names.UnitTagKind)
	return &Facade{
		// Life may be queried for both applications and units.
		LifeGetter: common.NewLifeGetter(
			st, common.AuthAny(accessApplication, accessUnit),
		),
		AgentEntityWatcher: common.NewAgentEntityWatcher(
			st,
			resources,
			accessApplication,
		),
		resources: resources,
		state:     st,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q4271
|
IsExposed
|
train
|
// IsExposed reports, for each supplied entity tag, whether the
// corresponding application is exposed.
func (f *Facade) IsExposed(args params.Entities) (params.BoolResults, error) {
	results := params.BoolResults{
		Results: make([]params.BoolResult, len(args.Entities)),
	}
	for i, entity := range args.Entities {
		exposed, err := f.isExposed(f.state, entity.Tag)
		if err == nil {
			results.Results[i].Result = exposed
		} else {
			results.Results[i].Error = common.ServerError(err)
		}
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q4272
|
Run
|
train
|
// Run imports the requested SSH key IDs into the model via the key
// manager API. Individual key failures are reported to stderr rather
// than aborting the whole command.
func (c *importKeysCommand) Run(context *cmd.Context) error {
	client, err := c.NewKeyManagerClient()
	if err != nil {
		return err
	}
	defer client.Close()
	// TODO(alexisb) - currently keys are global which is not ideal.
	// keymanager needs to be updated to allow keys per user
	c.user = "admin"
	results, err := client.ImportKeys(c.user, c.sshKeyIds...)
	if err != nil {
		// Surface a friendlier message when the model blocks changes.
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	// results parallels c.sshKeyIds, one entry per requested key.
	for i, result := range results {
		if result.Error != nil {
			fmt.Fprintf(context.Stderr, "cannot import key id %q: %v\n", c.sshKeyIds[i], result.Error)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4273
|
ValidateToolsMetadata
|
train
|
// ValidateToolsMetadata searches the given simplestreams sources for
// agent binary metadata matching the supplied lookup parameters and
// returns the matching version strings plus resolution info.
func ValidateToolsMetadata(params *ToolsMetadataLookupParams) ([]string, *simplestreams.ResolveInfo, error) {
	if len(params.Sources) == 0 {
		return nil, nil, fmt.Errorf("required parameter sources not specified")
	}
	// With no version and no major constraint, default to the current
	// juju version.
	if params.Version == "" && params.Major == 0 {
		params.Version = jujuversion.Current.String()
	}
	// Both constraint flavours share identical lookup parameters; build
	// them once instead of duplicating the literal in each branch.
	lookup := simplestreams.LookupParams{
		CloudSpec: simplestreams.CloudSpec{
			Region:   params.Region,
			Endpoint: params.Endpoint,
		},
		Stream: params.Stream,
		Series: []string{params.Series},
		Arches: params.Architectures,
	}
	var toolsConstraint *ToolsConstraint
	if params.Version == "" {
		toolsConstraint = NewGeneralToolsConstraint(params.Major, params.Minor, lookup)
	} else {
		versNum, err := version.Parse(params.Version)
		if err != nil {
			return nil, nil, err
		}
		toolsConstraint = NewVersionedToolsConstraint(versNum, lookup)
	}
	matchingTools, resolveInfo, err := Fetch(params.Sources, toolsConstraint)
	if err != nil {
		return nil, resolveInfo, err
	}
	if len(matchingTools) == 0 {
		return nil, resolveInfo, fmt.Errorf("no matching agent binaries found for constraint %+v", toolsConstraint)
	}
	versions := make([]string, len(matchingTools))
	for i, tm := range matchingTools {
		vers := version.Binary{
			Number: version.MustParse(tm.Version),
			Series: tm.Release,
			Arch:   tm.Arch,
		}
		versions[i] = vers.String()
	}
	return versions, resolveInfo, nil
}
|
go
|
{
"resource": ""
}
|
q4274
|
Save
|
train
|
// Save upserts the given firewall rule after validating its service
// type and whitelist CIDRs. The transaction asserts the model is
// still active and either updates the existing rule document or
// inserts a new one.
func (fw *firewallRulesState) Save(rule FirewallRule) error {
	if err := rule.WellKnownService.validate(); err != nil {
		return errors.Trace(err)
	}
	for _, cidr := range rule.WhitelistCIDRs {
		if _, _, err := net.ParseCIDR(cidr); err != nil {
			return errors.NotValidf("CIDR %q", cidr)
		}
	}
	serviceStr := string(rule.WellKnownService)
	doc := firewallRulesDoc{
		Id:               serviceStr,
		WellKnownService: serviceStr,
		WhitelistCIDRS:   rule.WhitelistCIDRs,
	}
	buildTxn := func(int) ([]txn.Op, error) {
		model, err := fw.st.Model()
		if err != nil {
			return nil, errors.Annotate(err, "failed to load model")
		}
		if err := checkModelActive(fw.st); err != nil {
			return nil, errors.Trace(err)
		}
		// Probe for an existing rule to decide between update and insert.
		_, err = fw.Rule(rule.WellKnownService)
		if err != nil && !errors.IsNotFound(err) {
			return nil, errors.Trace(err)
		}
		var ops []txn.Op
		if err == nil {
			// Rule exists: update the whitelist in place.
			ops = []txn.Op{{
				C:      firewallRulesC,
				Id:     serviceStr,
				Assert: txn.DocExists,
				Update: bson.D{
					{"$set", bson.D{{"whitelist-cidrs", rule.WhitelistCIDRs}}},
				},
			}, model.assertActiveOp()}
		} else {
			// Rule not found: insert a fresh document.
			doc.WhitelistCIDRS = rule.WhitelistCIDRs
			ops = []txn.Op{{
				C:      firewallRulesC,
				Id:     doc.Id,
				Assert: txn.DocMissing,
				Insert: doc,
			}, model.assertActiveOp()}
		}
		return ops, nil
	}
	if err := fw.st.db().Run(buildTxn); err != nil {
		return errors.Annotate(err, "failed to create firewall rules")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4275
|
Rule
|
train
|
// Rule returns the firewall rule stored for the given well-known
// service, or a NotFound error if none exists.
func (fw *firewallRulesState) Rule(service WellKnownServiceType) (*FirewallRule, error) {
	coll, closer := fw.st.db().GetCollection(firewallRulesC)
	defer closer()
	var doc firewallRulesDoc
	switch err := coll.FindId(string(service)).One(&doc); err {
	case nil:
		return doc.toRule(), nil
	case mgo.ErrNotFound:
		return nil, errors.NotFoundf("firewall rules for service %v", service)
	default:
		return nil, errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q4276
|
AllRules
|
train
|
// AllRules returns every firewall rule stored in the collection.
func (fw *firewallRulesState) AllRules() ([]*FirewallRule, error) {
	coll, closer := fw.st.db().GetCollection(firewallRulesC)
	defer closer()
	var docs []firewallRulesDoc
	if err := coll.Find(nil).All(&docs); err != nil {
		return nil, errors.Trace(err)
	}
	rules := make([]*FirewallRule, len(docs))
	for i := range docs {
		rules[i] = docs[i].toRule()
	}
	return rules, nil
}
|
go
|
{
"resource": ""
}
|
q4277
|
acquireExecutionLock
|
train
|
// acquireExecutionLock acquires the machine-wide execution lock for
// the meterstatus worker, annotated with the given action for
// diagnostics. The returned function releases the lock; interrupt
// cancels a pending acquisition.
func (w *hookRunner) acquireExecutionLock(action string, interrupt <-chan struct{}) (func(), error) {
	spec := machinelock.Spec{
		Cancel:  interrupt,
		Worker:  "meterstatus",
		Comment: action,
	}
	releaser, err := w.machineLock.Acquire(spec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return releaser, nil
}
|
go
|
{
"resource": ""
}
|
q4278
|
CreateCloudAccess
|
train
|
// CreateCloudAccess grants the given user the given access level on
// the named cloud. An AlreadyExists error is returned when a
// permission record already exists for that user and cloud.
func (st *State) CreateCloudAccess(cloud string, user names.UserTag, access permission.Access) error {
	if err := permission.ValidateCloudAccess(access); err != nil {
		return errors.Trace(err)
	}
	// Local users must exist.
	if user.IsLocal() {
		_, err := st.User(user)
		if err != nil {
			if errors.IsNotFound(err) {
				return errors.Annotatef(err, "user %q does not exist locally", user.Name())
			}
			return errors.Trace(err)
		}
	}
	op := createPermissionOp(cloudGlobalKey(cloud), userGlobalKey(userAccessID(user)), access)
	err := st.db().RunTransaction([]txn.Op{op})
	if err == txn.ErrAborted {
		// The insert asserts the doc is missing, so an aborted txn
		// means the permission already exists.
		err = errors.AlreadyExistsf("permission for user %q for cloud %q", user.Id(), cloud)
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4279
|
GetCloudAccess
|
train
|
// GetCloudAccess returns the access permission the given user has on
// the named cloud.
func (st *State) GetCloudAccess(cloud string, user names.UserTag) (permission.Access, error) {
	objectKey := cloudGlobalKey(cloud)
	subjectKey := userGlobalKey(userAccessID(user))
	perm, err := st.userPermission(objectKey, subjectKey)
	if err != nil {
		return "", errors.Trace(err)
	}
	return perm.access(), nil
}
|
go
|
{
"resource": ""
}
|
q4280
|
GetCloudUsers
|
train
|
// GetCloudUsers returns a map from user ID to the access permission
// that user has on the named cloud.
func (st *State) GetCloudUsers(cloud string) (map[string]permission.Access, error) {
	perms, err := st.usersPermissions(cloudGlobalKey(cloud))
	if err != nil {
		return nil, errors.Trace(err)
	}
	result := make(map[string]permission.Access, len(perms))
	for _, perm := range perms {
		userID := userIDFromGlobalKey(perm.doc.SubjectGlobalKey)
		result[userID] = perm.access()
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4281
|
UpdateCloudAccess
|
train
|
// UpdateCloudAccess changes the user's access level on the named
// cloud. The permission must already exist; the lookup inside the
// transaction builder re-checks this on every attempt.
func (st *State) UpdateCloudAccess(cloud string, user names.UserTag, access permission.Access) error {
	if err := permission.ValidateCloudAccess(access); err != nil {
		return errors.Trace(err)
	}
	buildTxn := func(int) ([]txn.Op, error) {
		_, err := st.GetCloudAccess(cloud, user)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops := []txn.Op{updatePermissionOp(cloudGlobalKey(cloud), userGlobalKey(userAccessID(user)), access)}
		return ops, nil
	}
	err := st.db().Run(buildTxn)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4282
|
RemoveCloudAccess
|
train
|
// RemoveCloudAccess revokes the user's permission on the named cloud.
// The permission must exist; the lookup inside the transaction
// builder re-checks this on every attempt.
func (st *State) RemoveCloudAccess(cloud string, user names.UserTag) error {
	buildTxn := func(int) ([]txn.Op, error) {
		_, err := st.GetCloudAccess(cloud, user)
		if err != nil {
			return nil, err
		}
		ops := []txn.Op{removePermissionOp(cloudGlobalKey(cloud), userGlobalKey(userAccessID(user)))}
		return ops, nil
	}
	err := st.db().Run(buildTxn)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4283
|
CloudsForUser
|
train
|
// CloudsForUser returns the clouds visible to the given user, sorted
// by name, with the user's access level filled in. When all is true
// and the user is a controller superuser, every cloud is returned;
// otherwise only clouds the user has explicit permission on.
func (st *State) CloudsForUser(user names.UserTag, all bool) ([]CloudInfo, error) {
	// We only treat the user as a superuser if they pass --all
	isControllerSuperuser := false
	if all {
		var err error
		isControllerSuperuser, err = st.isUserSuperuser(user)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	clouds, closer := st.db().GetCollection(cloudsC)
	defer closer()
	var cloudQuery mongo.Query
	if isControllerSuperuser {
		// Fast path, we just get all the clouds.
		cloudQuery = clouds.Find(nil)
	} else {
		cloudNames, err := st.cloudNamesForUser(user)
		if err != nil {
			return nil, errors.Trace(err)
		}
		cloudQuery = clouds.Find(bson.M{
			"_id": bson.M{"$in": cloudNames},
		})
	}
	cloudQuery = cloudQuery.Sort("name")
	var cloudDocs []cloudDoc
	if err := cloudQuery.All(&cloudDocs); err != nil {
		return nil, errors.Trace(err)
	}
	result := make([]CloudInfo, len(cloudDocs))
	for i, c := range cloudDocs {
		result[i] = CloudInfo{
			Cloud: c.toCloud(),
		}
	}
	// Attach each cloud's access level for this user.
	if err := st.fillInCloudUserAccess(user, result); err != nil {
		return nil, errors.Trace(err)
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q4284
|
cloudNamesForUser
|
train
|
// cloudNamesForUser returns the names of the clouds the given user
// has any permission on, derived from the raw permissions collection.
func (st *State) cloudNamesForUser(user names.UserTag) ([]string, error) {
	// Start by looking up cloud names that the user has access to, and then load only the records that are
	// included in that set
	permissions, permCloser := st.db().GetRawCollection(permissionsC)
	defer permCloser()
	// Permission doc IDs end with "#<user global key>"; match on that
	// suffix to find all of this user's permission docs.
	findExpr := fmt.Sprintf("^.*#%s$", userGlobalKey(user.Id()))
	query := permissions.Find(
		bson.D{{"_id", bson.D{{"$regex", findExpr}}}},
	).Batch(100)
	var doc permissionDoc
	iter := query.Iter()
	var cloudNames []string
	for iter.Next(&doc) {
		// NOTE(review): permissions on non-cloud objects (no "cloud#"
		// prefix) pass through here unchanged — presumably harmless
		// because the caller's $in query won't match them; confirm.
		cloudName := strings.TrimPrefix(doc.ObjectGlobalKey, "cloud#")
		cloudNames = append(cloudNames, cloudName)
	}
	if err := iter.Close(); err != nil {
		return nil, errors.Trace(err)
	}
	return cloudNames, nil
}
|
go
|
{
"resource": ""
}
|
q4285
|
NewMachineInitReader
|
train
|
// NewMachineInitReader returns an InitReader for the machine's
// cloud-init data, resolving the standard config locations for the
// given series.
func NewMachineInitReader(series string) (InitReader, error) {
	cfgDir, err := paths.CloudInitCfgDir(series)
	if err != nil {
		return nil, errors.Annotate(err, "determining CloudInitCfgDir for the machine")
	}
	instanceCfgDir, err := paths.MachineCloudInitDir(series)
	if err != nil {
		return nil, errors.Annotate(err, "determining MachineCloudInitDir for the machine")
	}
	curtinCfgFile, err := paths.CurtinInstallConfig(series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return NewMachineInitReaderFromConfig(MachineInitReaderConfig{
		Series:                     series,
		CloudInitConfigDir:         cfgDir,
		CloudInitInstanceConfigDir: instanceCfgDir,
		CurtinInstallConfigFile:    curtinCfgFile,
	}), nil
}
|
go
|
{
"resource": ""
}
|
q4286
|
GetInitConfig
|
train
|
// GetInitConfig returns a merged map of cloud-init configuration for
// the machine, combining the cloud-init config directory, the
// instance vendor-data and the curtin install config. It returns nil
// (with no error) when init config cannot be read for the machine's
// OS or series.
func (r *MachineInitReader) GetInitConfig() (map[string]interface{}, error) {
	series := r.config.Series
	containerOS, err := utilsseries.GetOSFromSeries(series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	switch containerOS {
	case utilsos.Ubuntu, utilsos.CentOS, utilsos.OpenSUSE:
		// Config paths are only valid when the container series matches
		// the host series.
		if series != utilsseries.MustHostSeries() {
			logger.Debugf("not attempting to get init config for %s, series of machine and container differ", series)
			return nil, nil
		}
	default:
		logger.Debugf("not attempting to get init config for %s container", series)
		return nil, nil
	}
	machineCloudInitData, err := r.getMachineCloudCfgDirData()
	if err != nil {
		return nil, errors.Trace(err)
	}
	file := filepath.Join(r.config.CloudInitInstanceConfigDir, "vendor-data.txt")
	vendorData, err := r.unmarshallConfigFile(file)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Later sources override earlier ones on key collisions.
	for k, v := range vendorData {
		machineCloudInitData[k] = v
	}
	_, curtinData, err := fileAsConfigMap(r.config.CurtinInstallConfigFile)
	if err != nil {
		return nil, errors.Trace(err)
	}
	for k, v := range curtinData {
		machineCloudInitData[k] = v
	}
	return machineCloudInitData, nil
}
|
go
|
{
"resource": ""
}
|
q4287
|
getMachineCloudCfgDirData
|
train
|
// getMachineCloudCfgDirData returns a merged map of config from all
// *.cfg files in the machine's cloud-init config directory, applied
// in sorted filename order so later files override earlier ones.
func (r *MachineInitReader) getMachineCloudCfgDirData() (map[string]interface{}, error) {
	dir := r.config.CloudInitConfigDir
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, errors.Annotate(err, "determining files in CloudInitCfgDir for the machine")
	}
	// Sort so merge (and therefore override) order is deterministic.
	// Range the sorted view directly — the original sorted the shared
	// backing array but then ranged the unsorted-looking `files`
	// variable, which obscured the intent.
	sortedFiles := sortableFileInfos(files)
	sort.Sort(sortedFiles)
	cloudInit := make(map[string]interface{})
	for _, file := range sortedFiles {
		name := file.Name()
		if !strings.HasSuffix(name, ".cfg") {
			continue
		}
		_, cloudCfgData, err := fileAsConfigMap(filepath.Join(dir, name))
		if err != nil {
			return nil, errors.Trace(err)
		}
		for k, v := range cloudCfgData {
			cloudInit[k] = v
		}
	}
	return cloudInit, nil
}
|
go
|
{
"resource": ""
}
|
q4288
|
unmarshallConfigFile
|
train
|
// unmarshallConfigFile reads the given file and converts it to a
// config map, transparently handling content that is gzipped, base64
// encoded, or base64-encoded gzip (in that unwrap order).
func (r *MachineInitReader) unmarshallConfigFile(file string) (map[string]interface{}, error) {
	raw, config, err := fileAsConfigMap(file)
	if err == nil {
		return config, nil
	}
	if !errors.IsNotValid(err) {
		return nil, errors.Trace(err)
	}
	// The data maybe be gzipped, base64 encoded, both, or neither.
	// If both, it has been gzipped, then base64 encoded.
	logger.Tracef("unmarshall failed (%s), file may be compressed", err.Error())
	zippedData, err := utils.Gunzip(raw)
	if err == nil {
		cfg, err := bytesAsConfigMap(zippedData)
		return cfg, errors.Trace(err)
	}
	logger.Tracef("Gunzip of %q failed (%s), maybe it is encoded", file, err)
	decodedData, err := base64.StdEncoding.DecodeString(string(raw))
	if err == nil {
		// NOTE(review): the inner err shadows the outer one, so the
		// trace line below logs the (nil) base64 error rather than the
		// map-conversion error — confirm whether that log matters.
		if buf, err := bytesAsConfigMap(decodedData); err == nil {
			return buf, nil
		}
	}
	logger.Tracef("Decoding of %q failed (%s), maybe it is encoded and gzipped", file, err)
	decodedZippedBuf, err := utils.Gunzip(decodedData)
	if err != nil {
		// During testing, it was found that the trusty vendor-data.txt.i file
		// can contain only the text "NONE", which doesn't unmarshall or decompress
		// we don't want to fail in that case.
		if r.config.Series == "trusty" {
			logger.Debugf("failed to unmarshall or decompress %q: %s", file, err)
			return nil, nil
		}
		return nil, errors.Annotatef(err, "cannot unmarshall or decompress %q", file)
	}
	cfg, err := bytesAsConfigMap(decodedZippedBuf)
	return cfg, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q4289
|
fileAsConfigMap
|
train
|
// fileAsConfigMap reads the file and attempts to convert its contents
// to a config map. The raw bytes are returned alongside so callers
// can retry with decompression/decoding; a conversion failure is
// reported as a NotValid error.
func fileAsConfigMap(file string) ([]byte, map[string]interface{}, error) {
	raw, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, nil, errors.Annotatef(err, "reading config from %q", file)
	}
	if len(raw) == 0 {
		// Empty file: nothing to convert, and not an error.
		return nil, nil, nil
	}
	cfg, err := bytesAsConfigMap(raw)
	if err != nil {
		return raw, cfg, errors.NotValidf("converting %q contents to map: %s", file, err.Error())
	}
	return raw, cfg, nil
}
|
go
|
{
"resource": ""
}
|
q4290
|
extractPropertiesFromConfig
|
train
|
// extractPropertiesFromConfig pulls the requested property keys out
// of a cloud-init config map that uses the nested schema. apt-related
// keys are resolved under the top-level "apt" section and re-nested
// under "apt" in the result; missing keys are logged at debug level.
func extractPropertiesFromConfig(props []string, cfg map[string]interface{}, log loggo.Logger) map[string]interface{} {
	foundDataMap := make(map[string]interface{})
	for _, k := range props {
		key := strings.TrimSpace(k)
		switch key {
		case "apt-security", "apt-primary", "apt-sources", "apt-sources_list":
			if val, ok := cfg["apt"]; ok {
				for k, v := range nestedAptConfig(key, val, log) {
					// security, sources, and primary all nest under apt, ensure
					// we don't overwrite prior translated data.
					if apt, ok := foundDataMap["apt"].(map[string]interface{}); ok {
						apt[k] = v
					} else {
						foundDataMap["apt"] = map[string]interface{}{
							k: v,
						}
					}
				}
			} else {
				log.Debugf("%s not found in machine init data", key)
			}
		case "ca-certs":
			// No translation needed, ca-certs the same in both versions of Cloud-Init.
			if val, ok := cfg[key]; ok {
				foundDataMap[key] = val
			} else {
				log.Debugf("%s not found in machine init data", key)
			}
		}
	}
	return foundDataMap
}
|
go
|
{
"resource": ""
}
|
q4291
|
extractPropertiesFromConfigLegacy
|
train
|
// extractPropertiesFromConfigLegacy is the counterpart of
// extractPropertiesFromConfig for the legacy flat (underscore-keyed)
// cloud-init schema used by older series. All legacy apt_* keys are
// copied through at most once; properties with no known legacy
// translation are skipped with a debug log.
func extractPropertiesFromConfigLegacy(
	props []string, cfg map[string]interface{}, log loggo.Logger,
) map[string]interface{} {
	foundDataMap := make(map[string]interface{})
	// Guard so the apt_* group is only copied once even if both
	// apt-primary and apt-sources are requested.
	aptProcessed := false
	for _, k := range props {
		key := strings.TrimSpace(k)
		switch key {
		case "apt-primary", "apt-sources":
			if aptProcessed {
				continue
			}
			for _, aptKey := range []string{"apt_mirror", "apt_mirror_search", "apt_mirror_search_dns", "apt_sources"} {
				if val, ok := cfg[aptKey]; ok {
					foundDataMap[aptKey] = val
				} else {
					log.Debugf("%s not found in machine init data", aptKey)
				}
			}
			aptProcessed = true
		case "apt-sources_list":
			// Testing series trusty on MAAS 2.5+ shows that this could be
			// treated in the same way as the non-legacy property
			// extraction, but we would then be mixing techniques.
			// Legacy handling is left unchanged here under the assumption
			// that provisioning trusty machines on much newer MAAS
			// versions is highly unlikely.
			log.Debugf("%q ignored for this machine series", key)
		case "apt-security":
			// Translation for apt-security unknown at this time.
			log.Debugf("%q ignored for this machine series", key)
		case "ca-certs":
			// No translation needed, ca-certs the same in both versions of Cloud-Init.
			if val, ok := cfg[key]; ok {
				foundDataMap[key] = val
			} else {
				log.Debugf("%s not found in machine init data", key)
			}
		}
	}
	return foundDataMap
}
|
go
|
{
"resource": ""
}
|
q4292
|
authorizeCharmStoreEntity
|
train
|
// authorizeCharmStoreEntity acquires a delegatable macaroon for the
// given charm URL from the charm store.
func authorizeCharmStoreEntity(csClient charmstoreForDeploy, curl *charm.URL) (*macaroon.Macaroon, error) {
	endpoint := "/delegatable-macaroon?id=" + url.QueryEscape(curl.String())
	var mac *macaroon.Macaroon
	if err := csClient.Get(endpoint, &mac); err != nil {
		return nil, errors.Trace(err)
	}
	return mac, nil
}
|
go
|
{
"resource": ""
}
|
q4293
|
WatchApplication
|
train
|
// WatchApplication returns a NotifyWatcher that notifies of changes
// to the application with the given name.
func (c *Client) WatchApplication(appName string) (watcher.NotifyWatcher, error) {
	tag, err := applicationTag(appName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return common.Watch(c.facade, "Watch", tag)
}
|
go
|
{
"resource": ""
}
|
q4294
|
Life
|
train
|
// Life returns the lifecycle value of the application with the given
// name.
func (c *Client) Life(appName string) (life.Value, error) {
	appTag, err := applicationTag(appName)
	if err != nil {
		return "", errors.Trace(err)
	}
	var results params.LifeResults
	if err := c.facade.FacadeCall("Life", entities(appTag), &results); err != nil {
		return "", err
	}
	if n := len(results.Results); n != 1 {
		return "", errors.Errorf("expected 1 result, got %d", n)
	}
	result := results.Results[0]
	if result.Error != nil {
		return "", maybeNotFound(result.Error)
	}
	return life.Value(result.Life), nil
}
|
go
|
{
"resource": ""
}
|
q4295
|
IsExposed
|
train
|
// IsExposed reports whether the application with the given name is
// exposed.
func (c *Client) IsExposed(appName string) (bool, error) {
	appTag, err := applicationTag(appName)
	if err != nil {
		return false, errors.Trace(err)
	}
	var results params.BoolResults
	if err := c.facade.FacadeCall("IsExposed", entities(appTag), &results); err != nil {
		return false, err
	}
	if n := len(results.Results); n != 1 {
		return false, errors.Errorf("expected 1 result, got %d", n)
	}
	result := results.Results[0]
	if result.Error != nil {
		return false, maybeNotFound(result.Error)
	}
	return result.Result, nil
}
|
go
|
{
"resource": ""
}
|
q4296
|
Arch
|
train
|
// Arch returns the machine's architecture, defaulting to the host
// architecture when none was specified.
func (p CreateMachineParams) Arch() string {
	if p.arch == "" {
		return arch.HostArch()
	}
	return p.arch
}
|
go
|
{
"resource": ""
}
|
q4297
|
ValidateDomainParams
|
train
|
// ValidateDomainParams checks that the params hold everything needed
// to define a domain: a hostname, plus at least a system (qcow2) disk
// and a data source (raw) disk.
func (p CreateMachineParams) ValidateDomainParams() error {
	if p.Hostname == "" {
		return errors.Errorf("missing required hostname")
	}
	if len(p.disks) < 2 {
		// We need at least the drive and the data source disk.
		return errors.Errorf("got %d disks, need at least 2", len(p.disks))
	}
	var haveDataSource, haveSystem bool
	for _, d := range p.disks {
		switch d.Driver() {
		case "qcow2":
			haveSystem = true
		case "raw":
			haveDataSource = true
		}
	}
	if !haveDataSource {
		return errors.Trace(errors.Errorf("missing data source disk"))
	}
	if !haveSystem {
		return errors.Trace(errors.Errorf("missing system disk"))
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q4298
|
CreateMachine
|
train
|
func CreateMachine(params CreateMachineParams) error {
if params.Hostname == "" {
return fmt.Errorf("hostname is required")
}
setDefaults(¶ms)
templateDir := filepath.Dir(params.UserDataFile)
err := writeMetadata(templateDir)
if err != nil {
return errors.Annotate(err, "failed to write instance metadata")
}
dsPath, err := writeDataSourceVolume(params)
if err != nil {
return errors.Annotatef(err, "failed to write data source volume for %q", params.Host())
}
imgPath, err := writeRootDisk(params)
if err != nil {
return errors.Annotatef(err, "failed to write root volume for %q", params.Host())
}
params.disks = append(params.disks, diskInfo{source: imgPath, driver: "qcow2"})
params.disks = append(params.disks, diskInfo{source: dsPath, driver: "raw"})
domainPath, err := writeDomainXML(templateDir, params)
if err != nil {
return errors.Annotatef(err, "failed to write domain xml for %q", params.Host())
}
out, err := params.runCmdAsRoot("", virsh, "define", domainPath)
if err != nil {
return errors.Annotatef(err, "failed to defined the domain for %q from %s", params.Host(), domainPath)
}
logger.Debugf("created domain: %s", out)
out, err = params.runCmdAsRoot("", virsh, "start", params.Host())
if err != nil {
return errors.Annotatef(err, "failed to start domain %q", params.Host())
}
logger.Debugf("started domain: %s", out)
return err
}
|
go
|
{
"resource": ""
}
|
q4299
|
setDefaults
|
train
|
// setDefaults fills in any unset function hooks on p with their
// production implementations; tests may pre-set them to stubs.
func setDefaults(p *CreateMachineParams) {
	if p.findPath == nil {
		p.findPath = paths.DataDir
	}
	if p.runCmd == nil {
		p.runCmd = runAsLibvirt
	}
	if p.runCmdAsRoot == nil {
		p.runCmdAsRoot = run
	}
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.