_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q3500
|
DestroyController
|
train
|
// DestroyController records the call on the mock controller and
// returns the configured error result, if any.
func (m *MockZonedEnviron) DestroyController(arg0 context.ProviderCallContext, arg1 string) error {
	results := m.ctrl.Call(m, "DestroyController", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3501
|
InstanceTypes
|
train
|
// InstanceTypes records the call on the mock controller and returns
// the configured (metadata, error) results.
func (m *MockZonedEnviron) InstanceTypes(arg0 context.ProviderCallContext, arg1 constraints.Value) (instances.InstanceTypesWithCostMetadata, error) {
	results := m.ctrl.Call(m, "InstanceTypes", arg0, arg1)
	meta, _ := results[0].(instances.InstanceTypesWithCostMetadata)
	err, _ := results[1].(error)
	return meta, err
}
|
go
|
{
"resource": ""
}
|
q3502
|
Instances
|
train
|
// Instances records the call on the mock controller and returns the
// configured (instances, error) results.
func (m *MockZonedEnviron) Instances(arg0 context.ProviderCallContext, arg1 []instance.Id) ([]instances.Instance, error) {
	results := m.ctrl.Call(m, "Instances", arg0, arg1)
	insts, _ := results[0].([]instances.Instance)
	err, _ := results[1].(error)
	return insts, err
}
|
go
|
{
"resource": ""
}
|
q3503
|
MaintainInstance
|
train
|
// MaintainInstance records the call on the mock controller and
// returns the configured error result, if any.
func (m *MockZonedEnviron) MaintainInstance(arg0 context.ProviderCallContext, arg1 environs.StartInstanceParams) error {
	results := m.ctrl.Call(m, "MaintainInstance", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3504
|
PrecheckInstance
|
train
|
// PrecheckInstance records the call on the mock controller and
// returns the configured error result, if any.
func (m *MockZonedEnviron) PrecheckInstance(arg0 context.ProviderCallContext, arg1 environs.PrecheckInstanceParams) error {
	results := m.ctrl.Call(m, "PrecheckInstance", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3505
|
PrepareForBootstrap
|
train
|
// PrepareForBootstrap records the call on the mock controller and
// returns the configured error result, if any.
func (m *MockZonedEnviron) PrepareForBootstrap(arg0 environs.BootstrapContext, arg1 string) error {
	results := m.ctrl.Call(m, "PrepareForBootstrap", arg0, arg1)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3506
|
Provider
|
train
|
// Provider records the call on the mock controller and returns the
// configured provider result, if any.
func (m *MockZonedEnviron) Provider() environs.EnvironProvider {
	results := m.ctrl.Call(m, "Provider")
	provider, _ := results[0].(environs.EnvironProvider)
	return provider
}
|
go
|
{
"resource": ""
}
|
q3507
|
SetConfig
|
train
|
// SetConfig records the call on the mock controller and returns the
// configured error result, if any.
func (m *MockZonedEnviron) SetConfig(arg0 *config.Config) error {
	results := m.ctrl.Call(m, "SetConfig", arg0)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3508
|
StopInstances
|
train
|
// StopInstances records the call on the mock controller, flattening
// the fixed and variadic arguments into a single argument list, and
// returns the configured error result, if any.
func (m *MockZonedEnviron) StopInstances(arg0 context.ProviderCallContext, arg1 ...instance.Id) error {
	callArgs := make([]interface{}, 0, len(arg1)+1)
	callArgs = append(callArgs, arg0)
	for _, id := range arg1 {
		callArgs = append(callArgs, id)
	}
	results := m.ctrl.Call(m, "StopInstances", callArgs...)
	err, _ := results[0].(error)
	return err
}
|
go
|
{
"resource": ""
}
|
q3509
|
StorageProvider
|
train
|
// StorageProvider records the call on the mock controller and returns
// the configured (provider, error) results.
func (m *MockZonedEnviron) StorageProvider(arg0 storage.ProviderType) (storage.Provider, error) {
	results := m.ctrl.Call(m, "StorageProvider", arg0)
	provider, _ := results[0].(storage.Provider)
	err, _ := results[1].(error)
	return provider, err
}
|
go
|
{
"resource": ""
}
|
q3510
|
StorageProvider
|
train
|
// StorageProvider registers an expected call to StorageProvider on
// the mock, using the method's reflected type for argument checking.
func (mr *MockZonedEnvironMockRecorder) StorageProvider(arg0 interface{}) *gomock.Call {
	methodType := reflect.TypeOf((*MockZonedEnviron)(nil).StorageProvider)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageProvider", methodType, arg0)
}
|
go
|
{
"resource": ""
}
|
q3511
|
StorageProviderTypes
|
train
|
// StorageProviderTypes records the call on the mock controller and
// returns the configured (types, error) results.
func (m *MockZonedEnviron) StorageProviderTypes() ([]storage.ProviderType, error) {
	results := m.ctrl.Call(m, "StorageProviderTypes")
	types, _ := results[0].([]storage.ProviderType)
	err, _ := results[1].(error)
	return types, err
}
|
go
|
{
"resource": ""
}
|
q3512
|
NewStorageAddCommand
|
train
|
// NewStorageAddCommand constructs a storage-add command bound to the
// given context. It never returns an error.
func NewStorageAddCommand(ctx Context) (cmd.Command, error) {
	command := &StorageAddCommand{ctx: ctx}
	return command, nil
}
|
go
|
{
"resource": ""
}
|
q3513
|
IAASManifolds
|
train
|
// IAASManifolds returns the dependency-engine manifolds specific to an
// IAAS (machine-based) agent, merged with the shared manifold set via
// mergeManifolds. Most workers are wrapped in ifNotMigrating so they
// are suspended while a model migration is in progress.
func IAASManifolds(config ManifoldsConfig) dependency.Manifolds {
	// LXD proxy reconfiguration is only wired up on Linux; elsewhere
	// the external update func stays nil.
	var externalUpdateProxyFunc func(proxy.Settings) error
	if runtime.GOOS == "linux" {
		externalUpdateProxyFunc = lxd.ConfigureLXDProxies
	}
	manifolds := dependency.Manifolds{
		toolsVersionCheckerName: ifNotMigrating(toolsversionchecker.Manifold(toolsversionchecker.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),
		authenticationWorkerName: ifNotMigrating(authenticationworker.Manifold(authenticationworker.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),
		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		proxyConfigUpdater: ifNotMigrating(proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			AgentName:       agentName,
			APICallerName:   apiCallerName,
			Logger:          loggo.GetLogger("juju.worker.proxyupdater"),
			WorkerFunc:      proxyupdater.NewWorker,
			ExternalUpdate:  externalUpdateProxyFunc,
			InProcessUpdate: proxyconfig.DefaultConfig.Set,
			RunFunc:         proxyupdater.RunWithStdIn,
		})),
		hostKeyReporterName: ifNotMigrating(hostkeyreporter.Manifold(hostkeyreporter.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			RootDir:       config.RootDir,
			NewFacade:     hostkeyreporter.NewFacade,
			NewWorker:     hostkeyreporter.NewWorker,
		})),
		// The upgrader is a leaf worker that returns a specific error
		// type recognised by the machine agent, causing other workers
		// to be stopped and the agent to be restarted running the new
		// tools. We should only need one of these in a consolidated
		// agent, but we'll need to be careful about behavioural
		// differences, and interactions with the upgrade-steps
		// worker.
		upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			UpgradeCheckGateName: upgradeCheckGateName,
			PreviousAgentVersion: config.PreviousAgentVersion,
		}),
		upgradeSeriesWorkerName: ifNotMigrating(upgradeseries.Manifold(upgradeseries.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Logger:        loggo.GetLogger("juju.worker.upgradeseries"),
			NewFacade:     upgradeseries.NewFacade,
			NewWorker:     upgradeseries.NewWorker,
		})),
		// The deployer worker is primary for deploying and recalling unit
		// agents, according to changes in a set of state units; and for the
		// final removal of its agents' units from state when they are no
		// longer needed.
		deployerName: ifNotMigrating(deployer.Manifold(deployer.ManifoldConfig{
			NewDeployContext: config.NewDeployContext,
			AgentName:        agentName,
			APICallerName:    apiCallerName,
		})),
		// The reboot manifold manages a worker which will reboot the
		// machine when requested. It needs an API connection and
		// waits for upgrades to be complete.
		rebootName: ifNotMigrating(reboot.Manifold(reboot.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			MachineLock:   config.MachineLock,
			Clock:         config.Clock,
		})),
		// The storageProvisioner worker manages provisioning
		// (deprovisioning), and attachment (detachment) of first-class
		// volumes and filesystems.
		storageProvisionerName: ifNotMigrating(ifCredentialValid(storageprovisioner.MachineManifold(storageprovisioner.MachineManifoldConfig{
			AgentName:                    agentName,
			APICallerName:                apiCallerName,
			Clock:                        config.Clock,
			NewCredentialValidatorFacade: common.NewCredentialInvalidatorFacade,
		}))),
		brokerTrackerName: ifNotMigrating(lxdbroker.Manifold(lxdbroker.ManifoldConfig{
			APICallerName: apiCallerName,
			AgentName:     agentName,
			MachineLock:   config.MachineLock,
			NewBrokerFunc: config.NewBrokerFunc,
			NewTracker:    lxdbroker.NewWorkerTracker,
		})),
		// The instance mutater depends on the broker tracker above via
		// BrokerName.
		instanceMutaterName: ifNotMigrating(instancemutater.MachineManifold(instancemutater.MachineManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			BrokerName:    brokerTrackerName,
			Logger:        loggo.GetLogger("juju.worker.instancemutater"),
			NewClient:     instancemutater.NewClient,
			NewWorker:     instancemutater.NewContainerWorker,
		})),
	}
	return mergeManifolds(config, manifolds)
}
|
go
|
{
"resource": ""
}
|
q3514
|
CAASManifolds
|
train
|
// CAASManifolds returns the dependency-engine manifolds specific to a
// CAAS agent, merged with the shared manifold set via mergeManifolds.
func CAASManifolds(config ManifoldsConfig) dependency.Manifolds {
	// TODO(caas) - when we support HA, only want this on primary
	caasOnly := dependency.Manifolds{
		upgraderName: caasupgrader.Manifold(caasupgrader.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			UpgradeCheckGateName: upgradeCheckGateName,
			PreviousAgentVersion: config.PreviousAgentVersion,
		}),
	}
	return mergeManifolds(config, caasOnly)
}
|
go
|
{
"resource": ""
}
|
q3515
|
OpenPorts
|
train
|
// OpenPorts always fails: this firewaller does not support opening
// ports.
func (c *rackspaceFirewaller) OpenPorts(ctx context.ProviderCallContext, rules []network.IngressRule) error {
	return errors.NotSupportedf("OpenPorts")
}
|
go
|
{
"resource": ""
}
|
q3516
|
IngressRules
|
train
|
// IngressRules always fails: this firewaller does not support querying
// ingress rules.
// NOTE(review): the error text says "Ports" rather than the method
// name "IngressRules" — presumably legacy wording; confirm before
// changing, as tests may match on it.
func (c *rackspaceFirewaller) IngressRules(ctx context.ProviderCallContext) ([]network.IngressRule, error) {
	return nil, errors.NotSupportedf("Ports")
}
|
go
|
{
"resource": ""
}
|
q3517
|
DeleteGroups
|
train
|
// DeleteGroups is a no-op: there are no security groups to delete for
// this firewaller, so it always succeeds.
func (c *rackspaceFirewaller) DeleteGroups(ctx context.ProviderCallContext, names ...string) error {
	return nil
}
|
go
|
{
"resource": ""
}
|
q3518
|
DeleteAllControllerGroups
|
train
|
// DeleteAllControllerGroups is a no-op: there are no controller
// security groups to delete for this firewaller, so it always succeeds.
func (c *rackspaceFirewaller) DeleteAllControllerGroups(ctx context.ProviderCallContext, controllerUUID string) error {
	return nil
}
|
go
|
{
"resource": ""
}
|
q3519
|
GetSecurityGroups
|
train
|
// GetSecurityGroups is a no-op: this firewaller tracks no security
// groups, so it always returns an empty result.
func (c *rackspaceFirewaller) GetSecurityGroups(ctx context.ProviderCallContext, ids ...instance.Id) ([]string, error) {
	return nil, nil
}
|
go
|
{
"resource": ""
}
|
q3520
|
SetUpGroups
|
train
|
// SetUpGroups is a no-op: this firewaller creates no security groups,
// so it always returns an empty result.
func (c *rackspaceFirewaller) SetUpGroups(ctx context.ProviderCallContext, controllerUUID, machineId string, apiPort int) ([]string, error) {
	return nil, nil
}
|
go
|
{
"resource": ""
}
|
q3521
|
filesystemsChanged
|
train
|
// filesystemsChanged responds to a batch of filesystem life-cycle
// changes: it resolves each changed id to a filesystem tag, partitions
// the tags by life (alive/dying/dead), fetches information for all of
// them in one bulk call, and hands each partition to the matching
// process* helper.
func filesystemsChanged(ctx *context, changes []string) error {
	tags := make([]names.Tag, len(changes))
	for i, change := range changes {
		tags[i] = names.NewFilesystemTag(change)
	}
	alive, dying, dead, err := storageEntityLife(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("filesystems alive: %v, dying: %v, dead: %v", alive, dying, dead)
	if len(alive)+len(dying)+len(dead) == 0 {
		return nil
	}
	// Get filesystem information for filesystems, so we can provision,
	// deprovision, attach and detach.
	// The groups are concatenated alive++dying++dead so that a single
	// Filesystems call covers them; the result slice is re-sliced by
	// the same offsets below, so the order here is load-bearing.
	filesystemTags := make([]names.FilesystemTag, 0, len(alive)+len(dying)+len(dead))
	for _, tag := range alive {
		filesystemTags = append(filesystemTags, tag.(names.FilesystemTag))
	}
	for _, tag := range dying {
		filesystemTags = append(filesystemTags, tag.(names.FilesystemTag))
	}
	for _, tag := range dead {
		filesystemTags = append(filesystemTags, tag.(names.FilesystemTag))
	}
	filesystemResults, err := ctx.config.Filesystems.Filesystems(filesystemTags)
	if err != nil {
		return errors.Annotatef(err, "getting filesystem information")
	}
	// Split tags and results back into the alive/dying/dead partitions.
	aliveFilesystemTags := filesystemTags[:len(alive)]
	dyingFilesystemTags := filesystemTags[len(alive) : len(alive)+len(dying)]
	deadFilesystemTags := filesystemTags[len(alive)+len(dying):]
	aliveFilesystemResults := filesystemResults[:len(alive)]
	dyingFilesystemResults := filesystemResults[len(alive) : len(alive)+len(dying)]
	deadFilesystemResults := filesystemResults[len(alive)+len(dying):]
	// Dead first (deprovision), then dying, then alive (provision).
	if err := processDeadFilesystems(ctx, deadFilesystemTags, deadFilesystemResults); err != nil {
		return errors.Annotate(err, "deprovisioning filesystems")
	}
	if err := processDyingFilesystems(ctx, dyingFilesystemTags, dyingFilesystemResults); err != nil {
		return errors.Annotate(err, "processing dying filesystems")
	}
	if err := processAliveFilesystems(ctx, aliveFilesystemTags, aliveFilesystemResults); err != nil {
		return errors.Annotate(err, "provisioning filesystems")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3522
|
filesystemAttachmentsChanged
|
train
|
// filesystemAttachmentsChanged responds to a batch of filesystem
// attachment changes: it partitions the ids by attachment life,
// fetches attachment information for the alive and dying ones in a
// single call (alive first, then dying — the result slice is split at
// len(alive) below), and detaches the dying before attaching the
// alive.
func filesystemAttachmentsChanged(ctx *context, watcherIds []watcher.MachineStorageId) error {
	ids := copyMachineStorageIds(watcherIds)
	alive, dying, dead, err := attachmentLife(ctx, ids)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("filesystem attachment alive: %v, dying: %v, dead: %v", alive, dying, dead)
	if len(dead) != 0 {
		// We should not see dead filesystem attachments;
		// attachments go directly from Dying to removed.
		logger.Warningf("unexpected dead filesystem attachments: %v", dead)
	}
	if len(alive)+len(dying) == 0 {
		return nil
	}
	// Get filesystem information for alive and dying filesystem attachments, so
	// we can attach/detach.
	ids = append(alive, dying...)
	filesystemAttachmentResults, err := ctx.config.Filesystems.FilesystemAttachments(ids)
	if err != nil {
		return errors.Annotatef(err, "getting filesystem attachment information")
	}
	// Deprovision Dying filesystem attachments.
	dyingFilesystemAttachmentResults := filesystemAttachmentResults[len(alive):]
	if err := processDyingFilesystemAttachments(ctx, dying, dyingFilesystemAttachmentResults); err != nil {
		return errors.Annotate(err, "destroying filesystem attachments")
	}
	// Provision Alive filesystem attachments.
	aliveFilesystemAttachmentResults := filesystemAttachmentResults[:len(alive)]
	if err := processAliveFilesystemAttachments(ctx, alive, aliveFilesystemAttachmentResults); err != nil {
		return errors.Annotate(err, "creating filesystem attachments")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3523
|
processDyingFilesystems
|
train
|
// processDyingFilesystems removes each dying filesystem from the
// pending set; no other action is taken here. The filesystemResults
// parameter is unused but kept for signature parity with the other
// process* helpers.
func processDyingFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error {
	for i := range tags {
		removePendingFilesystem(ctx, tags[i])
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3524
|
updatePendingFilesystemAttachment
|
train
|
// updatePendingFilesystemAttachment records or schedules a filesystem
// attachment. If required information is still missing — the
// filesystem is not yet known, a volume-backed filesystem's block
// device has not been seen, the machine has no instance id, or the
// filesystem id is empty — the params are parked in
// incompleteFilesystemAttachmentParams; otherwise an attachFilesystemOp
// is scheduled and any parked entry for the id is dropped.
func updatePendingFilesystemAttachment(
	ctx *context,
	id params.MachineStorageId,
	params storage.FilesystemAttachmentParams,
) {
	var incomplete bool
	filesystem, ok := ctx.filesystems[params.Filesystem]
	if !ok {
		// The filesystem itself is not known yet.
		incomplete = true
	} else {
		params.FilesystemId = filesystem.FilesystemId
		if filesystem.Volume != (names.VolumeTag{}) {
			// The filesystem is volume-backed: if the filesystem
			// was created in another session, then the block device
			// may not have been seen yet. We must wait for the block
			// device watcher to trigger.
			if _, ok := ctx.volumeBlockDevices[filesystem.Volume]; !ok {
				incomplete = true
			}
		}
	}
	if params.InstanceId == "" {
		// Machine not provisioned yet: watch it so we get retriggered
		// once it is.
		watchMachine(ctx, params.Machine.(names.MachineTag))
		incomplete = true
	}
	if params.FilesystemId == "" {
		incomplete = true
	}
	if incomplete {
		ctx.incompleteFilesystemAttachmentParams[id] = params
		return
	}
	delete(ctx.incompleteFilesystemAttachmentParams, id)
	scheduleOperations(ctx, &attachFilesystemOp{args: params})
}
|
go
|
{
"resource": ""
}
|
q3525
|
processDeadFilesystems
|
train
|
// processDeadFilesystems handles filesystems that have gone Dead:
// provisioned ones are queued for deprovisioning (removeFilesystemOp),
// while never-provisioned ones are removed from state directly. Any
// other error fetching a filesystem's information aborts the batch.
func processDeadFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error {
	// Dead filesystems can no longer be pending.
	for _, tag := range tags {
		removePendingFilesystem(ctx, tag)
	}
	var destroy []names.FilesystemTag
	var remove []names.Tag
	for i, result := range filesystemResults {
		tag := tags[i]
		if result.Error == nil {
			logger.Debugf("filesystem %s is provisioned, queuing for deprovisioning", tag.Id())
			filesystem, err := filesystemFromParams(result.Result)
			if err != nil {
				return errors.Annotate(err, "getting filesystem info")
			}
			updateFilesystem(ctx, filesystem)
			destroy = append(destroy, tag)
			continue
		}
		if params.IsCodeNotProvisioned(result.Error) {
			logger.Debugf("filesystem %s is not provisioned, queuing for removal", tag.Id())
			remove = append(remove, tag)
			continue
		}
		return errors.Annotatef(result.Error, "getting filesystem information for filesystem %s", tag.Id())
	}
	if len(destroy) > 0 {
		ops := make([]scheduleOp, len(destroy))
		for i, tag := range destroy {
			ops[i] = &removeFilesystemOp{tag: tag}
		}
		scheduleOperations(ctx, ops...)
	}
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing filesystems from state")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3526
|
processDyingFilesystemAttachments
|
train
|
// processDyingFilesystemAttachments handles attachments that have gone
// Dying: provisioned ones are queued for detachment
// (detachFilesystemOp), while never-provisioned ones are removed from
// state directly. Any other error fetching an attachment's
// information aborts the batch.
func processDyingFilesystemAttachments(
	ctx *context,
	ids []params.MachineStorageId,
	filesystemAttachmentResults []params.FilesystemAttachmentResult,
) error {
	// Dying attachments can no longer be pending.
	for _, id := range ids {
		removePendingFilesystemAttachment(ctx, id)
	}
	detach := make([]params.MachineStorageId, 0, len(ids))
	remove := make([]params.MachineStorageId, 0, len(ids))
	for i, result := range filesystemAttachmentResults {
		id := ids[i]
		if result.Error == nil {
			detach = append(detach, id)
			continue
		}
		if params.IsCodeNotProvisioned(result.Error) {
			remove = append(remove, id)
			continue
		}
		return errors.Annotatef(result.Error, "getting information for filesystem attachment %v", id)
	}
	if len(detach) > 0 {
		attachmentParams, err := filesystemAttachmentParams(ctx, detach)
		if err != nil {
			return errors.Trace(err)
		}
		ops := make([]scheduleOp, len(attachmentParams))
		for i, p := range attachmentParams {
			ops[i] = &detachFilesystemOp{args: p}
		}
		scheduleOperations(ctx, ops...)
	}
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3527
|
processAliveFilesystems
|
train
|
// processAliveFilesystems handles Alive filesystems: already
// provisioned ones have their info cached (and, for volume-backed
// filesystems on machine scopes, their block devices tracked), while
// unprovisioned ones have their provisioning parameters fetched and
// are recorded as pending.
func processAliveFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error {
	// Filter out the already-provisioned filesystems.
	pending := make([]names.FilesystemTag, 0, len(tags))
	for i, result := range filesystemResults {
		tag := tags[i]
		if result.Error == nil {
			// Filesystem is already provisioned: skip.
			logger.Debugf("filesystem %q is already provisioned, nothing to do", tag.Id())
			filesystem, err := filesystemFromParams(result.Result)
			if err != nil {
				return errors.Annotate(err, "getting filesystem info")
			}
			updateFilesystem(ctx, filesystem)
			if !ctx.isApplicationKind() {
				if filesystem.Volume != (names.VolumeTag{}) {
					// Ensure that volume-backed filesystems' block
					// devices are present even after creating the
					// filesystem, so that attachments can be made.
					maybeAddPendingVolumeBlockDevice(ctx, filesystem.Volume)
				}
			}
			continue
		}
		if !params.IsCodeNotProvisioned(result.Error) {
			return errors.Annotatef(
				result.Error, "getting filesystem information for filesystem %q", tag.Id(),
			)
		}
		// The filesystem has not yet been provisioned, so record its tag
		// to enquire about parameters below.
		pending = append(pending, tag)
	}
	if len(pending) == 0 {
		return nil
	}
	params, err := filesystemParams(ctx, pending)
	if err != nil {
		return errors.Annotate(err, "getting filesystem params")
	}
	// NOTE: `params` here shadows the params package for the rest of
	// the function.
	for _, params := range params {
		if ctx.isApplicationKind() {
			logger.Debugf("not queuing filesystem for %v unit", ctx.config.Scope.Id())
			continue
		}
		updatePendingFilesystem(ctx, params)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3528
|
processAliveFilesystemAttachments
|
train
|
// processAliveFilesystemAttachments handles Alive attachments:
// already-attached ones are skipped unless they were not (re)attached
// in this session, in which case they are re-queued; unprovisioned
// ones have their parameters fetched and, for machine scopes, are
// recorded/scheduled via updatePendingFilesystemAttachment.
func processAliveFilesystemAttachments(
	ctx *context,
	ids []params.MachineStorageId,
	filesystemAttachmentResults []params.FilesystemAttachmentResult,
) error {
	// Filter out the already-attached.
	pending := make([]params.MachineStorageId, 0, len(ids))
	for i, result := range filesystemAttachmentResults {
		if result.Error == nil {
			// Filesystem attachment is already provisioned: if we
			// didn't (re)attach in this session, then we must do
			// so now.
			action := "nothing to do"
			if _, ok := ctx.filesystemAttachments[ids[i]]; !ok {
				// Not yet (re)attached in this session.
				pending = append(pending, ids[i])
				action = "will reattach"
			}
			logger.Debugf(
				"%s is already attached to %s, %s",
				ids[i].AttachmentTag, ids[i].MachineTag, action,
			)
			removePendingFilesystemAttachment(ctx, ids[i])
			continue
		}
		if !params.IsCodeNotProvisioned(result.Error) {
			return errors.Annotatef(
				result.Error, "getting information for attachment %v", ids[i],
			)
		}
		// The filesystem has not yet been attached, so
		// record its tag to enquire about parameters below.
		pending = append(pending, ids[i])
	}
	if len(pending) == 0 {
		return nil
	}
	params, err := filesystemAttachmentParams(ctx, pending)
	if err != nil {
		return errors.Trace(err)
	}
	// NOTE: `params` here shadows the params package for the rest of
	// the function.
	for i, params := range params {
		// Only machine-scoped attachments are queued here.
		if params.Machine != nil && params.Machine.Kind() != names.MachineTagKind {
			logger.Debugf("not queuing filesystem attachment for non-machine %v", params.Machine)
			continue
		}
		updatePendingFilesystemAttachment(ctx, pending[i], params)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3529
|
filesystemAttachmentParams
|
train
|
// filesystemAttachmentParams fetches the attachment parameters for
// each of the given machine/filesystem id pairs, converting each API
// result into a storage.FilesystemAttachmentParams. The first
// per-result or conversion error aborts the whole call.
func filesystemAttachmentParams(
	ctx *context, ids []params.MachineStorageId,
) ([]storage.FilesystemAttachmentParams, error) {
	results, err := ctx.config.Filesystems.FilesystemAttachmentParams(ids)
	if err != nil {
		return nil, errors.Annotate(err, "getting filesystem attachment params")
	}
	out := make([]storage.FilesystemAttachmentParams, len(ids))
	for i, result := range results {
		if result.Error != nil {
			return nil, errors.Annotate(result.Error, "getting filesystem attachment parameters")
		}
		converted, err := filesystemAttachmentParamsFromParams(result.Result)
		if err != nil {
			return nil, errors.Annotate(err, "getting filesystem attachment parameters")
		}
		out[i] = converted
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q3530
|
filesystemParams
|
train
|
// filesystemParams fetches the provisioning parameters for each of
// the given filesystems, converting each API result into a
// storage.FilesystemParams. The first per-result or conversion error
// aborts the whole call.
func filesystemParams(ctx *context, tags []names.FilesystemTag) ([]storage.FilesystemParams, error) {
	results, err := ctx.config.Filesystems.FilesystemParams(tags)
	if err != nil {
		return nil, errors.Annotate(err, "getting filesystem params")
	}
	out := make([]storage.FilesystemParams, len(tags))
	for i, result := range results {
		if result.Error != nil {
			return nil, errors.Annotate(result.Error, "getting filesystem parameters")
		}
		converted, err := filesystemParamsFromParams(result.Result)
		if err != nil {
			return nil, errors.Annotate(err, "getting filesystem parameters")
		}
		out[i] = converted
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q3531
|
removeFilesystemParams
|
train
|
// removeFilesystemParams fetches the removal parameters for each of
// the given filesystems. The first per-result error aborts the whole
// call.
func removeFilesystemParams(ctx *context, tags []names.FilesystemTag) ([]params.RemoveFilesystemParams, error) {
	results, err := ctx.config.Filesystems.RemoveFilesystemParams(tags)
	if err != nil {
		return nil, errors.Annotate(err, "getting filesystem params")
	}
	out := make([]params.RemoveFilesystemParams, len(tags))
	for i := range results {
		if results[i].Error != nil {
			return nil, errors.Annotate(results[i].Error, "getting filesystem removal parameters")
		}
		out[i] = results[i].Result
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q3532
|
extractCatalogsForProducts
|
train
|
// extractCatalogsForProducts returns the catalogs for the given
// product ids, preserving the order of productIds and silently
// skipping ids with no catalog.
func (metadata *CloudMetadata) extractCatalogsForProducts(productIds []string) []MetadataCatalog {
	catalogs := []MetadataCatalog{}
	for _, productId := range productIds {
		catalog, found := metadata.Products[productId]
		if !found {
			continue
		}
		catalogs = append(catalogs, catalog)
	}
	return catalogs
}
|
go
|
{
"resource": ""
}
|
q3533
|
extractIndexes
|
train
|
// extractIndexes returns the index metadata for the given ids; with
// no ids it returns every index. Unknown ids are silently skipped.
func (ind *Indices) extractIndexes(indexIds []string) IndexMetadataSlice {
	result := make(IndexMetadataSlice, 0, len(ind.Indexes))
	if len(indexIds) == 0 {
		// No ids specified so return everything.
		for _, metadata := range ind.Indexes {
			result = append(result, metadata)
		}
		return result
	}
	// Return metadata for just the specified ids.
	for _, id := range indexIds {
		metadata, found := ind.Indexes[id]
		if found {
			result = append(result, metadata)
		}
	}
	return result
}
|
go
|
{
"resource": ""
}
|
q3534
|
hasCloud
|
train
|
// hasCloud reports whether the given cloud matches this index entry.
// An entry that lists no clouds matches any cloud.
func (metadata *IndexMetadata) hasCloud(cloud CloudSpec) bool {
	if len(metadata.Clouds) == 0 {
		return true
	}
	for _, metadataCloud := range metadata.Clouds {
		if metadataCloud.equals(&cloud) {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3535
|
hasProduct
|
train
|
// hasProduct reports whether any of this entry's product ids appears
// in prodIds.
func (metadata *IndexMetadata) hasProduct(prodIds []string) bool {
	for _, productId := range metadata.ProductIds {
		if containsString(prodIds, productId) {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3536
|
filter
|
train
|
// filter returns the entries accepted by the match predicate, in
// their original order.
func (entries IndexMetadataSlice) filter(match func(*IndexMetadata) bool) IndexMetadataSlice {
	matched := IndexMetadataSlice{}
	for _, entry := range entries {
		if !match(entry) {
			continue
		}
		matched = append(matched, entry)
	}
	return matched
}
|
go
|
{
"resource": ""
}
|
q3537
|
UnsignedIndex
|
train
|
// UnsignedIndex returns the unsigned index file path for the given
// streams version. Index file versions greater than 1 carry the
// version number as a suffix in the file name.
func UnsignedIndex(streamsVersion string, indexFileVersion int) string {
	suffix := ""
	if indexFileVersion > 1 {
		suffix = fmt.Sprintf("%d", indexFileVersion)
	}
	return fmt.Sprintf(unsignedIndex, streamsVersion, suffix)
}
|
go
|
{
"resource": ""
}
|
q3538
|
getMaybeSignedMetadata
|
train
|
// getMaybeSignedMetadata fetches simplestreams metadata from source,
// using the signed or unsigned index path depending on the signed
// flag. If the default index path is not found (or unauthorized) it
// retries with the legacy index path. It returns the matched metadata
// items along with a ResolveInfo describing where the data came from.
func getMaybeSignedMetadata(source DataSource, params GetMetadataParams, signed bool) ([]interface{}, *ResolveInfo, error) {
	// Build the index path for a given base, appending the signed or
	// unsigned suffix as appropriate.
	makeIndexPath := func(basePath string) string {
		pathNoSuffix := fmt.Sprintf(basePath, params.StreamsVersion)
		indexPath := pathNoSuffix + UnsignedSuffix
		if signed {
			indexPath = pathNoSuffix + SignedSuffix
		}
		return indexPath
	}
	resolveInfo := &ResolveInfo{}
	resolveInfo.Source = source.Description()
	resolveInfo.Signed = signed
	indexPath := makeIndexPath(defaultIndexPath)
	logger.Tracef("looking for data index using path %s", indexPath)
	mirrorsPath := fmt.Sprintf(defaultMirrorsPath, params.StreamsVersion)
	cons := params.LookupConstraint
	indexRef, indexURL, err := fetchIndex(
		source, indexPath, mirrorsPath, cons.Params().CloudSpec, signed, params.ValueParams,
	)
	logger.Tracef("looking for data index using URL %s", indexURL)
	if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
		// Fall back to the legacy index location.
		legacyIndexPath := makeIndexPath(defaultLegacyIndexPath)
		logger.Tracef("%s not accessed, actual error: %v", indexPath, err)
		logger.Tracef("%s not accessed, trying legacy index path: %s", indexPath, legacyIndexPath)
		indexPath = legacyIndexPath
		indexRef, indexURL, err = fetchIndex(
			source, indexPath, mirrorsPath, cons.Params().CloudSpec, signed, params.ValueParams,
		)
	}
	resolveInfo.IndexURL = indexURL
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			logger.Tracef("cannot load index %q: %v", indexURL, err)
		}
		return nil, resolveInfo, err
	}
	logger.Tracef("read metadata index at %q", indexURL)
	items, err := indexRef.getLatestMetadataWithFormat(cons, ProductFormat, signed)
	if err != nil {
		// NOTE: err is deliberately not returned here unless NotFound;
		// it is passed through in the final return below.
		if errors.IsNotFound(err) {
			logger.Debugf("skipping index %q because of missing information: %v", indexURL, err)
			return nil, resolveInfo, err
		}
		if _, ok := err.(*noMatchingProductsError); !ok {
			logger.Debugf("%v", err)
		}
	}
	// Record the mirror base URL when the data came from a mirror source.
	if indexRef.Source.Description() == "mirror" {
		resolveInfo.MirrorURL = indexRef.Source.(*urlDataSource).baseURL
	}
	return items, resolveInfo, err
}
|
go
|
{
"resource": ""
}
|
q3539
|
fetchIndex
|
train
|
// fetchIndex loads and parses the index file at indexPath from the
// given source. It also returns the URL used (for logging); when the
// source cannot produce a URL it falls back to the bare path.
func fetchIndex(source DataSource, indexPath string, mirrorsPath string, cloudSpec CloudSpec,
	signed bool, params ValueParams) (indexRef *IndexReference, indexURL string, _ error) {
	indexURL, err := source.URL(indexPath)
	if err != nil {
		// Some providers return an error if asked for the URL of a non-existent file.
		// So the best we can do is use the relative path for the URL when logging messages.
		indexURL = indexPath
	}
	indexRef, err = GetIndexWithFormat(
		source, indexPath, IndexFormat, mirrorsPath, signed, cloudSpec, params,
	)
	return indexRef, indexURL, err
}
|
go
|
{
"resource": ""
}
|
q3540
|
fetchData
|
train
|
// fetchData reads the file at path from source, verifying its
// signature when requireSigned is true. A failed Fetch is reported as
// a NotFound error; a read/verification failure is annotated with the
// source and URL.
func fetchData(source DataSource, path string, requireSigned bool) (data []byte, dataURL string, err error) {
	rc, dataURL, err := source.Fetch(path)
	if err != nil {
		logger.Tracef("fetchData failed for %q: %v", dataURL, err)
		return nil, dataURL, errors.NotFoundf("invalid URL %q", dataURL)
	}
	defer rc.Close()
	if requireSigned {
		data, err = DecodeCheckSignature(rc, source.PublicSigningKey())
	} else {
		data, err = ioutil.ReadAll(rc)
	}
	if err != nil {
		return nil, dataURL, errors.Annotatef(err, "cannot read data for source %q at URL %v", source.Description(), dataURL)
	}
	return data, dataURL, nil
}
|
go
|
{
"resource": ""
}
|
q3541
|
GetIndexWithFormat
|
train
|
// GetIndexWithFormat fetches and parses the index file at indexPath
// from source, validates its declared format, and returns an
// IndexReference. If mirror metadata is available and a
// MirrorContentId is configured, the reference's source is redirected
// to the mirror; missing mirror metadata is tolerated.
func GetIndexWithFormat(source DataSource, indexPath, indexFormat, mirrorsPath string, requireSigned bool,
	cloudSpec CloudSpec, params ValueParams) (*IndexReference, error) {
	data, url, err := fetchData(source, indexPath, requireSigned)
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			return nil, err
		}
		return nil, fmt.Errorf("cannot read index data, %v", err)
	}
	var indices Indices
	err = json.Unmarshal(data, &indices)
	if err != nil {
		logger.Errorf("bad JSON index data at URL %q: %v", url, string(data))
		return nil, fmt.Errorf("cannot unmarshal JSON index metadata at URL %q: %v", url, err)
	}
	if indices.Format != indexFormat {
		return nil, fmt.Errorf(
			"unexpected index file format %q, expected %q at URL %q", indices.Format, indexFormat, url)
	}
	// Missing/unauthorized mirror metadata is not fatal; only other
	// errors abort.
	mirrors, url, err := getMirrorRefs(source, mirrorsPath, requireSigned, params)
	if err != nil && !errors.IsNotFound(err) && !errors.IsUnauthorized(err) {
		return nil, fmt.Errorf("cannot load mirror metadata at URL %q: %v", url, err)
	}
	indexRef := &IndexReference{
		Source:      source,
		Indices:     indices,
		valueParams: params,
	}
	// Apply any mirror information to the source.
	if params.MirrorContentId != "" {
		mirrorInfo, err := getMirror(
			source, mirrors, params.DataType, params.MirrorContentId, cloudSpec, requireSigned)
		if err == nil {
			logger.Debugf("using mirrored products path: %s", path.Join(mirrorInfo.MirrorURL, mirrorInfo.Path))
			indexRef.Source = NewURLSignedDataSource("mirror", mirrorInfo.MirrorURL, source.PublicSigningKey(), utils.VerifySSLHostnames, source.Priority(), requireSigned)
			indexRef.MirroredProductsPath = mirrorInfo.Path
		} else {
			// Best effort: fall back to the original source.
			logger.Tracef("no mirror information available for %s: %v", cloudSpec, err)
		}
	}
	return indexRef, nil
}
|
go
|
{
"resource": ""
}
|
q3542
|
getMirrorRefs
|
train
|
// getMirrorRefs fetches and parses the mirror references file derived
// from baseMirrorsPath, choosing the signed or unsigned variant based
// on requireSigned. It returns the parsed refs and the URL used.
func getMirrorRefs(source DataSource, baseMirrorsPath string, requireSigned bool,
	params ValueParams) (MirrorRefs, string, error) {
	suffix := UnsignedSuffix
	if requireSigned {
		suffix = SignedSuffix
	}
	mirrorsPath := baseMirrorsPath + suffix
	var mirrors MirrorRefs
	data, url, err := fetchData(source, mirrorsPath, requireSigned)
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			return mirrors, url, err
		}
		return mirrors, url, fmt.Errorf("cannot read mirrors data, %v", err)
	}
	if err := json.Unmarshal(data, &mirrors); err != nil {
		return mirrors, url, fmt.Errorf("cannot unmarshal JSON mirror metadata at URL %q: %v", url, err)
	}
	return mirrors, url, nil
}
|
go
|
{
"resource": ""
}
|
q3543
|
getMirror
|
train
|
// getMirror resolves the mirror reference for the given datatype,
// content id and cloud, then loads the concrete mirror info it points
// at. A nil info is reported as a NotFound error.
func getMirror(source DataSource, mirrors MirrorRefs, datatype, contentId string, cloudSpec CloudSpec,
	requireSigned bool) (*MirrorInfo, error) {
	ref, err := mirrors.getMirrorReference(datatype, contentId, cloudSpec)
	if err != nil {
		return nil, err
	}
	info, err := ref.getMirrorInfo(source, contentId, cloudSpec, MirrorFormat, requireSigned)
	if err != nil {
		return nil, err
	}
	if info == nil {
		return nil, errors.NotFoundf("mirror metadata for %q and cloud %v", contentId, cloudSpec)
	}
	return info, nil
}
|
go
|
{
"resource": ""
}
|
q3544
|
GetProductsPath
|
train
|
// GetProductsPath returns the path to the products file for the given
// constraint: the mirrored path when one is configured, otherwise the
// first index entry that matches the constraint's data type, cloud
// spec (when non-empty), and product ids.
func (indexRef *IndexReference) GetProductsPath(cons LookupConstraint) (string, error) {
	if indexRef.MirroredProductsPath != "" {
		return indexRef.MirroredProductsPath, nil
	}
	prodIds, err := cons.ProductIds()
	if err != nil {
		return "", err
	}
	candidates := indexRef.extractIndexes(cons.IndexIds())
	// Restrict to the relevant data type entries.
	dataTypeMatches := func(metadata *IndexMetadata) bool {
		return metadata.DataType == indexRef.valueParams.DataType
	}
	candidates = candidates.filter(dataTypeMatches)
	if len(candidates) == 0 {
		// TODO: jam 2015-04-01 This isn't a great error to use,
		// because it is generally reserved for file-not-found
		// semantics.
		// This was formatted as: index file missing "content-download" data not found
		// It now formats as: "content-download" data not found
		// which at least reads better.
		// Shouldn't we be using noMatchingProductsError instead?
		return "", errors.NotFoundf("%q data", indexRef.valueParams.DataType)
	}
	// Restrict by cloud spec, if required.
	if cons.Params().CloudSpec != EmptyCloudSpec {
		hasRightCloud := func(metadata *IndexMetadata) bool {
			return metadata.hasCloud(cons.Params().CloudSpec)
		}
		candidates = candidates.filter(hasRightCloud)
		if len(candidates) == 0 {
			return "", errors.NotFoundf("index file has no data for cloud %v", cons.Params().CloudSpec)
		}
	}
	// Restrict by product IDs.
	hasProduct := func(metadata *IndexMetadata) bool {
		return metadata.hasProduct(prodIds)
	}
	candidates = candidates.filter(hasProduct)
	if len(candidates) == 0 {
		return "", newNoMatchingProductsError("index file has no data for product name(s) %q", prodIds)
	}
	logger.Tracef("candidate matches for products %q are %v", prodIds, candidates)
	// Pick arbitrary match.
	return candidates[0].ProductsFilePath, nil
}
|
go
|
{
"resource": ""
}
|
q3545
|
extractMirrorRefs
|
train
|
// extractMirrorRefs returns the mirror references recorded under
// contentId, or nil when the content id is unknown.
func (mirrorRefs *MirrorRefs) extractMirrorRefs(contentId string) MirrorRefSlice {
	// Direct map lookup replaces the previous linear scan over every
	// key; a missing key yields the zero value (nil), exactly as the
	// old loop's fall-through return did.
	return mirrorRefs.Mirrors[contentId]
}
|
go
|
{
"resource": ""
}
|
q3546
|
hasCloud
|
train
|
// hasCloud reports whether this mirror reference covers the given cloud.
func (mirrorRef *MirrorReference) hasCloud(cloud CloudSpec) bool {
	for i := range mirrorRef.Clouds {
		if mirrorRef.Clouds[i].equals(&cloud) {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3547
|
getMirrorReference
|
train
|
// getMirrorReference returns the mirror reference for contentId that
// matches both the requested datatype and cloud. When no cloud-specific
// mirror matches, a mirror with no cloud restrictions (if any) is used
// as a fallback; otherwise a not-found error is returned.
func (mirrorRefs *MirrorRefs) getMirrorReference(datatype, contentId string, cloud CloudSpec) (*MirrorReference, error) {
	candidates := mirrorRefs.extractMirrorRefs(contentId)
	if len(candidates) == 0 {
		return nil, errors.NotFoundf("mirror data for %q", contentId)
	}
	// Restrict by cloud spec and datatype.
	hasRightCloud := func(mirrorRef *MirrorReference) bool {
		return mirrorRef.hasCloud(cloud) && mirrorRef.DataType == datatype
	}
	matchingCandidates := candidates.filter(hasRightCloud)
	if len(matchingCandidates) == 0 {
		// No cloud specific mirrors found so look for a non cloud specific mirror.
		for _, candidate := range candidates {
			if len(candidate.Clouds) == 0 {
				logger.Debugf("using default candidate for content id %q are %v", contentId, candidate)
				// Returning the address of the range variable is safe
				// here because we return immediately.
				return &candidate, nil
			}
		}
		return nil, errors.NotFoundf("index file with cloud %v", cloud)
	}
	logger.Debugf("candidate matches for content id %q are %v", contentId, candidates)
	// Pick arbitrary match.
	return &matchingCandidates[0], nil
}
|
go
|
{
"resource": ""
}
|
q3548
|
getMirrorInfo
|
train
|
// getMirrorInfo loads the mirror metadata file referenced by mirrorRef
// (in the given format) and resolves the mirror info for contentId and
// cloud within it.
func (mirrorRef *MirrorReference) getMirrorInfo(source DataSource, contentId string, cloud CloudSpec, format string,
	requireSigned bool) (*MirrorInfo, error) {
	metadata, err := GetMirrorMetadataWithFormat(source, mirrorRef.Path, format, requireSigned)
	if err != nil {
		return nil, err
	}
	return metadata.getMirrorInfo(contentId, cloud)
}
|
go
|
{
"resource": ""
}
|
q3549
|
GetMirrorMetadataWithFormat
|
train
|
// GetMirrorMetadataWithFormat fetches the mirror file at mirrorPath from
// source, unmarshals it, and verifies it declares the expected format.
// Not-found and unauthorized fetch errors pass through unchanged so
// callers can distinguish them.
func GetMirrorMetadataWithFormat(source DataSource, mirrorPath, format string,
	requireSigned bool) (*MirrorMetadata, error) {
	data, url, err := fetchData(source, mirrorPath, requireSigned)
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			return nil, err
		}
		return nil, fmt.Errorf("cannot read mirror data, %v", err)
	}
	var mirrors MirrorMetadata
	if err := json.Unmarshal(data, &mirrors); err != nil {
		return nil, fmt.Errorf("cannot unmarshal JSON mirror metadata at URL %q: %v", url, err)
	}
	if mirrors.Format != format {
		return nil, fmt.Errorf("unexpected mirror file format %q, expected %q at URL %q", mirrors.Format, format, url)
	}
	return &mirrors, nil
}
|
go
|
{
"resource": ""
}
|
q3550
|
hasCloud
|
train
|
// hasCloud reports whether this mirror info covers the given cloud.
func (mirrorInfo *MirrorInfo) hasCloud(cloud CloudSpec) bool {
	for i := range mirrorInfo.Clouds {
		if mirrorInfo.Clouds[i].equals(&cloud) {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3551
|
getMirrorInfo
|
train
|
// getMirrorInfo returns the mirror info for contentId restricted to the
// given cloud. The two not-found errors distinguish an unknown content
// id from a known id with no mirror for that cloud.
func (mirrorMetadata *MirrorMetadata) getMirrorInfo(contentId string, cloud CloudSpec) (*MirrorInfo, error) {
	// Direct map lookup replaces the previous linear scan over all
	// keys; a missing key yields a nil slice, as before.
	candidates := mirrorMetadata.Mirrors[contentId]
	if len(candidates) == 0 {
		return nil, errors.NotFoundf("mirror info for %q", contentId)
	}
	// Restrict by cloud spec.
	hasRightCloud := func(mirrorInfo *MirrorInfo) bool {
		return mirrorInfo.hasCloud(cloud)
	}
	candidates = candidates.filter(hasRightCloud)
	if len(candidates) == 0 {
		return nil, errors.NotFoundf("mirror info with cloud %v", cloud)
	}
	// Pick arbitrary match.
	return &candidates[0], nil
}
|
go
|
{
"resource": ""
}
|
q3552
|
containsString
|
train
|
// containsString reports whether element occurs in values.
func containsString(values []string, element string) bool {
	for i := range values {
		if values[i] == element {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q3553
|
processAliases
|
train
|
// processAliases expands alias values on a single metadata item. For
// each tagged field of the item it consults the alias table for that
// tag; if the field's current value names a defined alias, every
// attribute/value pair of that alias is written onto the item.
func (metadata *CloudMetadata) processAliases(item interface{}) {
	for tag := range tags(item) {
		aliases, ok := metadata.Aliases[tag]
		if !ok {
			continue
		}
		// We have found a set of aliases for one of the fields in the metadata struct.
		// Now check to see if the field matches one of the defined aliases.
		fields, ok := aliases[fieldByTag(item, tag)]
		if !ok {
			continue
		}
		// The alias matches - set all the aliased fields in the struct.
		for attr, val := range fields {
			setFieldByTag(item, attr, val, true)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3554
|
applyAliases
|
train
|
// applyAliases walks every item in every product catalog and expands
// any field values that name a defined alias.
func (metadata *CloudMetadata) applyAliases() {
	for _, catalog := range metadata.Products {
		for _, collection := range catalog.Items {
			for _, item := range collection.Items {
				metadata.processAliases(item)
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3555
|
construct
|
train
|
// construct reifies every item collection in the metadata into values
// of the given type, stopping at the first failure.
func (metadata *CloudMetadata) construct(valueType reflect.Type) error {
	for _, catalog := range metadata.Products {
		for _, collection := range catalog.Items {
			if err := collection.construct(valueType); err != nil {
				return err
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3556
|
GetCloudMetadataWithFormat
|
train
|
// GetCloudMetadataWithFormat resolves the products file for the given
// constraint, fetches it, and parses it into cloud metadata of the
// expected format.
func (indexRef *IndexReference) GetCloudMetadataWithFormat(cons LookupConstraint, format string, requireSigned bool) (*CloudMetadata, error) {
	path, err := indexRef.GetProductsPath(cons)
	if err != nil {
		return nil, err
	}
	logger.Tracef("finding products at path %q", path)
	data, url, err := fetchData(indexRef.Source, path, requireSigned)
	if err != nil {
		logger.Tracef("can't read product data: %v", err)
		return nil, fmt.Errorf("cannot read product data, %v", err)
	}
	return ParseCloudMetadata(data, format, url, indexRef.valueParams.ValueTemplate)
}
|
go
|
{
"resource": ""
}
|
q3557
|
ParseCloudMetadata
|
train
|
// ParseCloudMetadata unmarshals simplestreams product data, verifies it
// declares the expected format, optionally reifies the item values into
// valueTemplate's concrete type, and applies alias expansion and
// denormalisation. url is used only in error/log messages.
func ParseCloudMetadata(data []byte, format, url string, valueTemplate interface{}) (*CloudMetadata, error) {
	var metadata CloudMetadata
	err := json.Unmarshal(data, &metadata)
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal JSON metadata at URL %q: %v", url, err)
	}
	if metadata.Format != format {
		return nil, fmt.Errorf("unexpected index file format %q, expected %q at URL %q", metadata.Format, format, url)
	}
	// Reify raw items into concrete values when a template type was
	// supplied; err is nil at this point, so the check below only
	// fires on a construct failure.
	if valueTemplate != nil {
		err = metadata.construct(reflect.TypeOf(valueTemplate))
	}
	if err != nil {
		logger.Errorf("bad JSON product data at URL %q: %v", url, string(data))
		return nil, fmt.Errorf("cannot unmarshal JSON metadata at URL %q: %v", url, err)
	}
	metadata.applyAliases()
	metadata.denormaliseMetadata()
	return &metadata, nil
}
|
go
|
{
"resource": ""
}
|
q3558
|
paramsFromProviderSpaceInfo
|
train
|
// paramsFromProviderSpaceInfo converts a provider space description
// (including its subnets) into the API wire representation.
func paramsFromProviderSpaceInfo(info *environs.ProviderSpaceInfo) params.RemoteSpace {
	space := params.RemoteSpace{
		CloudType:          info.CloudType,
		Name:               info.Name,
		ProviderId:         string(info.ProviderId),
		ProviderAttributes: info.ProviderAttributes,
	}
	for _, sub := range info.Subnets {
		space.Subnets = append(space.Subnets, params.Subnet{
			CIDR:              sub.CIDR,
			ProviderId:        string(sub.ProviderId),
			ProviderNetworkId: string(sub.ProviderNetworkId),
			ProviderSpaceId:   string(sub.SpaceProviderId),
			VLANTag:           sub.VLANTag,
			Zones:             sub.AvailabilityZones,
		})
	}
	return space
}
|
go
|
{
"resource": ""
}
|
q3559
|
spaceInfoFromState
|
train
|
// spaceInfoFromState converts a state Space (and its subnets) into the
// network-layer SpaceInfo representation.
func spaceInfoFromState(space Space) (results *network.SpaceInfo, err error) {
	info := &network.SpaceInfo{
		Name:       space.Name(),
		ProviderId: space.ProviderId(),
	}
	subnets, err := space.Subnets()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, sub := range subnets {
		info.Subnets = append(info.Subnets, network.SubnetInfo{
			CIDR:              sub.CIDR(),
			ProviderId:        sub.ProviderId(),
			ProviderNetworkId: sub.ProviderNetworkId(),
			VLANTag:           sub.VLANTag(),
			AvailabilityZones: sub.AvailabilityZones(),
		})
	}
	return info, nil
}
|
go
|
{
"resource": ""
}
|
q3560
|
findInstanceSpec
|
train
|
// findInstanceSpec selects an image and instance type (Nova flavor)
// satisfying the instance constraint ic. Candidate instance types are
// built from the flavors accepted by the environ's flavor filter;
// candidate images come from imageMetadata.
func findInstanceSpec(
	e *Environ,
	ic *instances.InstanceConstraint,
	imageMetadata []*imagemetadata.ImageMetadata,
) (*instances.InstanceSpec, error) {
	// First construct all available instance types from the supported flavors.
	nova := e.nova()
	flavors, err := nova.ListFlavorsDetail()
	if err != nil {
		return nil, err
	}
	// Not all needed information is available in flavors,
	// for e.g. architectures or virtualisation types.
	// For these properties, we assume that all instance types support
	// all values.
	allInstanceTypes := []instances.InstanceType{}
	for _, flavor := range flavors {
		if !e.flavorFilter.AcceptFlavor(flavor) {
			continue
		}
		instanceType := instances.InstanceType{
			Id:       flavor.Id,
			Name:     flavor.Name,
			Arches:   ic.Arches,
			Mem:      uint64(flavor.RAM),
			CpuCores: uint64(flavor.VCPUs),
			RootDisk: uint64(flavor.Disk * 1024),
			// tags not currently supported on openstack
		}
		if ic.Constraints.HasVirtType() {
			// Instance Type virtual type depends on the virtual type of the selected image, i.e.
			// picking an image with a virt type gives a machine with this virt type.
			instanceType.VirtType = ic.Constraints.VirtType
		}
		allInstanceTypes = append(allInstanceTypes, instanceType)
	}
	images := instances.ImageMetadataToImages(imageMetadata)
	spec, err := instances.FindInstanceSpec(images, ic, allInstanceTypes)
	if err != nil {
		return nil, err
	}
	// If instance constraints did not have a virtualisation type,
	// but image metadata did, we will have an instance type
	// with virtualisation type of an image.
	if !ic.Constraints.HasVirtType() && spec.Image.VirtType != "" {
		spec.InstanceType.VirtType = &spec.Image.VirtType
	}
	return spec, nil
}
|
go
|
{
"resource": ""
}
|
q3561
|
NewClient
|
train
|
// NewClient builds an undertaker API client over the given caller. It
// fails when the caller has no model tag, since the undertaker facade
// is model-scoped.
func NewClient(caller base.APICaller, newWatcher NewWatcherFunc) (*Client, error) {
	modelTag, ok := caller.ModelTag()
	if !ok {
		return nil, errors.New("undertaker client is not appropriate for controller-only API")
	}
	client := &Client{
		modelTag:   modelTag,
		caller:     base.NewFacadeCaller(caller, "Undertaker"),
		newWatcher: newWatcher,
	}
	return client, nil
}
|
go
|
{
"resource": ""
}
|
q3562
|
WatchModelResources
|
train
|
// WatchModelResources starts a watcher that fires when the model's
// resources change, returning it wrapped via the client's watcher
// constructor.
func (c *Client) WatchModelResources() (watcher.NotifyWatcher, error) {
	var results params.NotifyWatchResults
	if err := c.entityFacadeCall("WatchModelResources", &results); err != nil {
		return nil, err
	}
	if n := len(results.Results); n != 1 {
		return nil, errors.Errorf("expected 1 result, got %d", n)
	}
	result := results.Results[0]
	if result.Error != nil {
		return nil, result.Error
	}
	return c.newWatcher(c.caller.RawAPICaller(), result), nil
}
|
go
|
{
"resource": ""
}
|
q3563
|
NewFirewall
|
train
|
// NewFirewall returns a Firewall backed by the given config source,
// firewaller API client and clock.
func NewFirewall(cfg environs.ConfigGetter, client FirewallerAPI, c clock.Clock) *Firewall {
	fw := &Firewall{
		environ: cfg,
		client:  client,
		clock:   c,
	}
	return fw
}
|
go
|
{
"resource": ""
}
|
q3564
|
OpenPorts
|
train
|
// OpenPorts adds the given ingress rules to the model-wide security
// list. It is only valid when the model uses global firewall mode.
func (f Firewall) OpenPorts(ctx context.ProviderCallContext, rules []network.IngressRule) error {
	if mode := f.environ.Config().FirewallMode(); mode != config.FwGlobal {
		return fmt.Errorf(
			"invalid firewall mode %q for opening ports on model",
			mode,
		)
	}
	// Make sure the global list exists, then reconcile its rules.
	seclist, err := f.ensureSecList(f.client.ComposeName(f.globalGroupName()))
	if err != nil {
		return errors.Trace(err)
	}
	if err := f.ensureSecRules(seclist, rules); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3565
|
ClosePorts
|
train
|
// ClosePorts removes the given ingress rules from the model-wide
// (global) security list.
func (f Firewall) ClosePorts(ctx context.ProviderCallContext, rules []network.IngressRule) error {
	groupName := f.globalGroupName()
	return f.closePortsOnList(ctx, f.client.ComposeName(groupName), rules)
}
|
go
|
{
"resource": ""
}
|
q3566
|
IngressRules
|
train
|
// IngressRules returns the ingress rules applied to the model-wide
// (global) security list.
func (f Firewall) IngressRules(ctx context.ProviderCallContext) ([]network.IngressRule, error) {
	return f.GlobalIngressRules(ctx)
}
|
go
|
{
"resource": ""
}
|
q3567
|
MachineIngressRules
|
train
|
// MachineIngressRules returns the ingress rules currently applied to
// the security list of the given machine.
func (f Firewall) MachineIngressRules(ctx context.ProviderCallContext, machineId string) ([]network.IngressRule, error) {
	seclist := f.machineGroupName(machineId)
	return f.getIngressRules(ctx, f.client.ComposeName(seclist))
}
|
go
|
{
"resource": ""
}
|
q3568
|
OpenPortsOnInstance
|
train
|
// OpenPortsOnInstance adds the given ingress rules to the security list
// of the given machine, creating the list first if necessary.
func (f Firewall) OpenPortsOnInstance(ctx context.ProviderCallContext, machineId string, rules []network.IngressRule) error {
	seclist, err := f.ensureSecList(f.client.ComposeName(f.machineGroupName(machineId)))
	if err != nil {
		return errors.Trace(err)
	}
	if err := f.ensureSecRules(seclist, rules); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3569
|
ClosePortsOnInstance
|
train
|
// ClosePortsOnInstance removes the given ingress rules from the
// security list of the given machine.
func (f Firewall) ClosePortsOnInstance(ctx context.ProviderCallContext, machineId string, rules []network.IngressRule) error {
	// fetch the group name based on the machine id provided
	groupName := f.machineGroupName(machineId)
	return f.closePortsOnList(ctx, f.client.ComposeName(groupName), rules)
}
|
go
|
{
"resource": ""
}
|
q3570
|
CreateMachineSecLists
|
train
|
// CreateMachineSecLists ensures both the model default security list
// (with its default rules, including apiPort) and a per-machine list
// exist, returning both list names.
func (f Firewall) CreateMachineSecLists(machineId string, apiPort int) ([]string, error) {
	defaultSecList, err := f.createDefaultGroupAndRules(apiPort)
	if err != nil {
		return nil, errors.Trace(err)
	}
	resourceName := f.client.ComposeName(f.machineGroupName(machineId))
	machineSecList, err := f.ensureSecList(resourceName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return []string{defaultSecList.Name, machineSecList.Name}, nil
}
|
go
|
{
"resource": ""
}
|
q3571
|
DeleteMachineSecList
|
train
|
// DeleteMachineSecList deletes the machine's security list if it has no
// remaining instance associations, then opportunistically tries the
// same for the model's global list.
func (f Firewall) DeleteMachineSecList(machineId string) error {
	machineList := f.client.ComposeName(f.machineGroupName(machineId))
	if err := f.maybeDeleteList(machineList); err != nil {
		return errors.Trace(err)
	}
	// The global list may now be unused as well; try it too.
	globalList := f.client.ComposeName(f.globalGroupName())
	if err := f.maybeDeleteList(globalList); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3572
|
RemoveACLAndRules
|
train
|
// RemoveACLAndRules deletes the machine's ACL together with every
// security rule attached to it. Not-found errors are ignored so the
// operation is idempotent.
func (f Firewall) RemoveACLAndRules(machineId string) error {
	resourceName := f.client.ComposeName(f.machineGroupName(machineId))
	secRules, err := f.getAllSecurityRules(resourceName)
	if err != nil {
		return err
	}
	for _, rule := range secRules {
		if err := f.client.DeleteSecurityRule(rule.Name); err != nil && !api.IsNotFound(err) {
			return err
		}
	}
	if err := f.client.DeleteAcl(resourceName); err != nil && !api.IsNotFound(err) {
		return err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3573
|
GlobalIngressRules
|
train
|
// GlobalIngressRules returns the ingress rules attached to the
// model-wide (global) security list.
func (f Firewall) GlobalIngressRules(ctx context.ProviderCallContext) ([]network.IngressRule, error) {
	seclist := f.globalGroupName()
	return f.getIngressRules(ctx, f.client.ComposeName(seclist))
}
|
go
|
{
"resource": ""
}
|
q3574
|
getDefaultIngressRules
|
train
|
// getDefaultIngressRules returns the ingress rules every model gets by
// default: SSH (22), RDP (3389), the API server port and the default
// state (mongo) port, each open to the world over TCP.
func (f Firewall) getDefaultIngressRules(apiPort int) []network.IngressRule {
	defaultPorts := []int{
		22,
		3389,
		apiPort,
		controller.DefaultStatePort,
	}
	rules := make([]network.IngressRule, 0, len(defaultPorts))
	for _, port := range defaultPorts {
		rules = append(rules, network.IngressRule{
			PortRange: corenetwork.PortRange{
				FromPort: port,
				ToPort:   port,
				Protocol: "tcp",
			},
			SourceCIDRs: []string{
				"0.0.0.0/0",
			},
		})
	}
	return rules
}
|
go
|
{
"resource": ""
}
|
q3575
|
closePortsOnList
|
train
|
// closePortsOnList deletes the security rules on the given security
// list that exactly match one of the supplied ingress rules. CIDR
// slices on both sides are sorted before comparison so matching is
// order-independent.
// NOTE: the sorts mutate the SourceCIDRs slices of both the mapped
// rules and the caller-supplied rules in place.
func (f Firewall) closePortsOnList(ctx context.ProviderCallContext, list string, rules []network.IngressRule) error {
	// get all security rules based on the dst_list=list
	secrules, err := f.getSecRules(list)
	if err != nil {
		return errors.Trace(err)
	}
	// converts all security rules into a map of ingress rules
	mapping, err := f.secRuleToIngresRule(secrules...)
	if err != nil {
		return errors.Trace(err)
	}
	//TODO (gsamfira): optimize this
	for name, rule := range mapping {
		sort.Strings(rule.SourceCIDRs)
		for _, ingressRule := range rules {
			sort.Strings(ingressRule.SourceCIDRs)
			if reflect.DeepEqual(rule, ingressRule) {
				err := f.client.DeleteSecRule(name)
				if err != nil {
					return errors.Trace(err)
				}
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3576
|
deleteAllSecRulesOnList
|
train
|
// deleteAllSecRulesOnList removes every security rule attached to the
// given security list; rules already gone are skipped.
func (f Firewall) deleteAllSecRulesOnList(list string) error {
	rules, err := f.getSecRules(list)
	if err != nil {
		return errors.Trace(err)
	}
	for _, rule := range rules {
		switch err := f.client.DeleteSecRule(rule.Name); {
		case err == nil, api.IsNotFound(err):
			// Deleted, or already gone - both fine.
		default:
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3577
|
maybeDeleteList
|
train
|
// maybeDeleteList deletes the given security list (and all its rules)
// once it no longer has any instance associations. It polls for the
// associations up to 10 times, one second apart; if they never clear,
// a warning is logged and the list is left in place.
func (f *Firewall) maybeDeleteList(list string) error {
	filter := []api.Filter{
		{
			Arg:   "seclist",
			Value: list,
		},
	}
	var assoc response.AllSecAssociations
	found := true
	for iter := 0; iter < 10; iter++ {
		var err error
		// BUG fix: the previous code used ":=" here, shadowing the
		// outer assoc, so the warning below always logged an empty
		// association list.
		assoc, err = f.client.AllSecAssociations(filter)
		if err != nil {
			return errors.Trace(err)
		}
		if len(assoc.Result) > 0 {
			<-f.clock.After(1 * time.Second)
			continue
		}
		found = false
		break
	}
	if found {
		logger.Warningf(
			"seclist %s still has some associations to instance(s): %v. Will not delete",
			list, assoc.Result,
		)
		return nil
	}
	if err := f.deleteAllSecRulesOnList(list); err != nil {
		return errors.Trace(err)
	}
	logger.Tracef("deleting seclist %v", list)
	if err := f.client.DeleteSecList(list); err != nil {
		if api.IsNotFound(err) {
			return nil
		}
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3578
|
getAllApplications
|
train
|
// getAllApplications returns the user-defined security applications
// plus the provider's built-in defaults, skipping any entry without a
// port/protocol pair (those cannot be matched against ingress rules).
func (f Firewall) getAllApplications() ([]response.SecApplication, error) {
	userApps, err := f.client.AllSecApplications(nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defaultApps, err := f.client.DefaultSecApplications(nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	allApps := []response.SecApplication{}
	for _, group := range [][]response.SecApplication{userApps.Result, defaultApps.Result} {
		for _, app := range group {
			if app.PortProtocolPair() == "" {
				// (gsamfira): this should not really happen, but skip
				// unusable entries defensively.
				continue
			}
			allApps = append(allApps, app)
		}
	}
	return allApps, nil
}
|
go
|
{
"resource": ""
}
|
q3579
|
getAllApplicationsAsMap
|
train
|
// getAllApplicationsAsMap returns all security applications (user and
// default) keyed by their string form; entries with an empty key are
// skipped and, on duplicates, the first entry wins.
func (f Firewall) getAllApplicationsAsMap() (map[string]response.SecApplication, error) {
	apps, err := f.getAllApplications()
	if err != nil {
		return nil, errors.Trace(err)
	}
	byName := map[string]response.SecApplication{}
	for _, app := range apps {
		key := app.String()
		if key == "" {
			continue
		}
		if _, seen := byName[key]; !seen {
			byName[key] = app
		}
	}
	return byName, nil
}
|
go
|
{
"resource": ""
}
|
q3580
|
convertToSecRules
|
train
|
// convertToSecRules translates the given ingress rules into Oracle API
// security rule parameters targeting seclist. For each rule, the
// corresponding security application and security IP list are looked
// up (and created on demand) in the freshly fetched application and
// IP-list collections, which are passed by pointer and extended as new
// entries are created.
func (f Firewall) convertToSecRules(seclist response.SecList, rules []network.IngressRule) ([]api.SecRuleParams, error) {
	applications, err := f.getAllApplications()
	if err != nil {
		return nil, errors.Trace(err)
	}
	iplists, err := f.getAllIPLists()
	if err != nil {
		return nil, errors.Trace(err)
	}
	ret := make([]api.SecRuleParams, 0, len(rules))
	// for every rule we need to ensure that the there is a relationship
	// between security applications and security IP lists
	// and from every one of them create a slice of security rule parameters
	for _, val := range rules {
		app, err := f.ensureApplication(val.PortRange, &applications)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ipList, err := f.ensureSecIpList(val.SourceCIDRs, &iplists)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// Each rule gets a fresh UUID-derived resource name.
		uuid, err := utils.NewUUID()
		if err != nil {
			return nil, errors.Trace(err)
		}
		name := f.newResourceName(uuid.String())
		resourceName := f.client.ComposeName(name)
		dstList := fmt.Sprintf("seclist:%s", seclist.Name)
		srcList := fmt.Sprintf("seciplist:%s", ipList)
		// create the new security rule parameters
		rule := api.SecRuleParams{
			Action:      common.SecRulePermit,
			Application: app,
			Description: "Juju created security rule",
			Disabled:    false,
			Dst_list:    dstList,
			Name:        resourceName,
			Src_list:    srcList,
		}
		// append the new parameters rule
		ret = append(ret, rule)
	}
	return ret, nil
}
|
go
|
{
"resource": ""
}
|
q3581
|
convertApplicationToPortRange
|
train
|
// convertApplicationToPortRange maps a security application onto the
// equivalent port range. A Value2 of -1 denotes a single-port
// application, collapsing the range to Value1.
func (f Firewall) convertApplicationToPortRange(app response.SecApplication) corenetwork.PortRange {
	to := app.Value2
	if to == -1 {
		to = app.Value1
	}
	return corenetwork.PortRange{
		FromPort: app.Value1,
		ToPort:   to,
		Protocol: string(app.Protocol),
	}
}
|
go
|
{
"resource": ""
}
|
q3582
|
convertFromSecRules
|
train
|
// convertFromSecRules converts Oracle security rules into ingress
// rules, grouped by destination security list name (with the
// "seclist:" prefix stripped).
func (f Firewall) convertFromSecRules(rules ...response.SecRule) (map[string][]network.IngressRule, error) {
	applications, err := f.getAllApplicationsAsMap()
	if err != nil {
		return nil, errors.Trace(err)
	}
	iplists, err := f.getAllIPListsAsMap()
	if err != nil {
		return nil, errors.Trace(err)
	}
	ret := map[string][]network.IngressRule{}
	for _, val := range rules {
		srcList := strings.TrimPrefix(val.Src_list, "seciplist:")
		dstList := strings.TrimPrefix(val.Dst_list, "seclist:")
		rule := network.IngressRule{
			PortRange:   f.convertApplicationToPortRange(applications[val.Application]),
			SourceCIDRs: iplists[srcList].Secipentries,
		}
		// Appending to a missing key starts from a nil slice, so the
		// previous explicit "first element" branch was redundant.
		ret[dstList] = append(ret[dstList], rule)
	}
	return ret, nil
}
|
go
|
{
"resource": ""
}
|
q3583
|
globalGroupName
|
train
|
// globalGroupName returns the model-wide security list name,
// namespaced by the model UUID.
func (f Firewall) globalGroupName() string {
	return "juju-" + f.environ.Config().UUID() + "-global"
}
|
go
|
{
"resource": ""
}
|
q3584
|
machineGroupName
|
train
|
// machineGroupName returns the per-machine security list name,
// namespaced by the model UUID.
func (f Firewall) machineGroupName(machineId string) string {
	return "juju-" + f.environ.Config().UUID() + "-" + machineId
}
|
go
|
{
"resource": ""
}
|
q3585
|
newResourceName
|
train
|
// newResourceName derives a model-namespaced resource name from the
// given application name.
func (f Firewall) newResourceName(appName string) string {
	return "juju-" + f.environ.Config().UUID() + "-" + appName
}
|
go
|
{
"resource": ""
}
|
q3586
|
getAllSecurityRules
|
train
|
// getAllSecurityRules returns every security rule known to the client.
// When aclName is non-empty only the rules belonging to that ACL are
// returned.
func (f Firewall) getAllSecurityRules(aclName string) ([]response.SecurityRule, error) {
	rules, err := f.client.AllSecurityRules(nil)
	if err != nil {
		return nil, err
	}
	if aclName == "" {
		return rules.Result, nil
	}
	var matched []response.SecurityRule
	for _, rule := range rules.Result {
		if rule.Acl != aclName {
			continue
		}
		matched = append(matched, rule)
	}
	return matched, nil
}
|
go
|
{
"resource": ""
}
|
q3587
|
getSecRules
|
train
|
// getSecRules returns the PERMIT security rules whose destination is
// the given security list and whose source is a security IP list -
// i.e. the rules that correspond to Juju-managed ingress rules.
func (f Firewall) getSecRules(seclist string) ([]response.SecRule, error) {
	// we only care about ingress rules
	name := fmt.Sprintf("seclist:%s", seclist)
	rulesFilter := []api.Filter{
		{
			Arg:   "dst_list",
			Value: name,
		},
	}
	rules, err := f.client.AllSecRules(rulesFilter)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// gsamfira: the oracle compute API does not allow filtering by action
	ret := []response.SecRule{}
	for _, val := range rules.Result {
		// gsamfira: We set a default policy of DENY. No use in worrying about
		// DENY rules (if by any chance someone add one manually for some reason)
		if val.Action != common.SecRulePermit {
			continue
		}
		// We only care about rules that have a destination set
		// to a security list. Those lists get attached to VMs
		// NOTE: someone decided, when writing the oracle API
		// that some fields should be bool, some should be string.
		// never mind they both are boolean values...but hey.
		// I swear...some people like to watch the world burn
		if val.Dst_is_ip == "true" {
			continue
		}
		// We only care about rules that have an IP list as source
		if val.Src_is_ip == "false" {
			continue
		}
		ret = append(ret, val)
	}
	return ret, nil
}
|
go
|
{
"resource": ""
}
|
q3588
|
ensureSecRules
|
train
|
// ensureSecRules makes sure every ingress rule in rules has a
// corresponding security rule on seclist, creating any that are
// missing. Existing rules are left untouched.
func (f Firewall) ensureSecRules(seclist response.SecList, rules []network.IngressRule) error {
	// Fetch the rules currently attached to the list and convert them
	// to ingress rules so they can be compared directly.
	secRules, err := f.getSecRules(seclist.Name)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Tracef("list %v has sec rules: %v", seclist.Name, secRules)
	converted, err := f.convertFromSecRules(secRules...)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Tracef("converted rules are: %v", converted)
	asIngressRules := converted[seclist.Name]
	// Sort the CIDR lists once up front so reflect.DeepEqual compares
	// canonical forms. The previous code re-sorted toAdd.SourceCIDRs
	// on every inner-loop iteration. Sorting mutates the slices in
	// place, which matches the old behavior.
	for _, exists := range asIngressRules {
		sort.Strings(exists.SourceCIDRs)
	}
	var missing []network.IngressRule
	for _, toAdd := range rules {
		sort.Strings(toAdd.SourceCIDRs)
		found := false
		for _, exists := range asIngressRules {
			logger.Tracef("comparing %v to %v", toAdd.SourceCIDRs, exists.SourceCIDRs)
			if reflect.DeepEqual(toAdd, exists) {
				found = true
				break
			}
		}
		if !found {
			missing = append(missing, toAdd)
		}
	}
	if len(missing) == 0 {
		return nil
	}
	logger.Tracef("Found missing rules: %v", missing)
	// convert the missing rules back to sec rules and create them
	asSecRule, err := f.convertToSecRules(seclist, missing)
	if err != nil {
		return errors.Trace(err)
	}
	for _, val := range asSecRule {
		if _, err := f.client.CreateSecRule(val); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3589
|
ensureSecList
|
train
|
// ensureSecList returns the security list with the given name, creating
// it (ingress permit / egress deny policy) when it does not yet exist.
func (f Firewall) ensureSecList(name string) (response.SecList, error) {
	logger.Infof("Fetching details for list: %s", name)
	details, err := f.client.SecListDetails(name)
	if err == nil {
		return details, nil
	}
	logger.Infof("Got error fetching details for %s: %v", name, err)
	if !api.IsNotFound(err) {
		return response.SecList{}, err
	}
	logger.Infof("Creating new seclist: %s", name)
	created, err := f.client.CreateSecList(
		"Juju created security list",
		name,
		common.SecRulePermit,
		common.SecRuleDeny)
	if err != nil {
		return response.SecList{}, err
	}
	return created, nil
}
|
go
|
{
"resource": ""
}
|
q3590
|
getAllIPListsAsMap
|
train
|
// getAllIPListsAsMap returns all security IP lists keyed by name.
func (f Firewall) getAllIPListsAsMap() (map[string]response.SecIpList, error) {
	lists, err := f.getAllIPLists()
	if err != nil {
		return nil, errors.Trace(err)
	}
	byName := make(map[string]response.SecIpList, len(lists))
	for _, list := range lists {
		byName[list.Name] = list
	}
	return byName, nil
}
|
go
|
{
"resource": ""
}
|
q3591
|
ensureSecIpList
|
train
|
// ensureSecIpList returns the name of a security IP list containing
// exactly the given CIDRs, creating one (and appending it to cache)
// when no cached list matches. Both cidr and the cached entries are
// sorted in place so comparison is order-independent.
func (f Firewall) ensureSecIpList(cidr []string, cache *[]response.SecIpList) (string, error) {
	sort.Strings(cidr)
	for _, val := range *cache {
		sort.Strings(val.Secipentries)
		if reflect.DeepEqual(val.Secipentries, cidr) {
			return val.Name, nil
		}
	}
	// No match - create a new list under a UUID-derived name.
	uuid, err := utils.NewUUID()
	if err != nil {
		return "", errors.Trace(err)
	}
	name := f.newResourceName(uuid.String())
	resource := f.client.ComposeName(name)
	secList, err := f.client.CreateSecIpList(
		"Juju created security IP list",
		resource, cidr)
	if err != nil {
		return "", errors.Trace(err)
	}
	*cache = append(*cache, secList)
	return secList.Name, nil
}
|
go
|
{
"resource": ""
}
|
q3592
|
sendError
|
train
|
// sendError maps err onto an API error payload plus HTTP status and
// writes it to w; a failure to write is only logged.
func (h *backupHandler) sendError(w http.ResponseWriter, err error) {
	apiErr, status := common.ServerErrorAndStatus(err)
	if writeErr := sendStatusAndJSON(w, status, apiErr); writeErr != nil {
		logger.Errorf("%v", writeErr)
	}
}
|
go
|
{
"resource": ""
}
|
q3593
|
Get
|
train
|
// Get returns the resource registered under id, or nil if there is none.
func (rs *Resources) Get(id string) facade.Resource {
	rs.mu.Lock()
	r := rs.resources[id]
	rs.mu.Unlock()
	return r
}
|
go
|
{
"resource": ""
}
|
q3594
|
Register
|
train
|
// Register stores r under a freshly allocated numeric id, records the
// id on the stack (so StopAll can stop resources in reverse
// registration order), and returns the id.
func (rs *Resources) Register(r facade.Resource) string {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	rs.maxId++
	id := strconv.FormatUint(rs.maxId, 10)
	rs.resources[id] = r
	rs.stack = append(rs.stack, id)
	logger.Tracef("registered unnamed resource: %s", id)
	return id
}
|
go
|
{
"resource": ""
}
|
q3595
|
Stop
|
train
|
// Stop stops the resource registered under id and removes it from both
// the id map and the registration-order stack. Stopping an unknown id
// is a no-op; the error from the resource's Stop (if any) is returned.
func (rs *Resources) Stop(id string) error {
	// We don't hold the mutex while calling Stop, because
	// that might take a while and we don't want to
	// stop all other resource manipulation while we do so.
	// If resources.Stop is called concurrently, we'll get
	// two concurrent calls to Stop, but that should fit
	// well with the way we invariably implement Stop.
	logger.Tracef("stopping resource: %s", id)
	r := rs.Get(id)
	if r == nil {
		return nil
	}
	err := r.Stop()
	rs.mu.Lock()
	defer rs.mu.Unlock()
	delete(rs.resources, id)
	// Remove the id from the stack so StopAll won't see it again.
	for pos := 0; pos < len(rs.stack); pos++ {
		if rs.stack[pos] == id {
			rs.stack = append(rs.stack[0:pos], rs.stack[pos+1:]...)
			break
		}
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q3596
|
StopAll
|
train
|
// StopAll stops every registered resource in reverse registration
// order, logging (but not propagating) any Stop errors, then resets the
// registry.
func (rs *Resources) StopAll() {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	for i := len(rs.stack) - 1; i >= 0; i-- {
		id := rs.stack[i]
		r := rs.resources[id]
		logger.Tracef("stopping resource: %s", id)
		if err := r.Stop(); err != nil {
			logger.Errorf("error stopping %T resource: %v", r, err)
		}
	}
	rs.resources = make(map[string]facade.Resource)
	rs.stack = nil
}
|
go
|
{
"resource": ""
}
|
q3597
|
Count
|
train
|
// Count returns the number of currently registered resources.
func (rs *Resources) Count() int {
	rs.mu.Lock()
	n := len(rs.resources)
	rs.mu.Unlock()
	return n
}
|
go
|
{
"resource": ""
}
|
q3598
|
ProviderId
|
train
|
// ProviderId returns the provider-assigned identifier of the space.
func (s *Space) ProviderId() network.Id {
	return network.Id(s.doc.ProviderId)
}
|
go
|
{
"resource": ""
}
|
q3599
|
Subnets
|
train
|
// Subnets returns all subnets in the space: the non-FAN subnets whose
// space-name matches, each followed by its FAN overlay descendants
// (which carry no space-name of their own).
func (s *Space) Subnets() (results []*Subnet, err error) {
	defer errors.DeferredAnnotatef(&err, "cannot fetch subnets")
	name := s.Name()
	subnetsCollection, closer := s.st.db().GetCollection(subnetsC)
	defer closer()

	var doc subnetDoc
	// We ignore space-name field for FAN subnets...
	iter := subnetsCollection.Find(bson.D{{"space-name", name}, bson.DocElem{"fan-local-underlay", bson.D{{"$exists", false}}}}).Iter()
	defer iter.Close()
	for iter.Next(&doc) {
		results = append(results, &Subnet{s.st, doc, name})
		// ...and then add them explicitly as descendants of underlay network.
		childIter := subnetsCollection.Find(bson.D{{"fan-local-underlay", doc.CIDR}}).Iter()
		for childIter.Next(&doc) {
			results = append(results, &Subnet{s.st, doc, name})
		}
		// BUG fix: the child iterator was previously never closed, so
		// its cursor leaked and any iteration error was dropped.
		if err := childIter.Close(); err != nil {
			return nil, err
		}
	}
	if err := iter.Close(); err != nil {
		return nil, err
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.