_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q4400
Manifold
train
// Manifold returns a dependency.Manifold that wires the transport worker
// to its clock, agent, authenticator, hub and mux dependencies.
func Manifold(config ManifoldConfig) dependency.Manifold {
	inputs := []string{
		config.ClockName,
		config.AgentName,
		config.AuthenticatorName,
		config.HubName,
		config.MuxName,
	}
	return dependency.Manifold{
		Inputs: inputs,
		Start:  config.start,
		Output: transportOutput,
	}
}
go
{ "resource": "" }
q4401
APIRequestDuration
train
// APIRequestDuration registers an expected APIRequestDuration call on the
// mock recorder.
func (mr *MockMetricsCollectorMockRecorder) APIRequestDuration() *gomock.Call {
	method := reflect.TypeOf((*MockMetricsCollector)(nil).APIRequestDuration)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "APIRequestDuration", method)
}
go
{ "resource": "" }
q4402
DeprecatedAPIRequestDuration
train
func (m *MockMetricsCollector) DeprecatedAPIRequestDuration() metricobserver.SummaryVec { ret := m.ctrl.Call(m, "DeprecatedAPIRequestDuration") ret0, _ := ret[0].(metricobserver.SummaryVec) return ret0 }
go
{ "resource": "" }
q4403
DeprecatedAPIRequestsTotal
train
func (m *MockMetricsCollector) DeprecatedAPIRequestsTotal() metricobserver.CounterVec { ret := m.ctrl.Call(m, "DeprecatedAPIRequestsTotal") ret0, _ := ret[0].(metricobserver.CounterVec) return ret0 }
go
{ "resource": "" }
q4404
NewMockCounterVec
train
func NewMockCounterVec(ctrl *gomock.Controller) *MockCounterVec { mock := &MockCounterVec{ctrl: ctrl} mock.recorder = &MockCounterVecMockRecorder{mock} return mock }
go
{ "resource": "" }
q4405
NewMockSummaryVec
train
func NewMockSummaryVec(ctrl *gomock.Controller) *MockSummaryVec { mock := &MockSummaryVec{ctrl: ctrl} mock.recorder = &MockSummaryVecMockRecorder{mock} return mock }
go
{ "resource": "" }
q4406
processPost
train
// processPost handles a resource-upload POST made during model migration.
// It resolves the upload target (unit or application) and resource details
// from the request query, then stores the resource via the state resource
// backend. When the query describes a placeholder, no content is attached
// (the body reader is dropped). Returns the stored resource, or an error
// annotated with the failing step.
// NOTE(review): the "user" query value is allowed to be blank; presumably
// authorization is enforced by the caller — confirm.
func (h *resourcesMigrationUploadHandler) processPost(r *http.Request, st *state.State) (resource.Resource, error) { var empty resource.Resource query := r.URL.Query() target, isUnit, err := getUploadTarget(query) if err != nil { return empty, errors.Trace(err) } userID := query.Get("user") // Is allowed to be blank res, err := queryToResource(query) if err != nil { return empty, errors.Trace(err) } rSt, err := st.Resources() if err != nil { return empty, errors.Trace(err) } reader := r.Body // Don't associate content with a placeholder resource. if isPlaceholder(query) { reader = nil } outRes, err := setResource(isUnit, target, userID, res, reader, rSt) if err != nil { return empty, errors.Annotate(err, "resource upload failed") } return outRes, nil }
go
{ "resource": "" }
q4407
DialInfo
train
func DialInfo(info Info, opts DialOpts) (*mgo.DialInfo, error) { if len(info.Addrs) == 0 { return nil, stderrors.New("no mongo addresses") } var tlsConfig *tls.Config if !info.DisableTLS { if len(info.CACert) == 0 { return nil, stderrors.New("missing CA certificate") } xcert, err := cert.ParseCert(info.CACert) if err != nil { return nil, fmt.Errorf("cannot parse CA certificate: %v", err) } pool := x509.NewCertPool() pool.AddCert(xcert) tlsConfig = utils.SecureTLSConfig() tlsConfig.RootCAs = pool tlsConfig.ServerName = "juju-mongodb" // TODO(natefinch): revisit this when are full-time on mongo 3. // We have to add non-ECDHE suites because mongo doesn't support ECDHE. moreSuites := []uint16{ tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, } tlsConfig.CipherSuites = append(tlsConfig.CipherSuites, moreSuites...) } dial := func(server *mgo.ServerAddr) (_ net.Conn, err error) { if opts.PostDialServer != nil { before := time.Now() defer func() { taken := time.Now().Sub(before) opts.PostDialServer(server.String(), taken, err) }() } addr := server.TCPAddr().String() c, err := net.DialTimeout("tcp", addr, opts.Timeout) if err != nil { logger.Warningf("mongodb connection failed, will retry: %v", err) return nil, err } if tlsConfig != nil { cc := tls.Client(c, tlsConfig) if err := cc.Handshake(); err != nil { logger.Warningf("TLS handshake failed: %v", err) if err := c.Close(); err != nil { logger.Warningf("failed to close connection: %v", err) } return nil, err } c = cc } logger.Debugf("dialled mongodb server at %q", addr) return c, nil } return &mgo.DialInfo{ Addrs: info.Addrs, Timeout: opts.Timeout, DialServer: dial, Direct: opts.Direct, PoolLimit: opts.PoolLimit, }, nil }
go
{ "resource": "" }
q4408
DialWithInfo
train
// DialWithInfo dials a mongo session using the connection settings in
// info. A non-zero opts.Timeout is required. After connecting it applies
// the socket timeout (defaulting to SocketTimeout), runs the optional
// PostDial hook, and logs in as info.Tag (or AdminUser when the tag is
// nil but a password is set). On any post-connect failure the session is
// closed before the error is returned, so no session leaks.
func DialWithInfo(info MongoInfo, opts DialOpts) (*mgo.Session, error) { if opts.Timeout == 0 { return nil, errors.New("a non-zero Timeout must be specified") } dialInfo, err := DialInfo(info.Info, opts) if err != nil { return nil, err } session, err := mgo.DialWithInfo(dialInfo) if err != nil { return nil, err } if opts.SocketTimeout == 0 { opts.SocketTimeout = SocketTimeout } session.SetSocketTimeout(opts.SocketTimeout) if opts.PostDial != nil { if err := opts.PostDial(session); err != nil { session.Close() return nil, errors.Annotate(err, "PostDial failed") } } if info.Tag != nil || info.Password != "" { user := AdminUser if info.Tag != nil { user = info.Tag.String() } if err := Login(session, user, info.Password); err != nil { session.Close() return nil, errors.Trace(err) } } return session, nil }
go
{ "resource": "" }
q4409
Login
train
func Login(session *mgo.Session, user, password string) error { admin := session.DB("admin") if err := admin.Login(user, password); err != nil { return MaybeUnauthorizedf(err, "cannot log in to admin database as %q", user) } return nil }
go
{ "resource": "" }
q4410
MaybeUnauthorizedf
train
func MaybeUnauthorizedf(err error, message string, args ...interface{}) error { if isUnauthorized(errors.Cause(err)) { err = errors.Unauthorizedf("unauthorized mongo access: %s", err) } return errors.Annotatef(err, message, args...) }
go
{ "resource": "" }
q4411
VolumeSource
train
func (p *StorageProvider) VolumeSource(providerConfig *storage.Config) (storage.VolumeSource, error) { p.MethodCall(p, "VolumeSource", providerConfig) if p.VolumeSourceFunc != nil { return p.VolumeSourceFunc(providerConfig) } return nil, errors.NotSupportedf("volumes") }
go
{ "resource": "" }
q4412
FilesystemSource
train
func (p *StorageProvider) FilesystemSource(providerConfig *storage.Config) (storage.FilesystemSource, error) { p.MethodCall(p, "FilesystemSource", providerConfig) if p.FilesystemSourceFunc != nil { return p.FilesystemSourceFunc(providerConfig) } return nil, errors.NotSupportedf("filesystems") }
go
{ "resource": "" }
q4413
ValidateConfig
train
func (p *StorageProvider) ValidateConfig(providerConfig *storage.Config) error { p.MethodCall(p, "ValidateConfig", providerConfig) if p.ValidateConfigFunc != nil { return p.ValidateConfigFunc(providerConfig) } return nil }
go
{ "resource": "" }
q4414
Supports
train
func (p *StorageProvider) Supports(kind storage.StorageKind) bool { p.MethodCall(p, "Supports", kind) if p.SupportsFunc != nil { return p.SupportsFunc(kind) } return true }
go
{ "resource": "" }
q4415
Scope
train
func (p *StorageProvider) Scope() storage.Scope { p.MethodCall(p, "Scope") return p.StorageScope }
go
{ "resource": "" }
q4416
DefaultPools
train
func (p *StorageProvider) DefaultPools() []*storage.Config { p.MethodCall(p, "DefaultPools") return p.DefaultPools_ }
go
{ "resource": "" }
q4417
NewMockRbacV1Interface
train
func NewMockRbacV1Interface(ctrl *gomock.Controller) *MockRbacV1Interface { mock := &MockRbacV1Interface{ctrl: ctrl} mock.recorder = &MockRbacV1InterfaceMockRecorder{mock} return mock }
go
{ "resource": "" }
q4418
ClusterRoleBindings
train
func (m *MockRbacV1Interface) ClusterRoleBindings() v11.ClusterRoleBindingInterface { ret := m.ctrl.Call(m, "ClusterRoleBindings") ret0, _ := ret[0].(v11.ClusterRoleBindingInterface) return ret0 }
go
{ "resource": "" }
q4419
ClusterRoles
train
func (m *MockRbacV1Interface) ClusterRoles() v11.ClusterRoleInterface { ret := m.ctrl.Call(m, "ClusterRoles") ret0, _ := ret[0].(v11.ClusterRoleInterface) return ret0 }
go
{ "resource": "" }
q4420
RoleBindings
train
func (m *MockRbacV1Interface) RoleBindings(arg0 string) v11.RoleBindingInterface { ret := m.ctrl.Call(m, "RoleBindings", arg0) ret0, _ := ret[0].(v11.RoleBindingInterface) return ret0 }
go
{ "resource": "" }
q4421
RoleBindings
train
// RoleBindings registers an expected RoleBindings call on the mock recorder.
func (mr *MockRbacV1InterfaceMockRecorder) RoleBindings(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockRbacV1Interface)(nil).RoleBindings)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoleBindings", method, arg0)
}
go
{ "resource": "" }
q4422
Roles
train
func (m *MockRbacV1Interface) Roles(arg0 string) v11.RoleInterface { ret := m.ctrl.Call(m, "Roles", arg0) ret0, _ := ret[0].(v11.RoleInterface) return ret0 }
go
{ "resource": "" }
q4423
NewMockClusterRoleBindingInterface
train
func NewMockClusterRoleBindingInterface(ctrl *gomock.Controller) *MockClusterRoleBindingInterface { mock := &MockClusterRoleBindingInterface{ctrl: ctrl} mock.recorder = &MockClusterRoleBindingInterfaceMockRecorder{mock} return mock }
go
{ "resource": "" }
q4424
NewMockClusterRoleInterface
train
func NewMockClusterRoleInterface(ctrl *gomock.Controller) *MockClusterRoleInterface { mock := &MockClusterRoleInterface{ctrl: ctrl} mock.recorder = &MockClusterRoleInterfaceMockRecorder{mock} return mock }
go
{ "resource": "" }
q4425
NewMockDatabase
train
func NewMockDatabase(ctrl *gomock.Controller) *MockDatabase { mock := &MockDatabase{ctrl: ctrl} mock.recorder = &MockDatabaseMockRecorder{mock} return mock }
go
{ "resource": "" }
q4426
Copy
train
func (m *MockDatabase) Copy() (state.Database, state.SessionCloser) { ret := m.ctrl.Call(m, "Copy") ret0, _ := ret[0].(state.Database) ret1, _ := ret[1].(state.SessionCloser) return ret0, ret1 }
go
{ "resource": "" }
q4427
CopyForModel
train
func (m *MockDatabase) CopyForModel(arg0 string) (state.Database, state.SessionCloser) { ret := m.ctrl.Call(m, "CopyForModel", arg0) ret0, _ := ret[0].(state.Database) ret1, _ := ret[1].(state.SessionCloser) return ret0, ret1 }
go
{ "resource": "" }
q4428
GetCollectionFor
train
func (m *MockDatabase) GetCollectionFor(arg0, arg1 string) (mongo.Collection, state.SessionCloser) { ret := m.ctrl.Call(m, "GetCollectionFor", arg0, arg1) ret0, _ := ret[0].(mongo.Collection) ret1, _ := ret[1].(state.SessionCloser) return ret0, ret1 }
go
{ "resource": "" }
q4429
GetRawCollection
train
func (m *MockDatabase) GetRawCollection(arg0 string) (*mgo_v2.Collection, state.SessionCloser) { ret := m.ctrl.Call(m, "GetRawCollection", arg0) ret0, _ := ret[0].(*mgo_v2.Collection) ret1, _ := ret[1].(state.SessionCloser) return ret0, ret1 }
go
{ "resource": "" }
q4430
RunRawTransaction
train
// RunRawTransaction registers an expected RunRawTransaction call on the
// mock recorder.
func (mr *MockDatabaseMockRecorder) RunRawTransaction(arg0 interface{}) *gomock.Call {
	method := reflect.TypeOf((*MockDatabase)(nil).RunRawTransaction)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunRawTransaction", method, arg0)
}
go
{ "resource": "" }
q4431
RunTransactionFor
train
func (m *MockDatabase) RunTransactionFor(arg0 string, arg1 []txn0.Op) error { ret := m.ctrl.Call(m, "RunTransactionFor", arg0, arg1) ret0, _ := ret[0].(error) return ret0 }
go
{ "resource": "" }
q4432
Schema
train
func (m *MockDatabase) Schema() state.CollectionSchema { ret := m.ctrl.Call(m, "Schema") ret0, _ := ret[0].(state.CollectionSchema) return ret0 }
go
{ "resource": "" }
q4433
TransactionRunner
train
func (m *MockDatabase) TransactionRunner() (txn.Runner, state.SessionCloser) { ret := m.ctrl.Call(m, "TransactionRunner") ret0, _ := ret[0].(txn.Runner) ret1, _ := ret[1].(state.SessionCloser) return ret0, ret1 }
go
{ "resource": "" }
q4434
TransactionRunner
train
// TransactionRunner registers an expected TransactionRunner call on the
// mock recorder.
func (mr *MockDatabaseMockRecorder) TransactionRunner() *gomock.Call {
	method := reflect.TypeOf((*MockDatabase)(nil).TransactionRunner)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionRunner", method)
}
go
{ "resource": "" }
q4435
Encode
train
func Encode(r io.Reader, armoredPrivateKey, passphrase string) ([]byte, error) { keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKey)) if err != nil { return nil, err } privateKey := keyring[0].PrivateKey if privateKey.Encrypted { err = privateKey.Decrypt([]byte(passphrase)) if err != nil { return nil, err } } var buf bytes.Buffer plaintext, err := clearsign.Encode(&buf, privateKey, nil) if err != nil { return nil, err } metadata, err := ioutil.ReadAll(r) if err != nil { return nil, err } dataToSign := metadata if dataToSign[0] == '\n' { dataToSign = dataToSign[1:] } _, err = plaintext.Write(dataToSign) if err != nil { return nil, err } err = plaintext.Close() if err != nil { return nil, err } return buf.Bytes(), nil }
go
{ "resource": "" }
q4436
BridgeEthernetById
train
func (np *Netplan) BridgeEthernetById(deviceId string, bridgeName string) (err error) { ethernet, ok := np.Network.Ethernets[deviceId] if !ok { return errors.NotFoundf("ethernet device with id %q for bridge %q", deviceId, bridgeName) } shouldCreate, err := np.shouldCreateBridge(deviceId, bridgeName) if !shouldCreate { // err may be nil, but we shouldn't continue creating return errors.Trace(err) } np.createBridgeFromInterface(bridgeName, deviceId, &ethernet.Interface) np.Network.Ethernets[deviceId] = ethernet return nil }
go
{ "resource": "" }
q4437
BridgeVLANById
train
func (np *Netplan) BridgeVLANById(deviceId string, bridgeName string) (err error) { vlan, ok := np.Network.VLANs[deviceId] if !ok { return errors.NotFoundf("VLAN device with id %q for bridge %q", deviceId, bridgeName) } shouldCreate, err := np.shouldCreateBridge(deviceId, bridgeName) if !shouldCreate { // err may be nil, but we shouldn't continue creating return errors.Trace(err) } np.createBridgeFromInterface(bridgeName, deviceId, &vlan.Interface) np.Network.VLANs[deviceId] = vlan return nil }
go
{ "resource": "" }
q4438
BridgeBondById
train
func (np *Netplan) BridgeBondById(deviceId string, bridgeName string) (err error) { bond, ok := np.Network.Bonds[deviceId] if !ok { return errors.NotFoundf("bond device with id %q for bridge %q", deviceId, bridgeName) } shouldCreate, err := np.shouldCreateBridge(deviceId, bridgeName) if !shouldCreate { // err may be nil, but we shouldn't continue creating return errors.Trace(err) } np.createBridgeFromInterface(bridgeName, deviceId, &bond.Interface) np.Network.Bonds[deviceId] = bond return nil }
go
{ "resource": "" }
q4439
shouldCreateBridge
train
func (np *Netplan) shouldCreateBridge(deviceId string, bridgeName string) (bool, error) { for bName, bridge := range np.Network.Bridges { for _, i := range bridge.Interfaces { if i == deviceId { // The device is already properly bridged, nothing to do if bridgeName == bName { return false, nil } else { return false, errors.AlreadyExistsf("cannot create bridge %q, device %q in bridge %q", bridgeName, deviceId, bName) } } } if bridgeName == bName { return false, errors.AlreadyExistsf( "cannot create bridge %q with device %q - bridge %q w/ interfaces %q", bridgeName, deviceId, bridgeName, strings.Join(bridge.Interfaces, ", ")) } } return true, nil }
go
{ "resource": "" }
q4440
createBridgeFromInterface
train
func (np *Netplan) createBridgeFromInterface(bridgeName, deviceId string, intf *Interface) { if np.Network.Bridges == nil { np.Network.Bridges = make(map[string]Bridge) } np.Network.Bridges[bridgeName] = Bridge{ Interfaces: []string{deviceId}, Interface: *intf, } *intf = Interface{MTU: intf.MTU} }
go
{ "resource": "" }
q4441
ReadDirectory
train
func ReadDirectory(dirPath string) (np Netplan, err error) { fileInfos, err := ioutil.ReadDir(dirPath) if err != nil { return np, err } np.sourceDirectory = dirPath sortedFileInfos := sortableFileInfos(fileInfos) sort.Sort(sortedFileInfos) for _, fileInfo := range sortedFileInfos { if !fileInfo.IsDir() && strings.HasSuffix(fileInfo.Name(), ".yaml") { np.sourceFiles = append(np.sourceFiles, fileInfo.Name()) } } for _, fileName := range np.sourceFiles { err := np.readYamlFile(path.Join(np.sourceDirectory, fileName)) if err != nil { return np, err } } return np, nil }
go
{ "resource": "" }
q4442
Write
train
// Write marshals the netplan configuration and writes it atomically
// (write to a temp file, then rename) to inPath, or — when inPath is
// empty — to the first unused NN-juju.yaml name in the source directory,
// probing NN from 99 down to 01. A Netplan may only be written once;
// the chosen path is remembered in writtenFile (and used by Rollback).
// NOTE(review): a non-NotExist os.Stat error is treated the same as an
// existing file; presumably acceptable since a later probe can still
// succeed — confirm.
func (np *Netplan) Write(inPath string) (filePath string, err error) { if np.writtenFile != "" { return "", errors.Errorf("Cannot write the same netplan twice") } if inPath == "" { i := 99 for ; i > 0; i-- { filePath = path.Join(np.sourceDirectory, fmt.Sprintf("%0.2d-juju.yaml", i)) _, err = os.Stat(filePath) if os.IsNotExist(err) { break } } if i == 0 { return "", errors.Errorf("Can't generate a filename for netplan YAML") } } else { filePath = inPath } tmpFilePath := fmt.Sprintf("%s.tmp.%d", filePath, time.Now().UnixNano()) out, err := Marshal(np) if err != nil { return "", err } err = ioutil.WriteFile(tmpFilePath, out, 0644) if err != nil { return "", err } err = os.Rename(tmpFilePath, filePath) if err != nil { return "", err } np.writtenFile = filePath return filePath, nil }
go
{ "resource": "" }
q4443
Rollback
train
func (np *Netplan) Rollback() (err error) { if np.writtenFile != "" { os.Remove(np.writtenFile) } for oldFile, newFile := range np.backedFiles { err = os.Rename(newFile, oldFile) if err != nil { logger.Errorf("Cannot rename %s to %s - %q", newFile, oldFile, err.Error()) } } np.backedFiles = nil np.writtenFile = "" return nil }
go
{ "resource": "" }
q4444
FindDeviceByNameOrMAC
train
// FindDeviceByNameOrMAC locates a bond, VLAN or ethernet device, first by
// name and then — because VLANs frequently share their parent's MAC, so
// MAC matching is less reliable — by MAC address. It returns the device id
// and its type, or a NotFound error when nothing matches.
// NOTE(review): only the bond lookups propagate non-NotFound errors; the
// VLAN and ethernet lookups silently fall through on any error. That may
// be deliberate (fall back to the MAC search) — confirm before changing.
func (np *Netplan) FindDeviceByNameOrMAC(name, mac string) (string, DeviceType, error) { if name != "" { bond, err := np.FindBondByName(name) if err == nil { return bond, TypeBond, nil } if !errors.IsNotFound(err) { return "", "", errors.Trace(err) } vlan, err := np.FindVLANByName(name) if err == nil { return vlan, TypeVLAN, nil } ethernet, err := np.FindEthernetByName(name) if err == nil { return ethernet, TypeEthernet, nil } } // by MAC is less reliable because things like vlans often have the same MAC address if mac != "" { bond, err := np.FindBondByMAC(mac) if err == nil { return bond, TypeBond, nil } if !errors.IsNotFound(err) { return "", "", errors.Trace(err) } vlan, err := np.FindVLANByMAC(mac) if err == nil { return vlan, TypeVLAN, nil } ethernet, err := np.FindEthernetByMAC(mac) if err == nil { return ethernet, TypeEthernet, nil } } return "", "", errors.NotFoundf("device - name %q MAC %q", name, mac) }
go
{ "resource": "" }
q4445
NewModelWatcher
train
// NewModelWatcher builds a ModelWatcher facade over the given model
// accessor, resource registry and authorizer.
func NewModelWatcher(st state.ModelAccessor, resources facade.Resources, authorizer facade.Authorizer) *ModelWatcher {
	w := &ModelWatcher{
		st:         st,
		resources:  resources,
		authorizer: authorizer,
	}
	return w
}
go
{ "resource": "" }
q4446
WatchForModelConfigChanges
train
func (m *ModelWatcher) WatchForModelConfigChanges() (params.NotifyWatchResult, error) { result := params.NotifyWatchResult{} watch := m.st.WatchForModelConfigChanges() // Consume the initial event. Technically, API // calls to Watch 'transmit' the initial event // in the Watch response. But NotifyWatchers // have no state to transmit. if _, ok := <-watch.Changes(); ok { result.NotifyWatcherId = m.resources.Register(watch) } else { return result, watcher.EnsureErr(watch) } return result, nil }
go
{ "resource": "" }
q4447
ModelConfig
train
func (m *ModelWatcher) ModelConfig() (params.ModelConfigResult, error) { result := params.ModelConfigResult{} config, err := m.st.ModelConfig() if err != nil { return result, err } result.Config = config.AllAttrs() return result, nil }
go
{ "resource": "" }
q4448
recoverViaBootstrap
train
func (w *backstopWorker) recoverViaBootstrap(server *raft.Server) error { newServer := *server newServer.Suffrage = raft.Voter configuration := raft.Configuration{ Servers: []raft.Server{newServer}, } w.config.Logger.Infof("rebootstrapping raft configuration: %#v", configuration) err := w.config.Raft.BootstrapCluster(configuration).Error() if err != nil { return errors.Annotate(err, "re-bootstrapping cluster") } return nil }
go
{ "resource": "" }
q4449
NewStateApplication
train
// NewStateApplication wraps a state application (plus its state backend)
// in the Application facade shim.
func NewStateApplication(st *state.State, app *state.Application) Application {
	return stateApplicationShim{app, st}
}
go
{ "resource": "" }
q4450
stateStepsFor21
train
// stateStepsFor21 returns the database-master upgrade steps for Juju 2.1.
func stateStepsFor21() []Step {
	steps := []Step{
		&upgradeStep{
			description: "add attempt to migration docs",
			targets:     []Target{DatabaseMaster},
			run: func(context Context) error {
				return context.State().AddMigrationAttempt()
			},
		},
		&upgradeStep{
			description: "add sequences to track used local charm revisions",
			targets:     []Target{DatabaseMaster},
			run: func(context Context) error {
				return context.State().AddLocalCharmSequences()
			},
		},
		&upgradeStep{
			description: "update lxd cloud/credentials",
			targets:     []Target{DatabaseMaster},
			run: func(context Context) error {
				return updateLXDCloudCredentials(context.State())
			},
		},
	}
	return steps
}
go
{ "resource": "" }
q4451
NewContext
train
// NewContext builds an empty payload hook context over the given API
// client and data directory.
func NewContext(api APIClient, dataDir string) *Context {
	ctx := &Context{
		api:      api,
		dataDir:  dataDir,
		payloads: make(map[string]payload.Payload),
		updates:  make(map[string]payload.Payload),
	}
	return ctx
}
go
{ "resource": "" }
q4452
NewContextAPI
train
func NewContextAPI(api APIClient, dataDir string) (*Context, error) { results, err := api.List() if err != nil { return nil, errors.Trace(err) } ctx := NewContext(api, dataDir) for _, result := range results { pl := result.Payload // TODO(ericsnow) Use id instead of pl.FullID(). ctx.payloads[pl.FullID()] = pl.Payload } return ctx, nil }
go
{ "resource": "" }
q4453
ContextComponent
train
func ContextComponent(ctx HookContext) (Component, error) { compCtx, err := ctx.Component(payload.ComponentName) if errors.IsNotFound(err) { return nil, errors.Errorf("component %q not registered", payload.ComponentName) } if err != nil { return nil, errors.Trace(err) } if compCtx == nil { return nil, errors.Errorf("component %q disabled", payload.ComponentName) } return compCtx, nil }
go
{ "resource": "" }
q4454
Get
train
func (c *Context) Get(class, id string) (*payload.Payload, error) { fullID := payload.BuildID(class, id) logger.Tracef("getting %q from hook context", fullID) actual, ok := c.updates[fullID] if !ok { actual, ok = c.payloads[fullID] if !ok { return nil, errors.NotFoundf("%s", fullID) } } return &actual, nil }
go
{ "resource": "" }
q4455
List
train
func (c *Context) List() ([]string, error) { logger.Tracef("listing all payloads in hook context") payloads, err := c.Payloads() if err != nil { return nil, errors.Trace(err) } if len(payloads) == 0 { return nil, nil } var ids []string for _, wl := range payloads { ids = append(ids, wl.FullID()) } sort.Strings(ids) return ids, nil }
go
{ "resource": "" }
q4456
Track
train
func (c *Context) Track(pl payload.Payload) error { logger.Tracef("adding %q to hook context: %#v", pl.FullID(), pl) if err := pl.Validate(); err != nil { return errors.Trace(err) } // TODO(ericsnow) We are likely missing mechanism for local persistence. id := pl.FullID() c.updates[id] = pl return nil }
go
{ "resource": "" }
q4457
Untrack
train
func (c *Context) Untrack(class, id string) error { fullID := payload.BuildID(class, id) logger.Tracef("Calling untrack on payload context %q", fullID) res, err := c.api.Untrack(fullID) if err != nil { return errors.Trace(err) } // TODO(ericsnow) We should not ignore a 0-len result. if len(res) > 0 && res[0].Error != nil { return errors.Trace(res[0].Error) } delete(c.payloads, id) return nil }
go
{ "resource": "" }
q4458
SetStatus
train
func (c *Context) SetStatus(class, id, status string) error { fullID := payload.BuildID(class, id) logger.Tracef("Calling status-set on payload context %q", fullID) res, err := c.api.SetStatus(status, fullID) if err != nil { return errors.Trace(err) } // TODO(ericsnow) We should not ignore a 0-len result. if len(res) > 0 && res[0].Error != nil { // In a hook context, the case where the specified payload does // not exist is a special one. A hook tool is how a charm author // communicates the state of the charm. So returning an error // here in the "missing" case makes less sense than in other // places. We could simply ignore any error that surfaces for // that case. However, returning the error communicates to the // charm author that what they're trying to communicate doesn't // make sense. return errors.Trace(res[0].Error) } return nil }
go
{ "resource": "" }
q4459
NewAPI
train
func NewAPI(caller base.APICaller, newWatcher NewWatcherFunc) (*API, error) { modelTag, ok := caller.ModelTag() if !ok { return nil, errors.New("machine undertaker client requires a model API connection") } api := API{ facade: base.NewFacadeCaller(caller, "MachineUndertaker"), modelTag: modelTag, newWatcher: newWatcher, } return &api, nil }
go
{ "resource": "" }
q4460
AllMachineRemovals
train
func (api *API) AllMachineRemovals() ([]names.MachineTag, error) { var results params.EntitiesResults args := wrapEntities(api.modelTag) err := api.facade.FacadeCall("AllMachineRemovals", &args, &results) if err != nil { return nil, errors.Trace(err) } if len(results.Results) != 1 { return nil, errors.Errorf("expected one result, got %d", len(results.Results)) } result := results.Results[0] if result.Error != nil { return nil, errors.Trace(result.Error) } machines := make([]names.MachineTag, len(result.Entities)) for i, entity := range result.Entities { tag, err := names.ParseMachineTag(entity.Tag) if err != nil { return nil, errors.Trace(err) } machines[i] = tag } return machines, nil }
go
{ "resource": "" }
q4461
GetProviderInterfaceInfo
train
func (api *API) GetProviderInterfaceInfo(machine names.MachineTag) ([]network.ProviderInterfaceInfo, error) { var result params.ProviderInterfaceInfoResults args := wrapEntities(machine) err := api.facade.FacadeCall("GetMachineProviderInterfaceInfo", &args, &result) if err != nil { return nil, errors.Trace(err) } if len(result.Results) != 1 { return nil, errors.Errorf("expected one result, got %d", len(result.Results)) } item := result.Results[0] if item.MachineTag != machine.String() { return nil, errors.Errorf("expected interface info for %s but got %s", machine, item.MachineTag) } infos := make([]network.ProviderInterfaceInfo, len(item.Interfaces)) for i, info := range item.Interfaces { infos[i].InterfaceName = info.InterfaceName infos[i].MACAddress = info.MACAddress infos[i].ProviderId = network.Id(info.ProviderId) } return infos, nil }
go
{ "resource": "" }
q4462
CompleteRemoval
train
func (api *API) CompleteRemoval(machine names.MachineTag) error { args := wrapEntities(machine) return api.facade.FacadeCall("CompleteMachineRemovals", &args, nil) }
go
{ "resource": "" }
q4463
WatchMachineRemovals
train
func (api *API) WatchMachineRemovals() (watcher.NotifyWatcher, error) { var results params.NotifyWatchResults args := wrapEntities(api.modelTag) err := api.facade.FacadeCall("WatchMachineRemovals", &args, &results) if err != nil { return nil, errors.Trace(err) } if len(results.Results) != 1 { return nil, errors.Errorf("expected one result, got %d", len(results.Results)) } result := results.Results[0] if err := result.Error; err != nil { return nil, errors.Trace(result.Error) } w := api.newWatcher(api.facade.RawAPICaller(), result) return w, nil }
go
{ "resource": "" }
q4464
AddBranch
train
func (c *Client) AddBranch(branchName string) error { var result params.ErrorResult err := c.facade.FacadeCall("AddBranch", argForBranch(branchName), &result) if err != nil { return errors.Trace(err) } if result.Error != nil { return errors.Trace(result.Error) } return nil }
go
{ "resource": "" }
q4465
CommitBranch
train
func (c *Client) CommitBranch(branchName string) (int, error) { var result params.IntResult err := c.facade.FacadeCall("CommitBranch", argForBranch(branchName), &result) if err != nil { return 0, errors.Trace(err) } if result.Error != nil { return 0, errors.Trace(result.Error) } return result.Result, nil }
go
{ "resource": "" }
q4466
HasActiveBranch
train
func (c *Client) HasActiveBranch(branchName string) (bool, error) { var result params.BoolResult err := c.facade.FacadeCall("HasActiveBranch", argForBranch(branchName), &result) if err != nil { return false, errors.Trace(err) } if result.Error != nil { return false, errors.Trace(result.Error) } return result.Result, nil }
go
{ "resource": "" }
q4467
BranchInfo
train
func (c *Client) BranchInfo( branchName string, detailed bool, formatTime func(time.Time) string, ) (model.GenerationSummaries, error) { arg := params.BranchInfoArgs{Detailed: detailed} if branchName != "" { arg.BranchNames = []string{branchName} } var result params.GenerationResults err := c.facade.FacadeCall("BranchInfo", arg, &result) if err != nil { return nil, errors.Trace(err) } if result.Error != nil { return nil, errors.Trace(result.Error) } return generationInfoFromResult(result, detailed, formatTime), nil }
go
{ "resource": "" }
q4468
Name
train
func (s *enumService) Name() string { if s.name != nil { return syscall.UTF16ToString((*[1 << 16]uint16)(unsafe.Pointer(s.name))[:]) } return "" }
go
{ "resource": "" }
q4469
CreateService
train
func (m *manager) CreateService(name, exepath string, c mgr.Config, args ...string) (windowsService, error) { // The Create function relies on the fact that this calls Connect(which connects to localhost) and not // ConnectRemote. If we get to the point where we need to call ConnectRemote we need to stop using // series.HostSeries inside Create. s, err := mgr.Connect() if err != nil { return nil, err } defer s.Disconnect() return s.CreateService(name, exepath, c, args...) }
go
{ "resource": "" }
q4470
OpenService
train
func (m *manager) OpenService(name string) (windowsService, error) { s, err := mgr.Connect() if err != nil { return nil, err } defer s.Disconnect() return s.OpenService(name) }
go
{ "resource": "" }
q4471
CloseHandle
train
// CloseHandle releases the given Windows service handle.
func (m *manager) CloseHandle(handle windows.Handle) error {
	return windows.CloseServiceHandle(handle)
}
go
{ "resource": "" }
q4472
Start
train
func (s *SvcManager) Start(name string) error { running, err := s.Running(name) if err != nil { return errors.Trace(err) } if running { return nil } service, err := s.getService(name) if err != nil { return errors.Trace(err) } defer service.Close() err = service.Start() if err != nil { return err } return nil }
go
{ "resource": "" }
q4473
Stop
train
func (s *SvcManager) Stop(name string) error { running, err := s.Running(name) if err != nil { return errors.Trace(err) } if !running { return nil } service, err := s.getService(name) if err != nil { return errors.Trace(err) } defer service.Close() _, err = service.Control(svc.Stop) if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q4474
Delete
train
func (s *SvcManager) Delete(name string) error { exists, err := s.exists(name) if err != nil { return err } if !exists { return nil } service, err := s.getService(name) if err != nil { return errors.Trace(err) } defer service.Close() err = service.Delete() if err == c_ERROR_SERVICE_DOES_NOT_EXIST { return nil } else if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q4475
Running
train
func (s *SvcManager) Running(name string) (bool, error) { status, err := s.status(name) if err != nil { return false, errors.Trace(err) } logger.Infof("Service %q Status %v", name, status) if status == svc.Running { return true, nil } return false, nil }
go
{ "resource": "" }
q4476
Config
train
func (s *SvcManager) Config(name string) (mgr.Config, error) { exists, err := s.exists(name) if err != nil { return mgr.Config{}, err } if !exists { return mgr.Config{}, c_ERROR_SERVICE_DOES_NOT_EXIST } service, err := s.getService(name) if err != nil { return mgr.Config{}, errors.Trace(err) } defer service.Close() return service.Config() }
go
{ "resource": "" }
q4477
ChangeServicePassword
train
func (s *SvcManager) ChangeServicePassword(svcName, newPassword string) error { currentConfig, err := s.Config(svcName) if err != nil { // If access is denied when accessing the service it means // we can't own it, so there's no reason to return an error // since we only want to change the password on services started // by us. if errors.Cause(err) == syscall.ERROR_ACCESS_DENIED { return nil } return errors.Trace(err) } if currentConfig.ServiceStartName == jujudUser { currentConfig.Password = newPassword service, err := s.getService(svcName) if err != nil { return errors.Trace(err) } defer service.Close() err = service.UpdateConfig(currentConfig) if err != nil { return errors.Trace(err) } } if err != nil { return errors.Trace(err) } return nil }
go
{ "resource": "" }
q4478
Upgrade
train
func (c *Client) Upgrade(agentTag string, v version.Number) error { var result params.ErrorResult arg := params.KubernetesUpgradeArg{ AgentTag: agentTag, Version: v, } if err := c.facade.FacadeCall("UpgradeOperator", arg, &result); err != nil { return errors.Trace(err) } if result.Error != nil { return errors.Trace(result.Error) } return nil }
go
{ "resource": "" }
q4479
cacheZones
train
// cacheZones lazily populates cache.allZones and cache.availableZones by
// calling AllZones once; later calls reuse the cached sets. Duplicate zone
// names from the provider are skipped with a warning. If the provider
// reports no zones at all, allZones is reset to nil (so a later call can
// retry) and an error is returned.
func (cache *addSubnetsCache) cacheZones(ctx context.ProviderCallContext) error { if cache.allZones != nil { // Already cached. logger.Tracef("using cached zones: %v", cache.allZones.SortedValues()) return nil } allZones, err := AllZones(ctx, cache.api) if err != nil { return errors.Annotate(err, "given Zones cannot be validated") } cache.allZones = set.NewStrings() cache.availableZones = set.NewStrings() for _, zone := range allZones.Results { // AllZones() does not use the Error result field, so no // need to check it here. if cache.allZones.Contains(zone.Name) { logger.Warningf("ignoring duplicated zone %q", zone.Name) continue } if zone.Available { cache.availableZones.Add(zone.Name) } cache.allZones.Add(zone.Name) } logger.Tracef( "%d known and %d available zones cached: %v", cache.allZones.Size(), cache.availableZones.Size(), cache.allZones.SortedValues(), ) if cache.allZones.IsEmpty() { cache.allZones = nil // Cached an empty list. return errors.Errorf("no zones defined") } return nil }
go
{ "resource": "" }
q4480
AddSubnets
train
func AddSubnets(ctx context.ProviderCallContext, api NetworkBacking, args params.AddSubnetsParams) (params.ErrorResults, error) { results := params.ErrorResults{ Results: make([]params.ErrorResult, len(args.Subnets)), } if len(args.Subnets) == 0 { return results, nil } cache := NewAddSubnetsCache(api) for i, arg := range args.Subnets { err := addOneSubnet(ctx, api, arg, cache) if err != nil { results.Results[i].Error = common.ServerError(err) } } return results, nil }
go
{ "resource": "" }
q4481
NewStorage
train
// NewStorage returns a Storage backed by the given mongo session,
// scoped to the specified model UUID.
func NewStorage(modelUUID string, session *mgo.Session) Storage {
	storage := stateStorage{modelUUID, session}
	return storage
}
go
{ "resource": "" }
q4482
resourceName
train
// resourceName derives the provider-side resource name for the given
// tag within the named environment ("juju-<env>-<tag>").
func resourceName(tag names.Tag, envName string) string {
	const pattern = "juju-%s-%s"
	return fmt.Sprintf(pattern, envName, tag)
}
go
{ "resource": "" }
q4483
_runInstances
train
// _runInstances calls EC2 RunInstances, retrying for the duration of
// shortAttempt while the failure is a not-found error (EC2 eventual
// consistency right after dependent resources are created). Each attempt
// reports Allocating status through the callback. Any final error is
// converted to a credential error where applicable.
func _runInstances(e *ec2.EC2, ctx context.ProviderCallContext, ri *ec2.RunInstances, c environs.StatusCallbackFunc) (resp *ec2.RunInstancesResp, err error) { try := 1 for a := shortAttempt.Start(); a.Next(); { c(status.Allocating, fmt.Sprintf("Start instance attempt %d", try), nil) resp, err = e.RunInstances(ri) if err == nil || !isNotFoundError(err) { break } try++ } return resp, maybeConvertCredentialError(err, ctx) }
go
{ "resource": "" }
q4484
groupInfoByName
train
func (e *environ) groupInfoByName(ctx context.ProviderCallContext, groupName string) (ec2.SecurityGroupInfo, error) { resp, err := e.securityGroupsByNameOrID(groupName) if err != nil { return ec2.SecurityGroupInfo{}, maybeConvertCredentialError(err, ctx) } if len(resp.Groups) != 1 { return ec2.SecurityGroupInfo{}, errors.NewNotFound(fmt.Errorf( "expected one security group named %q, got %v", groupName, resp.Groups, ), "") } return resp.Groups[0], nil }
go
{ "resource": "" }
q4485
groupByName
train
func (e *environ) groupByName(ctx context.ProviderCallContext, groupName string) (ec2.SecurityGroup, error) { groupInfo, err := e.groupInfoByName(ctx, groupName) return groupInfo.SecurityGroup, err }
go
{ "resource": "" }
q4486
gatherInstances
train
// gatherInstances fills insts[i] for each requested ids[i] found in the
// filtered EC2 Instances response, leaving already-populated slots alone.
// If any requested id remains unmatched it returns
// environs.ErrPartialInstances, so callers can distinguish a partial
// result from a full one.
func (e *environ) gatherInstances( ctx context.ProviderCallContext, ids []instance.Id, insts []instances.Instance, filter *ec2.Filter, ) error { resp, err := e.ec2.Instances(nil, filter) if err != nil { return maybeConvertCredentialError(err, ctx) } n := 0 // For each requested id, add it to the returned instances // if we find it in the response. for i, id := range ids { if insts[i] != nil { n++ continue } for j := range resp.Reservations { r := &resp.Reservations[j] for k := range r.Instances { if r.Instances[k].InstanceId != string(id) { continue } inst := r.Instances[k] // TODO(wallyworld): lookup the details to fill in the instance type data insts[i] = &ec2Instance{e: e, Instance: &inst} n++ } } } if n < len(ids) { return environs.ErrPartialInstances } return nil }
go
{ "resource": "" }
q4487
NetworkInterfaces
train
// NetworkInterfaces returns the network interface details for the given
// instance. It retries for the duration of shortAttempt, because a freshly
// started instance may not have its NIC attachment visible yet; credential
// errors abort the retry loop immediately. For each interface found, the
// owning subnet is looked up to fill in the CIDR and availability zone.
// EC2 does not expose interface names, so a synthetic
// "unsupported<device-index>" name is used.
func (e *environ) NetworkInterfaces(ctx context.ProviderCallContext, instId instance.Id) ([]network.InterfaceInfo, error) { var err error var networkInterfacesResp *ec2.NetworkInterfacesResp for a := shortAttempt.Start(); a.Next(); { logger.Tracef("retrieving NICs for instance %q", instId) filter := ec2.NewFilter() filter.Add("attachment.instance-id", string(instId)) networkInterfacesResp, err = e.ec2.NetworkInterfaces(nil, filter) logger.Tracef("instance %q NICs: %#v (err: %v)", instId, networkInterfacesResp, err) if err != nil { err = maybeConvertCredentialError(err, ctx) if common.IsCredentialNotValid(err) { // no need to re-try: there is a problem with credentials break } logger.Errorf("failed to get instance %q interfaces: %v (retrying)", instId, err) continue } if len(networkInterfacesResp.Interfaces) == 0 { logger.Tracef("instance %q has no NIC attachment yet, retrying...", instId) continue } logger.Tracef("found instance %q NICS: %#v", instId, networkInterfacesResp.Interfaces) break } if err != nil { // either the instance doesn't exist or we couldn't get through to // the ec2 api return nil, errors.Annotatef(err, "cannot get instance %q network interfaces", instId) } ec2Interfaces := networkInterfacesResp.Interfaces result := make([]network.InterfaceInfo, len(ec2Interfaces)) for i, iface := range ec2Interfaces { resp, err := e.ec2.Subnets([]string{iface.SubnetId}, nil) if err != nil { return nil, errors.Annotatef(maybeConvertCredentialError(err, ctx), "failed to retrieve subnet %q info", iface.SubnetId) } if len(resp.Subnets) != 1 { return nil, errors.Errorf("expected 1 subnet, got %d", len(resp.Subnets)) } subnet := resp.Subnets[0] cidr := subnet.CIDRBlock result[i] = network.InterfaceInfo{ DeviceIndex: iface.Attachment.DeviceIndex, MACAddress: iface.MACAddress, CIDR: cidr, ProviderId: network.Id(iface.Id), ProviderSubnetId: network.Id(iface.SubnetId), AvailabilityZones: []string{subnet.AvailZone}, VLANTag: 0, // Not supported on EC2. 
// Getting the interface name is not supported on EC2, so fake it. InterfaceName: fmt.Sprintf("unsupported%d", iface.Attachment.DeviceIndex), Disabled: false, NoAutoStart: false, ConfigType: network.ConfigDHCP, InterfaceType: network.EthernetInterface, Address: network.NewScopedAddress(iface.PrivateIPAddress, network.ScopeCloudLocal), } } return result, nil }
go
{ "resource": "" }
q4488
Spaces
train
// Spaces is not supported by the EC2 provider; it always returns a
// NotSupported error.
func (e *environ) Spaces(ctx context.ProviderCallContext) ([]network.SpaceInfo, error) {
	return nil, errors.NotSupportedf("Spaces")
}
go
{ "resource": "" }
q4489
Subnets
train
// Subnets returns info for the requested subnet ids (or all of them when
// subnetIds is empty). With a known instance id, subnets are derived from
// that instance's network interfaces; otherwise they come from the whole
// VPC. A map keyed by subnet id tracks which requested ids were actually
// found; any id left unmatched at the end turns into an error listing the
// missing ids. Per-subnet conversion failures are skipped (already logged
// by makeSubnetInfo).
func (e *environ) Subnets(ctx context.ProviderCallContext, instId instance.Id, subnetIds []network.Id) ([]network.SubnetInfo, error) { var results []network.SubnetInfo subIdSet := make(map[string]bool) for _, subId := range subnetIds { subIdSet[string(subId)] = false } if instId != instance.UnknownId { interfaces, err := e.NetworkInterfaces(ctx, instId) if err != nil { return results, errors.Trace(err) } if len(subnetIds) == 0 { for _, iface := range interfaces { subIdSet[string(iface.ProviderSubnetId)] = false } } for _, iface := range interfaces { _, ok := subIdSet[string(iface.ProviderSubnetId)] if !ok { logger.Tracef("subnet %q not in %v, skipping", iface.ProviderSubnetId, subnetIds) continue } subIdSet[string(iface.ProviderSubnetId)] = true info, err := makeSubnetInfo(iface.CIDR, iface.ProviderSubnetId, iface.ProviderNetworkId, iface.AvailabilityZones) if err != nil { // Error will already have been logged. continue } results = append(results, info) } } else { resp, _, err := e.subnetsForVPC(ctx) if err != nil { return nil, errors.Annotatef(err, "failed to retrieve subnets") } if len(subnetIds) == 0 { for _, subnet := range resp.Subnets { subIdSet[subnet.Id] = false } } for _, subnet := range resp.Subnets { _, ok := subIdSet[subnet.Id] if !ok { logger.Tracef("subnet %q not in %v, skipping", subnet.Id, subnetIds) continue } subIdSet[subnet.Id] = true cidr := subnet.CIDRBlock info, err := makeSubnetInfo(cidr, network.Id(subnet.Id), network.Id(subnet.VPCId), []string{subnet.AvailZone}) if err != nil { // Error will already have been logged. continue } results = append(results, info) } } notFound := []string{} for subId, found := range subIdSet { if !found { notFound = append(notFound, subId) } } if len(notFound) != 0 { return nil, errors.Errorf("failed to find the following subnet ids: %v", notFound) } return results, nil }
go
{ "resource": "" }
q4490
AllInstances
train
// AllInstances returns every pending or running instance in the model.
func (e *environ) AllInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
	aliveStates := []string{"pending", "running"}
	return e.AllInstancesByState(ctx, aliveStates...)
}
go
{ "resource": "" }
q4491
AllInstancesByState
train
// AllInstancesByState returns all instances in the model that are in any
// of the given EC2 states. Instances are matched via membership in the
// model's juju security group (rather than tags), because instances start
// out untagged and could otherwise be leaked; see the inline rationale
// below. A missing group means there can be no instances, so nil is
// returned without error.
func (e *environ) AllInstancesByState(ctx context.ProviderCallContext, states ...string) ([]instances.Instance, error) { // NOTE(axw) we use security group filtering here because instances // start out untagged. If Juju were to abort after starting an instance, // but before tagging it, it would be leaked. We only need to do this // for AllInstances, as it is the result of AllInstances that is used // in "harvesting" unknown instances by the provisioner. // // One possible alternative is to modify ec2.RunInstances to allow the // caller to specify ClientToken, and then format it like // <controller-uuid>:<model-uuid>:<machine-id> // (with base64-encoding to keep the size under the 64-byte limit) // // It is possible to filter on "client-token", and specify wildcards; // therefore we could use client-token filters everywhere in the ec2 // provider instead of tags or security groups. The only danger is if // we need to make non-idempotent calls to RunInstances for the machine // ID. I don't think this is needed, but I am not confident enough to // change this fundamental right now. // // An EC2 API call is required to resolve the group name to an id, as // VPC enabled accounts do not support name based filtering. groupName := e.jujuGroupName() group, err := e.groupByName(ctx, groupName) if isNotFoundError(err) { // If there's no group, then there cannot be any instances. return nil, nil } else if err != nil { return nil, errors.Trace(maybeConvertCredentialError(err, ctx)) } filter := ec2.NewFilter() filter.Add("instance-state-name", states...) filter.Add("instance.group-id", group.Id) return e.allInstances(ctx, filter) }
go
{ "resource": "" }
q4492
allControllerManagedInstances
train
func (e *environ) allControllerManagedInstances(ctx context.ProviderCallContext, controllerUUID string) ([]instance.Id, error) { filter := ec2.NewFilter() filter.Add("instance-state-name", aliveInstanceStates...) e.addControllerFilter(filter, controllerUUID) return e.allInstanceIDs(ctx, filter) }
go
{ "resource": "" }
q4493
destroyControllerManagedEnvirons
train
// destroyControllerManagedEnvirons tears down everything managed by the
// given controller, in order: terminate all instances, destroy all
// controller-managed volumes (root disks go away with the instances), then
// delete the controller's security groups. Volume destruction is
// best-effort across the whole list, but only the first error encountered
// is reported.
func (e *environ) destroyControllerManagedEnvirons(ctx context.ProviderCallContext, controllerUUID string) error { // Terminate all instances managed by the controller. instIds, err := e.allControllerManagedInstances(ctx, controllerUUID) if err != nil { return errors.Annotate(err, "listing instances") } if err := e.terminateInstances(ctx, instIds); err != nil { return errors.Annotate(err, "terminating instances") } // Delete all volumes managed by the controller. (No need to delete root disks manually.) volIds, err := e.allControllerManagedVolumes(ctx, controllerUUID, false) if err != nil { return errors.Annotate(err, "listing volumes") } errs := foreachVolume(e.ec2, ctx, volIds, destroyVolume) for i, err := range errs { if err == nil { continue } // (anastasiamac 2018-03-21) This is strange - we do try // to destroy all volumes but afterwards, if we have encountered any errors, // we will return first one...The same logic happens on detach..?... return errors.Annotatef(err, "destroying volume %q", volIds[i]) } // Delete security groups managed by the controller. groups, err := e.controllerSecurityGroups(ctx, controllerUUID) if err != nil { return errors.Trace(err) } for _, g := range groups { if err := deleteSecurityGroupInsistently(e.ec2, ctx, g, clock.WallClock); err != nil { return errors.Annotatef( err, "cannot delete security group %q (%q)", g.Name, g.Id, ) } } return nil }
go
{ "resource": "" }
q4494
controllerSecurityGroups
train
func (e *environ) controllerSecurityGroups(ctx context.ProviderCallContext, controllerUUID string) ([]ec2.SecurityGroup, error) { filter := ec2.NewFilter() e.addControllerFilter(filter, controllerUUID) resp, err := e.ec2.SecurityGroups(nil, filter) if err != nil { return nil, errors.Annotate(maybeConvertCredentialError(err, ctx), "listing security groups") } groups := make([]ec2.SecurityGroup, len(resp.Groups)) for i, info := range resp.Groups { groups[i] = ec2.SecurityGroup{Id: info.Id, Name: info.Name} } return groups, nil }
go
{ "resource": "" }
q4495
cleanEnvironmentSecurityGroups
train
func (e *environ) cleanEnvironmentSecurityGroups(ctx context.ProviderCallContext) error { jujuGroup := e.jujuGroupName() g, err := e.groupByName(ctx, jujuGroup) if isNotFoundError(err) { return nil } if err != nil { return errors.Annotatef(err, "cannot retrieve default security group: %q", jujuGroup) } if err := deleteSecurityGroupInsistently(e.ec2, ctx, g, clock.WallClock); err != nil { return errors.Annotate(err, "cannot delete default security group") } return nil }
go
{ "resource": "" }
q4496
setUpGroups
train
// setUpGroups ensures the two security groups a new machine needs and
// returns them: the shared juju group (SSH, the API ports, and open
// TCP/UDP/ICMP between group members), plus either a per-machine group
// (FwInstance) or the model-global group (FwGlobal), both created empty.
// Note that the intra-group TCP/UDP/ICMP permissions carry no SourceIPs,
// which newPermSetForGroup resolves to "from this group" rules.
func (e *environ) setUpGroups(ctx context.ProviderCallContext, controllerUUID, machineId string, apiPorts []int) ([]ec2.SecurityGroup, error) { perms := []ec2.IPPerm{{ Protocol: "tcp", FromPort: 22, ToPort: 22, SourceIPs: []string{"0.0.0.0/0"}, }} for _, apiPort := range apiPorts { perms = append(perms, ec2.IPPerm{ Protocol: "tcp", FromPort: apiPort, ToPort: apiPort, SourceIPs: []string{"0.0.0.0/0"}, }) } perms = append(perms, ec2.IPPerm{ Protocol: "tcp", FromPort: 0, ToPort: 65535, }, ec2.IPPerm{ Protocol: "udp", FromPort: 0, ToPort: 65535, }, ec2.IPPerm{ Protocol: "icmp", FromPort: -1, ToPort: -1, }) // Ensure there's a global group for Juju-related traffic. jujuGroup, err := e.ensureGroup(ctx, controllerUUID, e.jujuGroupName(), perms) if err != nil { return nil, err } var machineGroup ec2.SecurityGroup switch e.Config().FirewallMode() { case config.FwInstance: machineGroup, err = e.ensureGroup(ctx, controllerUUID, e.machineGroupName(machineId), nil) case config.FwGlobal: machineGroup, err = e.ensureGroup(ctx, controllerUUID, e.globalGroupName(), nil) } if err != nil { return nil, err } return []ec2.SecurityGroup{jujuGroup, machineGroup}, nil }
go
{ "resource": "" }
q4497
newPermSetForGroup
train
func newPermSetForGroup(ps []ec2.IPPerm, group ec2.SecurityGroup) permSet { m := make(permSet) for _, p := range ps { k := permKey{ protocol: p.Protocol, fromPort: p.FromPort, toPort: p.ToPort, } if len(p.SourceIPs) > 0 { for _, ip := range p.SourceIPs { k.ipAddr = ip m[k] = true } } else { k.groupId = group.Id m[k] = true } } return m }
go
{ "resource": "" }
q4498
ipPerms
train
func (m permSet) ipPerms() (ps []ec2.IPPerm) { // We could compact the permissions, but it // hardly seems worth it. for p := range m { ipp := ec2.IPPerm{ Protocol: p.protocol, FromPort: p.fromPort, ToPort: p.toPort, } if p.ipAddr != "" { ipp.SourceIPs = []string{p.ipAddr} } else { ipp.SourceGroups = []ec2.UserSecurityGroup{{Id: p.groupId}} } ps = append(ps, ipp) } return }
go
{ "resource": "" }
q4499
isZoneConstrainedError
train
func isZoneConstrainedError(err error) bool { switch err := errors.Cause(err).(type) { case *ec2.Error: switch err.Code { case "Unsupported", "InsufficientInstanceCapacity": // A big hammer, but we've now seen several different error messages // for constrained zones, and who knows how many more there might // be. If the message contains "Availability Zone", it's a fair // bet that it's constrained or otherwise unusable. return strings.Contains(err.Message, "Availability Zone") case "InvalidInput": // If the region has a default VPC, then we will receive an error // if the AZ does not have a default subnet. Until we have proper // support for networks, we'll skip over these. return strings.HasPrefix(err.Message, "No default subnet for availability zone") case "VolumeTypeNotAvailableInZone": return true } } return false }
go
{ "resource": "" }