_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q3900
|
Destroy
|
train
|
// Destroy tears down the Kubernetes model: it deletes the model's
// namespace and any storage classes labelled as belonging to the model,
// then blocks until the namespace has actually terminated (namespace
// deletion is asynchronous in Kubernetes), or until the caller aborts.
func (k *kubernetesClient) Destroy(callbacks context.ProviderCallContext) error {
	// Watch the namespace so we can observe its termination below.
	watcher, err := k.WatchNamespace()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Kill()

	if err := k.deleteNamespace(); err != nil {
		return errors.Annotate(err, "deleting model namespace")
	}
	// Delete any storage classes created as part of this model.
	// Storage classes live outside the namespace so need to be deleted separately.
	modelSelector := fmt.Sprintf("%s==%s", labelModel, k.namespace)
	err = k.client().StorageV1().StorageClasses().DeleteCollection(&v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	}, v1.ListOptions{
		LabelSelector: modelSelector,
	})
	if err != nil && !k8serrors.IsNotFound(err) {
		return errors.Annotate(err, "deleting model storage classes")
	}
	for {
		select {
		case <-callbacks.Dying():
			return nil
		case <-watcher.Changes():
			// Ensure the namespace has been deleted - a not-found error is expected.
			_, err := k.GetNamespace(k.namespace)
			if errors.IsNotFound(err) {
				// The namespace has been deleted.
				return nil
			}
			if err != nil {
				return errors.Trace(err)
			}
			// Fixed log message (was "is still been terminating").
			logger.Debugf("namespace %q is still terminating", k.namespace)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3901
|
APIVersion
|
train
|
// APIVersion returns the version of the cluster's API server as
// reported by the /version endpoint, with the leading "v" removed
// (e.g. "1.16.2").
func (k *kubernetesClient) APIVersion() (string, error) {
	body, err := k.client().CoreV1().RESTClient().Get().AbsPath("/version").Do().Raw()
	if err != nil {
		return "", err
	}
	var info apimachineryversion.Info
	err = json.Unmarshal(body, &info)
	if err != nil {
		return "", errors.Annotatef(err, "got '%s' querying API version", string(body))
	}
	version := info.GitVersion
	// Git version is "vX.Y.Z"; strip only the leading "v".
	// strings.Trim would also strip any trailing "v" characters,
	// which is not the intent here.
	version = strings.TrimPrefix(version, "v")
	return version, nil
}
|
go
|
{
"resource": ""
}
|
q3902
|
ensureOCIImageSecret
|
train
|
// ensureOCIImageSecret creates or updates the docker-registry secret
// holding the credentials needed to pull the application's OCI image.
// A password must be present in imageDetails.
func (k *kubernetesClient) ensureOCIImageSecret(
	imageSecretName,
	appName string,
	imageDetails *caas.ImageDetails,
	annotations k8sannotations.Annotation,
) error {
	if imageDetails.Password == "" {
		return errors.New("attempting to create a secret with no password")
	}
	// Build the .dockerconfigjson payload from the image details.
	secretData, err := createDockerConfigJSON(imageDetails)
	if err != nil {
		return errors.Trace(err)
	}
	// Label the secret with the application name so it can be found
	// (and cleaned up) by application selectors later.
	newSecret := &core.Secret{
		ObjectMeta: v1.ObjectMeta{
			Name:        imageSecretName,
			Namespace:   k.namespace,
			Labels:      map[string]string{labelApplication: appName},
			Annotations: annotations.ToMap()},
		Type: core.SecretTypeDockerConfigJson,
		Data: map[string][]byte{
			core.DockerConfigJsonKey: secretData,
		},
	}
	return errors.Trace(k.ensureSecret(newSecret))
}
|
go
|
{
"resource": ""
}
|
q3903
|
updateSecret
|
train
|
// updateSecret updates an existing secret in the model's namespace.
func (k *kubernetesClient) updateSecret(sec *core.Secret) error {
	_, err := k.client().CoreV1().Secrets(k.namespace).Update(sec)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3904
|
getSecret
|
train
|
// getSecret returns the named secret from the model's namespace,
// translating a Kubernetes not-found error into a juju not-found error.
func (k *kubernetesClient) getSecret(secretName string) (*core.Secret, error) {
	secret, err := k.client().CoreV1().Secrets(k.namespace).Get(secretName, v1.GetOptions{IncludeUninitialized: true})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			return nil, errors.NotFoundf("secret %q", secretName)
		}
		return nil, errors.Trace(err)
	}
	return secret, nil
}
|
go
|
{
"resource": ""
}
|
q3905
|
createSecret
|
train
|
// createSecret creates a new secret in the model's namespace.
func (k *kubernetesClient) createSecret(secret *core.Secret) error {
	_, err := k.client().CoreV1().Secrets(k.namespace).Create(secret)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3906
|
deleteSecret
|
train
|
// deleteSecret removes the named secret from the model's namespace.
// Deleting a secret that does not exist is treated as success.
func (k *kubernetesClient) deleteSecret(secretName string) error {
	secrets := k.client().CoreV1().Secrets(k.namespace)
	err := secrets.Delete(secretName, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if k8serrors.IsNotFound(err) {
		return nil
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3907
|
OperatorExists
|
train
|
// OperatorExists reports whether the operator stateful set for the
// given application exists, and whether it is currently terminating
// (i.e. has a deletion timestamp set).
func (k *kubernetesClient) OperatorExists(appName string) (caas.OperatorState, error) {
	var result caas.OperatorState
	statefulsets := k.client().AppsV1().StatefulSets(k.namespace)
	operator, err := statefulsets.Get(k.operatorName(appName), v1.GetOptions{IncludeUninitialized: true})
	if k8serrors.IsNotFound(err) {
		// Not found is not an error; the zero OperatorState means "absent".
		return result, nil
	}
	if err != nil {
		return result, errors.Trace(err)
	}
	result.Exists = true
	result.Terminating = operator.DeletionTimestamp != nil
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3908
|
ValidateStorageClass
|
train
|
// ValidateStorageClass checks that the storage class named in config
// exists, and, if a provisioner is also configured, that the existing
// class uses that provisioner. A not-valid error is returned otherwise.
func (k *kubernetesClient) ValidateStorageClass(config map[string]interface{}) error {
	cfg, err := newStorageConfig(config)
	if err != nil {
		return errors.Trace(err)
	}
	sc, err := k.getStorageClass(cfg.storageClass)
	if err != nil {
		return errors.NewNotValid(err, fmt.Sprintf("storage class %q", cfg.storageClass))
	}
	// No provisioner requested: any existing class of that name is fine.
	if cfg.storageProvisioner == "" {
		return nil
	}
	if sc.Provisioner != cfg.storageProvisioner {
		// Fixed typo in the error message (was "provisoner").
		return errors.NewNotValid(
			nil,
			fmt.Sprintf("storage class %q has provisioner %q, not %q", cfg.storageClass, sc.Provisioner, cfg.storageProvisioner))
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3909
|
maybeGetVolumeClaimSpec
|
train
|
// maybeGetVolumeClaimSpec builds a PersistentVolumeClaimSpec for the
// given volume params. It uses the configured storage class if one
// already exists; otherwise it tries to create it via
// EnsureStorageProvisioner. A not-found error is returned if no
// storage class can be found or created.
func (k *kubernetesClient) maybeGetVolumeClaimSpec(params volumeParams) (*core.PersistentVolumeClaimSpec, error) {
	storageClassName := params.storageConfig.storageClass
	haveStorageClass := false
	if storageClassName == "" {
		return nil, errors.New("cannot create a volume claim spec without a storage class")
	}
	// See if the requested storage class exists already.
	sc, err := k.getStorageClass(storageClassName)
	if err != nil && !k8serrors.IsNotFound(err) {
		return nil, errors.Annotatef(err, "looking for storage class %q", storageClassName)
	}
	if err == nil {
		haveStorageClass = true
		storageClassName = sc.Name
	}
	if !haveStorageClass {
		// Class not found: attempt to create it with the configured provisioner.
		params.storageConfig.storageClass = storageClassName
		sc, err := k.EnsureStorageProvisioner(caas.StorageProvisioner{
			Name:          params.storageConfig.storageClass,
			Namespace:     k.namespace,
			Provisioner:   params.storageConfig.storageProvisioner,
			Parameters:    params.storageConfig.parameters,
			ReclaimPolicy: string(params.storageConfig.reclaimPolicy),
		})
		if err != nil && !errors.IsNotFound(err) {
			return nil, errors.Trace(err)
		}
		if err == nil {
			haveStorageClass = true
			storageClassName = sc.Name
		}
	}
	if !haveStorageClass {
		return nil, errors.NewNotFound(nil, fmt.Sprintf(
			"cannot create persistent volume as storage class %q cannot be found", storageClassName))
	}
	// Default to single-node read-write access if none was requested.
	accessMode := params.accessMode
	if accessMode == "" {
		accessMode = core.ReadWriteOnce
	}
	return &core.PersistentVolumeClaimSpec{
		StorageClassName: &storageClassName,
		Resources: core.ResourceRequirements{
			Requests: core.ResourceList{
				core.ResourceStorage: params.requestedVolumeSize,
			},
		},
		AccessModes: []core.PersistentVolumeAccessMode{accessMode},
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3910
|
getStorageClass
|
train
|
// getStorageClass returns the named storage class. It first looks for
// the model-qualified name, then falls back to the bare name so that
// cluster-wide classes can also be used.
func (k *kubernetesClient) getStorageClass(name string) (*k8sstorage.StorageClass, error) {
	storageClasses := k.client().StorageV1().StorageClasses()
	qualifiedName := qualifiedStorageClassName(k.namespace, name)
	sc, err := storageClasses.Get(qualifiedName, v1.GetOptions{})
	if err == nil {
		return sc, nil
	}
	if !k8serrors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	// Qualified name not found; try the unqualified name.
	return storageClasses.Get(name, v1.GetOptions{})
}
|
go
|
{
"resource": ""
}
|
q3911
|
EnsureStorageProvisioner
|
train
|
// EnsureStorageProvisioner returns the named storage class if it exists,
// otherwise creates it using the provisioner, parameters and reclaim
// policy in cfg. If the class does not exist and no provisioner is
// given, a not-found error is returned.
func (k *kubernetesClient) EnsureStorageProvisioner(cfg caas.StorageProvisioner) (*caas.StorageProvisioner, error) {
	// First see if the named storage class exists.
	sc, err := k.getStorageClass(cfg.Name)
	if err == nil {
		return &caas.StorageProvisioner{
			Name:        sc.Name,
			Provisioner: sc.Provisioner,
			Parameters:  sc.Parameters,
		}, nil
	}
	if !k8serrors.IsNotFound(err) {
		return nil, errors.Annotatef(err, "getting storage class %q", cfg.Name)
	}
	// If it's not found but there's no provisioner specified, we can't
	// create it so just return not found.
	if cfg.Provisioner == "" {
		return nil, errors.NewNotFound(nil,
			fmt.Sprintf("storage class %q doesn't exist, but no storage provisioner has been specified",
				cfg.Name))
	}
	// Create the storage class with the specified provisioner.
	var reclaimPolicy *core.PersistentVolumeReclaimPolicy
	if cfg.ReclaimPolicy != "" {
		policy := core.PersistentVolumeReclaimPolicy(cfg.ReclaimPolicy)
		reclaimPolicy = &policy
	}
	storageClasses := k.client().StorageV1().StorageClasses()
	sc = &k8sstorage.StorageClass{
		ObjectMeta: v1.ObjectMeta{
			Name: qualifiedStorageClassName(cfg.Namespace, cfg.Name),
		},
		Provisioner:   cfg.Provisioner,
		ReclaimPolicy: reclaimPolicy,
		Parameters:    cfg.Parameters,
	}
	if cfg.Namespace != "" {
		// Label namespaced classes with the model so Destroy can find them.
		sc.Labels = map[string]string{labelModel: k.namespace}
	}
	_, err = storageClasses.Create(sc)
	if err != nil {
		return nil, errors.Annotatef(err, "creating storage class %q", cfg.Name)
	}
	return &caas.StorageProvisioner{
		Name:        sc.Name,
		Provisioner: sc.Provisioner,
		Parameters:  sc.Parameters,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3912
|
DeleteOperator
|
train
|
// DeleteOperator deletes the operator for the named application along
// with its associated resources: config maps, the operator stateful
// set and deployment, per-container image secrets, and operator
// storage volumes (force-deleted in case the reclaim policy is retain).
func (k *kubernetesClient) DeleteOperator(appName string) (err error) {
	logger.Debugf("deleting %s operator", appName)
	operatorName := k.operatorName(appName)
	legacy := isLegacyName(operatorName)

	// First delete the config map(s).
	configMaps := k.client().CoreV1().ConfigMaps(k.namespace)
	configMapName := operatorConfigMapName(operatorName)
	err = configMaps.Delete(configMapName, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if err != nil && !k8serrors.IsNotFound(err) {
		// BUG FIX: previously returned nil here, silently swallowing
		// genuine deletion failures.
		return errors.Trace(err)
	}

	// Delete artefacts created by k8s itself.
	configMapName = appName + "-configurations-config"
	if legacy {
		configMapName = "juju-" + configMapName
	}
	err = configMaps.Delete(configMapName, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if err != nil && !k8serrors.IsNotFound(err) {
		// BUG FIX: as above, report the error instead of returning nil.
		return errors.Trace(err)
	}

	// Finally the operator itself.
	if err := k.deleteStatefulSet(operatorName); err != nil {
		return errors.Trace(err)
	}
	pods := k.client().CoreV1().Pods(k.namespace)
	podsList, err := pods.List(v1.ListOptions{
		LabelSelector: operatorSelector(appName),
	})
	if err != nil {
		return errors.Trace(err)
	}
	deploymentName := appName
	if legacy {
		deploymentName = "juju-" + appName
	}
	pvs := k.client().CoreV1().PersistentVolumes()
	for _, p := range podsList.Items {
		// Delete secrets.
		for _, c := range p.Spec.Containers {
			secretName := appSecretName(deploymentName, c.Name)
			if err := k.deleteSecret(secretName); err != nil {
				return errors.Annotatef(err, "deleting %s secret for container %s", appName, c.Name)
			}
		}
		// Delete operator storage volumes.
		volumeNames, err := k.deleteVolumeClaims(appName, &p)
		if err != nil {
			return errors.Trace(err)
		}
		// Just in case the volume reclaim policy is retain, we force deletion
		// for operators as the volume is an inseparable part of the operator.
		for _, volName := range volumeNames {
			err = pvs.Delete(volName, &v1.DeleteOptions{
				PropagationPolicy: &defaultPropagationPolicy,
			})
			if err != nil && !k8serrors.IsNotFound(err) {
				return errors.Annotatef(err, "deleting operator persistent volume %v for %v",
					volName, appName)
			}
		}
	}
	return errors.Trace(k.deleteDeployment(operatorName))
}
|
go
|
{
"resource": ""
}
|
q3913
|
GetService
|
train
|
// GetService returns the caas.Service for the named application,
// populated from the k8s service (id/addresses) plus whichever of the
// stateful set or deployment exists (scale/status). Either workload
// may not exist yet; missing pieces are simply left unset.
func (k *kubernetesClient) GetService(appName string, includeClusterIP bool) (*caas.Service, error) {
	services := k.client().CoreV1().Services(k.namespace)
	servicesList, err := services.List(v1.ListOptions{
		LabelSelector:        applicationSelector(appName),
		IncludeUninitialized: true,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	var result caas.Service
	// We may have the stateful set or deployment but service not done yet.
	if len(servicesList.Items) > 0 {
		service := servicesList.Items[0]
		result.Id = string(service.GetUID())
		result.Addresses = getSvcAddresses(&service, includeClusterIP)
	}

	// Prefer the stateful set if one exists.
	deploymentName := k.deploymentName(appName)
	statefulsets := k.client().AppsV1().StatefulSets(k.namespace)
	ss, err := statefulsets.Get(deploymentName, v1.GetOptions{})
	if err == nil {
		if ss.Spec.Replicas != nil {
			scale := int(*ss.Spec.Replicas)
			result.Scale = &scale
		}
		message, ssStatus, err := k.getStatefulSetStatus(ss)
		if err != nil {
			return nil, errors.Annotatef(err, "getting status for %s", ss.Name)
		}
		result.Status = status.StatusInfo{
			Status:  ssStatus,
			Message: message,
		}
		return &result, nil
	}
	if !k8serrors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}

	// No stateful set; fall back to a deployment.
	deployments := k.client().AppsV1().Deployments(k.namespace)
	deployment, err := deployments.Get(deploymentName, v1.GetOptions{})
	if err != nil && !k8serrors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	if err == nil {
		if deployment.Spec.Replicas != nil {
			scale := int(*deployment.Spec.Replicas)
			result.Scale = &scale
		}
		message, deployStatus, err := k.getDeploymentStatus(deployment)
		if err != nil {
			// BUG FIX: previously annotated with ss.Name, but ss is nil
			// in this branch (the stateful set was not found), which
			// would panic; use the deployment's name instead.
			return nil, errors.Annotatef(err, "getting status for %s", deployment.Name)
		}
		result.Status = status.StatusInfo{
			Status:  deployStatus,
			Message: message,
		}
	}
	return &result, nil
}
|
go
|
{
"resource": ""
}
|
q3914
|
DeleteService
|
train
|
// DeleteService removes all the Kubernetes resources for the named
// application: its service, stateful set, deployment and any secrets
// labelled with the application selector.
func (k *kubernetesClient) DeleteService(appName string) (err error) {
	logger.Debugf("deleting application %s", appName)
	deploymentName := k.deploymentName(appName)
	if err := k.deleteService(deploymentName); err != nil {
		return errors.Trace(err)
	}
	if err := k.deleteStatefulSet(deploymentName); err != nil {
		return errors.Trace(err)
	}
	if err := k.deleteDeployment(deploymentName); err != nil {
		return errors.Trace(err)
	}
	// Clean up any secrets belonging to the application.
	secrets := k.client().CoreV1().Secrets(k.namespace)
	secretList, err := secrets.List(v1.ListOptions{
		LabelSelector: applicationSelector(appName),
	})
	if err != nil {
		return errors.Trace(err)
	}
	for _, s := range secretList.Items {
		if err := k.deleteSecret(s.Name); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3915
|
EnsureCustomResourceDefinition
|
train
|
// EnsureCustomResourceDefinition ensures every custom resource
// definition declared in the pod spec exists in the cluster.
// appName is currently unused beyond the pod spec it accompanies.
func (k *kubernetesClient) EnsureCustomResourceDefinition(appName string, podSpec *caas.PodSpec) error {
	for name, crd := range podSpec.CustomResourceDefinitions {
		// The inner crd deliberately shadows the loop variable: it is the
		// ensured (server-side) definition returned by the template call.
		crd, err := k.ensureCustomResourceDefinitionTemplate(name, crd)
		if err != nil {
			return errors.Annotate(err, fmt.Sprintf("ensure custom resource definition %q", name))
		}
		logger.Debugf("ensured custom resource definition %q", crd.ObjectMeta.Name)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3916
|
Upgrade
|
train
|
// Upgrade sets the juju version for the named application's operator
// (or for the controller itself) by rewriting the version tag on any
// juju OCI images in the stateful set's pod template.
func (k *kubernetesClient) Upgrade(appName string, vers version.Number) error {
	var resourceName string
	if appName == JujuControllerStackName {
		// upgrading controller.
		resourceName = appName
	} else {
		// upgrading operator.
		resourceName = k.operatorName(appName)
	}
	logger.Debugf("Upgrading %q", resourceName)

	statefulsets := k.client().AppsV1().StatefulSets(k.namespace)
	existingStatefulSet, err := statefulsets.Get(resourceName, v1.GetOptions{IncludeUninitialized: true})
	if err != nil && !k8serrors.IsNotFound(err) {
		return errors.Trace(err)
	}
	// TODO(wallyworld) - only support stateful set at the moment
	if err != nil {
		return errors.NotSupportedf("upgrading %v", appName)
	}
	for i, c := range existingStatefulSet.Spec.Template.Spec.Containers {
		if !podcfg.IsJujuOCIImage(c.Image) {
			continue
		}
		// Swap the image's version tag for the requested version.
		tagSep := strings.LastIndex(c.Image, ":")
		if tagSep < 0 {
			// Guard against an untagged image reference, which would
			// previously have caused a slice-bounds panic below.
			return errors.Errorf("unexpected image %q: no version tag", c.Image)
		}
		c.Image = fmt.Sprintf("%s:%s", c.Image[:tagSep], vers.String())
		existingStatefulSet.Spec.Template.Spec.Containers[i] = c
	}
	_, err = statefulsets.Update(existingStatefulSet)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3917
|
createStatefulSet
|
train
|
// createStatefulSet creates a new stateful set in the model's namespace.
func (k *kubernetesClient) createStatefulSet(spec *apps.StatefulSet) error {
	_, err := k.client().AppsV1().StatefulSets(k.namespace).Create(spec)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3918
|
deleteStatefulSet
|
train
|
// deleteStatefulSet removes the named stateful set from the model's
// namespace. A not-found error is treated as success.
func (k *kubernetesClient) deleteStatefulSet(name string) error {
	err := k.client().AppsV1().StatefulSets(k.namespace).Delete(name, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if k8serrors.IsNotFound(err) {
		return nil
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3919
|
ensureK8sService
|
train
|
// ensureK8sService updates the given service, creating it if it does
// not yet exist. Immutable fields (cluster IP, resource version) are
// copied from any existing service before the update.
func (k *kubernetesClient) ensureK8sService(spec *core.Service) error {
	services := k.client().CoreV1().Services(k.namespace)
	// Set any immutable fields if the service already exists.
	existing, err := services.Get(spec.Name, v1.GetOptions{IncludeUninitialized: true})
	if err == nil {
		spec.Spec.ClusterIP = existing.Spec.ClusterIP
		spec.ObjectMeta.ResourceVersion = existing.ObjectMeta.ResourceVersion
	}
	// Update first; fall back to create when the service is missing.
	_, err = services.Update(spec)
	if k8serrors.IsNotFound(err) {
		_, err = services.Create(spec)
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3920
|
deleteService
|
train
|
// deleteService removes the named service from the model's namespace.
// A not-found error is treated as success.
func (k *kubernetesClient) deleteService(deploymentName string) error {
	services := k.client().CoreV1().Services(k.namespace)
	err := services.Delete(deploymentName, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if k8serrors.IsNotFound(err) {
		return nil
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3921
|
ExposeService
|
train
|
// ExposeService creates or updates an ingress resource routing the
// configured external hostname and path to the application's service.
// The config controls the ingress class, SSL redirect/passthrough and
// plain-HTTP behaviour; an external hostname is mandatory.
func (k *kubernetesClient) ExposeService(appName string, resourceTags map[string]string, config application.ConfigAttributes) error {
	logger.Debugf("creating/updating ingress resource for %s", appName)
	host := config.GetString(caas.JujuExternalHostNameKey, "")
	if host == "" {
		return errors.Errorf("external hostname required")
	}
	ingressClass := config.GetString(ingressClassKey, defaultIngressClass)
	ingressSSLRedirect := config.GetBool(ingressSSLRedirectKey, defaultIngressSSLRedirect)
	ingressSSLPassthrough := config.GetBool(ingressSSLPassthroughKey, defaultIngressSSLPassthrough)
	ingressAllowHTTP := config.GetBool(ingressAllowHTTPKey, defaultIngressAllowHTTPKey)
	httpPath := config.GetString(caas.JujuApplicationPath, caas.JujuDefaultApplicationPath)
	// "$appname" is a placeholder meaning "use the application name".
	if httpPath == "$appname" {
		httpPath = appName
	}
	// Ingress paths must be absolute.
	if !strings.HasPrefix(httpPath, "/") {
		httpPath = "/" + httpPath
	}
	deploymentName := k.deploymentName(appName)
	svc, err := k.client().CoreV1().Services(k.namespace).Get(deploymentName, v1.GetOptions{})
	if err != nil {
		return errors.Trace(err)
	}
	// The ingress backend targets the service's first port.
	if len(svc.Spec.Ports) == 0 {
		return errors.Errorf("cannot create ingress rule for service %q without a port", svc.Name)
	}
	spec := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name:   deploymentName,
			Labels: resourceTags,
			Annotations: map[string]string{
				"ingress.kubernetes.io/rewrite-target":  "",
				"ingress.kubernetes.io/ssl-redirect":    strconv.FormatBool(ingressSSLRedirect),
				"kubernetes.io/ingress.class":           ingressClass,
				"kubernetes.io/ingress.allow-http":      strconv.FormatBool(ingressAllowHTTP),
				"ingress.kubernetes.io/ssl-passthrough": strconv.FormatBool(ingressSSLPassthrough),
			},
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{{
				Host: host,
				IngressRuleValue: v1beta1.IngressRuleValue{
					HTTP: &v1beta1.HTTPIngressRuleValue{
						Paths: []v1beta1.HTTPIngressPath{{
							Path: httpPath,
							Backend: v1beta1.IngressBackend{
								ServiceName: svc.Name, ServicePort: svc.Spec.Ports[0].TargetPort},
						}}},
				}}},
		},
	}
	return k.ensureIngress(spec)
}
|
go
|
{
"resource": ""
}
|
q3922
|
UnexposeService
|
train
|
// UnexposeService removes the ingress resource for the named application.
func (k *kubernetesClient) UnexposeService(appName string) error {
	logger.Debugf("deleting ingress resource for %s", appName)
	return k.deleteIngress(appName)
}
|
go
|
{
"resource": ""
}
|
q3923
|
WatchUnits
|
train
|
// WatchUnits returns a notify watcher that fires when the pods
// (units) belonging to the named application change.
func (k *kubernetesClient) WatchUnits(appName string) (watcher.NotifyWatcher, error) {
	selector := applicationSelector(appName)
	logger.Debugf("selecting units %q to watch", selector)
	w, err := k.client().CoreV1().Pods(k.namespace).Watch(v1.ListOptions{
		LabelSelector:        selector,
		Watch:                true,
		IncludeUninitialized: true,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Adapt the raw k8s watch into a juju notify watcher.
	return k.newWatcher(w, appName, k.clock)
}
|
go
|
{
"resource": ""
}
|
q3924
|
WatchService
|
train
|
// WatchService returns a notify watcher that fires when the workload
// backing the named application changes.
func (k *kubernetesClient) WatchService(appName string) (watcher.NotifyWatcher, error) {
	// Application may be a statefulset or deployment. It may not have
	// been set up when the watcher is started so we don't know which it
	// is ahead of time. So use a multi-watcher to cover both cases.
	statefulsets := k.client().AppsV1().StatefulSets(k.namespace)
	sswatcher, err := statefulsets.Watch(v1.ListOptions{
		LabelSelector: applicationSelector(appName),
		Watch:         true,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	w1, err := k.newWatcher(sswatcher, appName, k.clock)
	if err != nil {
		return nil, errors.Trace(err)
	}
	deployments := k.client().AppsV1().Deployments(k.namespace)
	dwatcher, err := deployments.Watch(v1.ListOptions{
		LabelSelector: applicationSelector(appName),
		Watch:         true,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	w2, err := k.newWatcher(dwatcher, appName, k.clock)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Combine both watchers so a change to either fires the result.
	return watcher.NewMultiNotifyWatcher(w1, w2), nil
}
|
go
|
{
"resource": ""
}
|
q3925
|
WatchOperator
|
train
|
// WatchOperator returns a notify watcher that fires when the operator
// pod for the named application changes.
func (k *kubernetesClient) WatchOperator(appName string) (watcher.NotifyWatcher, error) {
	pods := k.client().CoreV1().Pods(k.namespace)
	w, err := pods.Watch(v1.ListOptions{
		LabelSelector: operatorSelector(appName),
		Watch:         true,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return k.newWatcher(w, appName, k.clock)
}
|
go
|
{
"resource": ""
}
|
q3926
|
Operator
|
train
|
// Operator returns state (id, liveness, status) for the operator pod
// of the named application, or a not-found error if no operator pod
// exists.
func (k *kubernetesClient) Operator(appName string) (*caas.Operator, error) {
	pods := k.client().CoreV1().Pods(k.namespace)
	podsList, err := pods.List(v1.ListOptions{
		LabelSelector: operatorSelector(appName),
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(podsList.Items) == 0 {
		return nil, errors.NotFoundf("operator pod for application %q", appName)
	}

	opPod := podsList.Items[0]
	// A deletion timestamp means the pod is terminating.
	terminated := opPod.DeletionTimestamp != nil
	now := time.Now()
	statusMessage, opStatus, since, err := k.getPODStatus(opPod, now)
	if err != nil {
		// BUG FIX: this error was previously assigned but never checked.
		return nil, errors.Trace(err)
	}
	return &caas.Operator{
		Id:    string(opPod.UID),
		Dying: terminated,
		Status: status.StatusInfo{
			Status:  opStatus,
			Message: statusMessage,
			Since:   &since,
		},
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3927
|
ensureConfigMap
|
train
|
// ensureConfigMap updates the given config map, creating it if it
// does not yet exist.
func (k *kubernetesClient) ensureConfigMap(configMap *core.ConfigMap) error {
	configMaps := k.client().CoreV1().ConfigMaps(k.namespace)
	_, err := configMaps.Update(configMap)
	if k8serrors.IsNotFound(err) {
		_, err = configMaps.Create(configMap)
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3928
|
deleteConfigMap
|
train
|
// deleteConfigMap removes the named config map from the model's
// namespace. A not-found error is treated as success.
func (k *kubernetesClient) deleteConfigMap(configMapName string) error {
	err := k.client().CoreV1().ConfigMaps(k.namespace).Delete(configMapName, &v1.DeleteOptions{
		PropagationPolicy: &defaultPropagationPolicy,
	})
	if k8serrors.IsNotFound(err) {
		return nil
	}
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3929
|
createConfigMap
|
train
|
// createConfigMap creates a new config map in the model's namespace.
func (k *kubernetesClient) createConfigMap(configMap *core.ConfigMap) error {
	_, err := k.client().CoreV1().ConfigMaps(k.namespace).Create(configMap)
	return errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3930
|
getConfigMap
|
train
|
// getConfigMap returns the named config map from the model's namespace,
// translating a Kubernetes not-found error into a juju not-found error.
func (k *kubernetesClient) getConfigMap(cmName string) (*core.ConfigMap, error) {
	cm, err := k.client().CoreV1().ConfigMaps(k.namespace).Get(cmName, v1.GetOptions{IncludeUninitialized: true})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			return nil, errors.NotFoundf("configmap %q", cmName)
		}
		return nil, errors.Trace(err)
	}
	return cm, nil
}
|
go
|
{
"resource": ""
}
|
q3931
|
legacyAppName
|
train
|
// legacyAppName reports whether an operator stateful set using the
// legacy "juju-operator-<app>" naming scheme exists for the app.
// NOTE(review): any error from Get (not just not-found) yields false;
// transient API errors are indistinguishable from "not legacy" here.
func (k *kubernetesClient) legacyAppName(appName string) bool {
	statefulsets := k.client().AppsV1().StatefulSets(k.namespace)
	legacyName := "juju-operator-" + appName
	_, err := statefulsets.Get(legacyName, v1.GetOptions{IncludeUninitialized: true})
	return err == nil
}
|
go
|
{
"resource": ""
}
|
q3932
|
Model
|
train
|
// Model returns the model for the given UUID along with a release
// function that must be called when the model is no longer needed.
func (g StatePoolModelGetter) Model(modelUUID string) (Model, func(), error) {
	model, ph, err := g.StatePool.GetModel(modelUUID)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	// The closure releases the pooled state reference.
	return model, func() { ph.Release() }, nil
}
|
go
|
{
"resource": ""
}
|
q3933
|
NewRemoveCloudCommand
|
train
|
// NewRemoveCloudCommand returns a command to remove a named cloud,
// backed by the file-based juju client store.
func NewRemoveCloudCommand() cmd.Command {
	store := jujuclient.NewFileClientStore()
	c := &removeCloudCommand{
		OptionalControllerCommand: modelcmd.OptionalControllerCommand{Store: store},
		store:                     store,
	}
	// Default API factory; tests may override removeCloudAPIFunc.
	c.removeCloudAPIFunc = c.cloudAPI
	return modelcmd.WrapBase(c)
}
|
go
|
{
"resource": ""
}
|
q3934
|
newOracleVolumeSource
|
train
|
// newOracleVolumeSource returns a volume source for the given environ,
// model name/UUID and storage API client. env and api are required;
// a not-found error is returned if either is nil.
func newOracleVolumeSource(env *OracleEnviron, name, uuid string, api StorageAPI, clock clock.Clock) (*oracleVolumeSource, error) {
	if env == nil {
		return nil, errors.NotFoundf("environ")
	}
	if api == nil {
		return nil, errors.NotFoundf("storage client")
	}
	return &oracleVolumeSource{
		env:       env,
		envName:   name,
		modelUUID: uuid,
		api:       api,
		clock:     clock,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3935
|
resourceName
|
train
|
// resourceName composes the provider-qualified resource name for the
// given storage tag, scoped to this model's namespace and environ name.
func (s *oracleVolumeSource) resourceName(tag string) string {
	return s.api.ComposeName(s.env.namespace.Value(s.envName + "-" + tag))
}
|
go
|
{
"resource": ""
}
|
q3936
|
CreateVolumes
|
train
|
// CreateVolumes creates a volume for each of the given params,
// returning one result per input in the same order. Individual
// failures are recorded in the corresponding result rather than
// aborting the whole batch.
func (s *oracleVolumeSource) CreateVolumes(ctx context.ProviderCallContext, params []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
	if params == nil {
		return []storage.CreateVolumesResult{}, nil
	}
	results := make([]storage.CreateVolumesResult, len(params))
	for i, volume := range params {
		vol, err := s.createVolume(volume)
		if err != nil {
			results[i].Error = errors.Trace(err)
			continue
		}
		results[i].Volume = vol
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q3937
|
fetchVolumeStatus
|
train
|
// fetchVolumeStatus reports whether the named volume has reached the
// desired status. A volume in the error state returns an error
// carrying the provider's status detail.
func (s *oracleVolumeSource) fetchVolumeStatus(name, desiredStatus string) (complete bool, err error) {
	details, err := s.api.StorageVolumeDetails(name)
	if err != nil {
		return false, errors.Trace(err)
	}
	if details.Status == ociCommon.VolumeError {
		return false, errors.Errorf("volume entered error state: %q", details.Status_detail)
	}
	return string(details.Status) == desiredStatus, nil
}
|
go
|
{
"resource": ""
}
|
q3938
|
fetchVolumeAttachmentStatus
|
train
|
// fetchVolumeAttachmentStatus reports whether the named volume
// attachment has reached the desired state.
func (s *oracleVolumeSource) fetchVolumeAttachmentStatus(name, desiredStatus string) (bool, error) {
	details, err := s.api.StorageAttachmentDetails(name)
	if err != nil {
		return false, errors.Trace(err)
	}
	return string(details.State) == desiredStatus, nil
}
|
go
|
{
"resource": ""
}
|
q3939
|
getFreeIndexNumber
|
train
|
// getFreeIndexNumber returns an index in [1, max) that is not in the
// existing list, or an error when none is available. existing is
// sorted in place as a side effect.
func (s *oracleVolumeSource) getFreeIndexNumber(existing []int, max int) (int, error) {
	if len(existing) == 0 {
		return 1, nil
	}
	sort.Ints(existing)
	for i := 0; i <= len(existing)-1; i++ {
		// Stop before exceeding the maximum allowed index.
		if i+1 >= max {
			break
		}
		// Reached the last existing index: use the next one up.
		// NOTE(review): this check precedes the existing[0] > 1 check,
		// so e.g. existing=[5] returns 6 rather than reusing a lower
		// free index — confirm that is the intended allocation policy.
		if i+1 == len(existing) {
			return existing[i] + 1, nil
		}
		// Reuse index space below the smallest existing index.
		if existing[0] > 1 {
			return existing[0] - 1, nil
		}
		// Fill the first gap between consecutive existing indices.
		diff := existing[i+1] - existing[i]
		if diff > 1 {
			return existing[i] + 1, nil
		}
	}
	return 0, errors.Errorf("no free index")
}
|
go
|
{
"resource": ""
}
|
q3940
|
NewCloudSpec
|
train
|
// NewCloudSpec returns a CloudSpecAPI built from the given resource
// registry, cloud spec getter/watcher functions and authorization check.
func NewCloudSpec(
	resources facade.Resources,
	getCloudSpec func(names.ModelTag) (environs.CloudSpec, error),
	watchCloudSpec func(tag names.ModelTag) (state.NotifyWatcher, error),
	getAuthFunc common.GetAuthFunc,
) CloudSpecAPI {
	return cloudSpecAPI{resources, getCloudSpec, watchCloudSpec, getAuthFunc}
}
|
go
|
{
"resource": ""
}
|
q3941
|
CloudSpec
|
train
|
// CloudSpec returns the cloud spec for each model tag in args,
// one result per entity. Parse and permission failures are recorded
// per-entity rather than failing the whole call.
func (s cloudSpecAPI) CloudSpec(args params.Entities) (params.CloudSpecResults, error) {
	authFunc, err := s.getAuthFunc()
	if err != nil {
		return params.CloudSpecResults{}, err
	}
	results := params.CloudSpecResults{
		Results: make([]params.CloudSpecResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseModelTag(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		// Deny access to models the caller is not authorized for.
		if !authFunc(tag) {
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		results.Results[i] = s.GetCloudSpec(tag)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q3942
|
GetCloudSpec
|
train
|
func (s cloudSpecAPI) GetCloudSpec(tag names.ModelTag) params.CloudSpecResult {
var result params.CloudSpecResult
spec, err := s.getCloudSpec(tag)
if err != nil {
result.Error = common.ServerError(err)
return result
}
var paramsCloudCredential *params.CloudCredential
if spec.Credential != nil && spec.Credential.AuthType() != "" {
paramsCloudCredential = ¶ms.CloudCredential{
AuthType: string(spec.Credential.AuthType()),
Attributes: spec.Credential.Attributes(),
}
}
result.Result = ¶ms.CloudSpec{
Type: spec.Type,
Name: spec.Name,
Region: spec.Region,
Endpoint: spec.Endpoint,
IdentityEndpoint: spec.IdentityEndpoint,
StorageEndpoint: spec.StorageEndpoint,
Credential: paramsCloudCredential,
CACertificates: spec.CACertificates,
}
return result
}
|
go
|
{
"resource": ""
}
|
q3943
|
WatchCloudSpecsChanges
|
train
|
// WatchCloudSpecsChanges returns a notify watcher per entity that
// fires when the corresponding model's cloud spec changes. Parse and
// permission failures are recorded per-entity.
func (s cloudSpecAPI) WatchCloudSpecsChanges(args params.Entities) (params.NotifyWatchResults, error) {
	authFunc, err := s.getAuthFunc()
	if err != nil {
		return params.NotifyWatchResults{}, err
	}
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseModelTag(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		// Deny access to models the caller is not authorized for.
		if !authFunc(tag) {
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		w, err := s.watchCloudSpecChanges(tag)
		if err == nil {
			results.Results[i] = w
		} else {
			results.Results[i].Error = common.ServerError(err)
		}
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q3944
|
IsCmdNotFoundErr
|
train
|
// IsCmdNotFoundErr reports whether err indicates that an executable
// could not be found on the system.
func IsCmdNotFoundErr(err error) bool {
	cause := errors.Cause(err)
	// Go 1.3 and later report a missing executable directly.
	if os.IsNotExist(cause) || cause == exec.ErrNotFound {
		return true
	}
	// Go 1.2 wrapped the underlying error in *exec.Error.
	execErr, ok := cause.(*exec.Error)
	return ok && (os.IsNotExist(execErr.Err) || execErr.Err == exec.ErrNotFound)
}
|
go
|
{
"resource": ""
}
|
q3945
|
AllAttrs
|
train
|
// AllAttrs returns a copy of the configuration values as a plain
// attribute map; mutating the copy does not affect the receiver.
func (c ConfigValues) AllAttrs() map[string]interface{} {
	attrs := make(map[string]interface{}, len(c))
	for name, value := range c {
		attrs[name] = value
	}
	return attrs
}
|
go
|
{
"resource": ""
}
|
q3946
|
NewFacade
|
train
|
// NewFacade returns a singular-claimer facade for the given claimant
// machine and claimed entity, backed by the supplied API caller.
func NewFacade(apiCaller base.APICaller, claimant names.MachineTag, entity names.Tag) (Facade, error) {
	facade, err := singular.NewAPI(apiCaller, claimant, entity)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return facade, nil
}
|
go
|
{
"resource": ""
}
|
q3947
|
NewWorker
|
train
|
// NewWorker returns a flag worker built from the given config,
// exposed as a plain worker.Worker.
func NewWorker(config FlagConfig) (worker.Worker, error) {
	worker, err := NewFlagWorker(config)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return worker, nil
}
|
go
|
{
"resource": ""
}
|
q3948
|
NewSimpleWorker
|
train
|
// NewSimpleWorker returns a worker that runs doWork in a goroutine.
// The channel passed to doWork is closed when the worker is killed;
// the worker's final error is whatever doWork returns.
func NewSimpleWorker(doWork func(stopCh <-chan struct{}) error) worker.Worker {
	w := &simpleWorker{}
	w.tomb.Go(func() error {
		return doWork(w.tomb.Dying())
	})
	return w
}
|
go
|
{
"resource": ""
}
|
q3949
|
NewUnregisterCmd
|
train
|
// NewUnregisterCmd returns an UnregisterCmd bound to the component
// hook context derived from ctx.
func NewUnregisterCmd(ctx HookContext) (*UnregisterCmd, error) {
	return &UnregisterCmd{hookContextFunc: componentHookContext(ctx)}, nil
}
|
go
|
{
"resource": ""
}
|
q3950
|
Run
|
train
|
// Run untracks the payload identified by the command's class and id,
// then flushes the hook context so status reflects the change.
func (c *UnregisterCmd) Run(ctx *cmd.Context) error {
	//TODO(wwitzel3) make Unregister accept class and id and
	// compose the ID in the API layer using BuildID
	logger.Tracef(`Running unregister command with id "%s/%s"`, c.class, c.id)
	hctx, err := c.hookContextFunc()
	if err != nil {
		return errors.Trace(err)
	}
	// TODO(ericsnow) Verify that Untrack gives a meaningful error when
	// the ID is not found.
	if err := hctx.Untrack(c.class, c.id); err != nil {
		return errors.Trace(err)
	}
	// TODO(ericsnow) Is the flush really necessary?
	// We flush to state immediately so that status reflects the
	// payload correctly.
	if err := hctx.Flush(); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3951
|
startIntrospection
|
train
|
// startIntrospection starts the introspection worker listening on the
// agent's abstract domain socket. It is a no-op on non-Linux platforms.
// The worker is stopped when the dependency engine exits.
func startIntrospection(cfg introspectionConfig) error {
	if runtime.GOOS != "linux" {
		logger.Debugf("introspection worker not supported on %q", runtime.GOOS)
		return nil
	}
	socketName := cfg.NewSocketName(cfg.Agent.CurrentConfig().Tag())
	w, err := cfg.WorkerFunc(introspection.Config{
		SocketName:         socketName,
		DepEngine:          cfg.Engine,
		StatePool:          cfg.StatePoolReporter,
		PubSub:             cfg.PubSubReporter,
		MachineLock:        cfg.MachineLock,
		PrometheusGatherer: cfg.PrometheusGatherer,
		Presence:           cfg.PresenceRecorder,
	})
	if err != nil {
		return errors.Trace(err)
	}
	// Tie the introspection worker's lifetime to the engine's.
	go func() {
		cfg.Engine.Wait()
		logger.Debugf("engine stopped, stopping introspection")
		w.Kill()
		w.Wait()
		logger.Debugf("introspection stopped")
	}()
	return nil
}
|
go
|
{
"resource": ""
}
|
q3952
|
newPrometheusRegistry
|
train
|
// newPrometheusRegistry returns a fresh prometheus registry with the
// standard Go runtime and process collectors already registered.
func newPrometheusRegistry() (*prometheus.Registry, error) {
	r := prometheus.NewRegistry()
	if err := r.Register(prometheus.NewGoCollector()); err != nil {
		return nil, errors.Trace(err)
	}
	if err := r.Register(prometheus.NewProcessCollector(
		prometheus.ProcessCollectorOpts{})); err != nil {
		return nil, errors.Trace(err)
	}
	return r, nil
}
|
go
|
{
"resource": ""
}
|
q3953
|
NextSubnetIP
|
train
|
// NextSubnetIP returns the next available host address in subnet,
// skipping the reserved low addresses, non-global-unicast addresses,
// the broadcast address, and any address listed in ipsInUse.
func NextSubnetIP(subnet *net.IPNet, ipsInUse []net.IP) (net.IP, error) {
	maskOnes, maskBits := subnet.Mask.Size()
	maskValue := ipUint32(net.IP(subnet.Mask))

	// Record each in-use address as a bit keyed by its host index.
	used := big.NewInt(0)
	for _, addr := range ipsInUse {
		if subnet.Contains(addr) {
			used.SetBit(used, ipIndex(addr, maskValue), 1)
		}
	}

	// Scan candidate host offsets, starting just past the reserved
	// range and stopping short of the all-ones host part, which is the
	// subnet's broadcast address.
	base := ipUint32(subnet.IP)
	for offset := reservedAddressRangeEnd + 1; offset < (1<<uint64(maskBits-maskOnes) - 1); offset++ {
		candidate := uint32IP(base + uint32(offset))
		if !candidate.IsGlobalUnicast() {
			continue
		}
		if used.Bit(ipIndex(candidate, maskValue)) == 0 {
			return candidate, nil
		}
	}
	return nil, errors.Errorf("no addresses available in %s", subnet)
}
|
go
|
{
"resource": ""
}
|
q3954
|
NthSubnetIP
|
train
|
// NthSubnetIP returns the n'th (zero-based) usable host address in
// subnet, counting from just past the reserved address range and
// skipping non-global-unicast addresses. It returns nil if the subnet
// does not contain that many usable addresses.
func NthSubnetIP(subnet *net.IPNet, n int) net.IP {
	maskOnes, maskBits := subnet.Mask.Size()
	base := ipUint32(subnet.IP)
	seen := 0
	for offset := reservedAddressRangeEnd + 1; offset < (1<<uint64(maskBits-maskOnes) - 1); offset++ {
		candidate := uint32IP(base + uint32(offset))
		if !candidate.IsGlobalUnicast() {
			continue
		}
		if seen == n {
			return candidate
		}
		seen++
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3955
|
processNewConfig
|
train
|
// processNewConfig reacts to a log-forwarding config change. It closes
// the current sender when appropriate and, if forwarding is enabled and
// the new config is valid, opens and returns a new tracking sink. The
// returned sender is the one the caller should use from now on (it may
// be nil, or the unchanged currentSender when the new config is invalid).
func (lf *LogForwarder) processNewConfig(currentSender SendCloser) (SendCloser, error) {
	lf.mu.Lock()
	defer lf.mu.Unlock()
	closeExisting := func() error {
		lf.enabled = false
		// If we are already sending, close the current sender.
		if currentSender != nil {
			return currentSender.Close()
		}
		return nil
	}
	// Get the new config and set up log forwarding if enabled.
	cfg, ok, err := lf.args.LogForwardConfig.LogForwardConfig()
	if err != nil {
		// NOTE(review): the close error is dropped here in favour of
		// the config error — presumably intentional, but worth confirming.
		closeExisting()
		return nil, errors.Trace(err)
	}
	if !ok || !cfg.Enabled {
		logger.Infof("config change - log forwarding not enabled")
		return nil, closeExisting()
	}
	// If the config is not valid, we don't want to exit with an error
	// and bounce the worker; we'll just log the issue and wait for another
	// config change to come through.
	// We'll continue sending using the current sink.
	if err := cfg.Validate(); err != nil {
		logger.Errorf("invalid log forward config change: %v", err)
		return currentSender, nil
	}
	// Shutdown the existing sink since we need to now create a new one.
	if err := closeExisting(); err != nil {
		return nil, errors.Trace(err)
	}
	sink, err := OpenTrackingSink(TrackingSinkArgs{
		Name:     lf.args.Name,
		Config:   cfg,
		Caller:   lf.args.Caller,
		OpenSink: lf.args.OpenSink,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	lf.enabledCh <- true
	return sink, nil
}
|
go
|
{
"resource": ""
}
|
q3956
|
waitForEnabled
|
train
|
// waitForEnabled blocks until log forwarding is enabled, or the worker
// is dying. It returns true once forwarding is enabled, or false with
// tomb.ErrDying when the catacomb is shutting down.
func (lf *LogForwarder) waitForEnabled() (bool, error) {
	lf.mu.Lock()
	enabled := lf.enabled
	lf.mu.Unlock()
	if enabled {
		return true, nil
	}
	select {
	case <-lf.catacomb.Dying():
		return false, tomb.ErrDying
	case enabled = <-lf.enabledCh:
	}
	lf.mu.Lock()
	defer lf.mu.Unlock()
	// Log only on the transition from disabled to enabled.
	if !lf.enabled && enabled {
		logger.Infof("log forward enabled, starting to stream logs to syslog sink")
	}
	lf.enabled = enabled
	return enabled, nil
}
|
go
|
{
"resource": ""
}
|
q3957
|
NewLogForwarder
|
train
|
// NewLogForwarder starts a log forwarding worker using the given
// arguments. The worker runs its main loop inside a catacomb so it can
// be killed and waited for like any other worker.
func NewLogForwarder(args OpenLogForwarderArgs) (*LogForwarder, error) {
	lf := &LogForwarder{
		args:      args,
		enabledCh: make(chan bool, 1),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &lf.catacomb,
		Work: func() error {
			return errors.Trace(lf.loop())
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return lf, nil
}
|
go
|
{
"resource": ""
}
|
q3958
|
watchProfileChangesLoop
|
train
|
// watchProfileChangesLoop reacts to charm profile change notifications
// for this machine, fetching the profiling info and applying the
// changes. It returns nil — stopping work for this machine without
// restarting the worker — when the profile info is no longer valid or
// the machine has been removed and is Dead.
func (m MutaterMachine) watchProfileChangesLoop(removed <-chan struct{}, profileChangeWatcher watcher.NotifyWatcher) error {
	m.logger.Tracef("watching change on MutaterMachine %s", m.id)
	for {
		select {
		case <-m.context.dying():
			return m.context.errDying()
		case <-profileChangeWatcher.Changes():
			info, err := m.machineApi.CharmProfilingInfo()
			if err != nil {
				// If the machine is not provisioned then we need to wait for
				// new changes from the watcher.
				if params.IsCodeNotProvisioned(errors.Cause(err)) {
					m.logger.Tracef("got not provisioned machine-%s on charm profiling info, wait for another change", m.id)
					continue
				}
				return errors.Trace(err)
			}
			if err = m.processMachineProfileChanges(info); err != nil && errors.IsNotValid(err) {
				// Return to stop mutating the machine, but no need to restart
				// the worker.
				return nil
			} else if err != nil {
				return errors.Trace(err)
			}
		case <-removed:
			if err := m.machineApi.Refresh(); err != nil {
				return errors.Trace(err)
			}
			if m.machineApi.Life() == params.Dead {
				return nil
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3959
|
NewControllerAPIv7
|
train
|
// NewControllerAPIv7 creates a new ControllerAPI (v7) from the facade
// context.
func NewControllerAPIv7(ctx facade.Context) (*ControllerAPI, error) {
	return NewControllerAPI(
		ctx.State(),
		ctx.StatePool(),
		ctx.Auth(),
		ctx.Resources(),
		ctx.Presence(),
		ctx.Hub(),
	)
}
|
go
|
{
"resource": ""
}
|
q3960
|
NewControllerAPIv6
|
train
|
// NewControllerAPIv6 creates a new ControllerAPIv6, wrapping the v7 API.
func NewControllerAPIv6(ctx facade.Context) (*ControllerAPIv6, error) {
	inner, err := NewControllerAPIv7(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ControllerAPIv6{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q3961
|
NewControllerAPIv5
|
train
|
// NewControllerAPIv5 creates a new ControllerAPIv5, wrapping the v6 API.
func NewControllerAPIv5(ctx facade.Context) (*ControllerAPIv5, error) {
	inner, err := NewControllerAPIv6(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ControllerAPIv5{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q3962
|
NewControllerAPIv4
|
train
|
// NewControllerAPIv4 creates a new ControllerAPIv4, wrapping the v5 API.
func NewControllerAPIv4(ctx facade.Context) (*ControllerAPIv4, error) {
	inner, err := NewControllerAPIv5(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ControllerAPIv4{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q3963
|
NewControllerAPIv3
|
train
|
// NewControllerAPIv3 creates a new ControllerAPIv3, wrapping the v4 API.
func NewControllerAPIv3(ctx facade.Context) (*ControllerAPIv3, error) {
	inner, err := NewControllerAPIv4(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ControllerAPIv3{inner}, nil
}
|
go
|
{
"resource": ""
}
|
q3964
|
NewControllerAPI
|
train
|
// NewControllerAPI creates a new controller API facade backed by the
// given state, pool, and server resources. Access is restricted to
// authenticated clients (user connections); agent connections are
// rejected with ErrPerm.
func NewControllerAPI(
	st *state.State,
	pool *state.StatePool,
	authorizer facade.Authorizer,
	resources facade.Resources,
	presence facade.Presence,
	hub facade.Hub,
) (*ControllerAPI, error) {
	if !authorizer.AuthClient() {
		return nil, errors.Trace(common.ErrPerm)
	}
	// Since we know this is a user tag (because AuthClient is true),
	// we just do the type assertion to the UserTag.
	apiUser, _ := authorizer.GetAuthTag().(names.UserTag)
	model, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &ControllerAPI{
		ControllerConfigAPI: common.NewStateControllerConfig(st),
		ModelStatusAPI: common.NewModelStatusAPI(
			common.NewModelManagerBackend(model, pool),
			authorizer,
			apiUser,
		),
		CloudSpecAPI: cloudspec.NewCloudSpec(
			resources,
			cloudspec.MakeCloudSpecGetter(pool),
			cloudspec.MakeCloudSpecWatcherForModel(st),
			common.AuthFuncForTag(model.ModelTag()),
		),
		state:      st,
		statePool:  pool,
		authorizer: authorizer,
		apiUser:    apiUser,
		resources:  resources,
		presence:   presence,
		hub:        hub,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3965
|
MongoVersion
|
train
|
// MongoVersion returns the version of Mongo used by the controller.
// Requires controller admin permission.
func (c *ControllerAPI) MongoVersion() (params.StringResult, error) {
	if err := c.checkHasAdmin(); err != nil {
		return params.StringResult{}, errors.Trace(err)
	}
	version, err := c.state.MongoVersion()
	if err != nil {
		return params.StringResult{}, errors.Trace(err)
	}
	return params.StringResult{Result: version}, nil
}
|
go
|
{
"resource": ""
}
|
q3966
|
ListBlockedModels
|
train
|
// ListBlockedModels returns every model on the controller that has one
// or more blocks in place, with the block type names, sorted by model
// name then owner. Requires controller admin permission.
func (c *ControllerAPI) ListBlockedModels() (params.ModelBlockInfoList, error) {
	results := params.ModelBlockInfoList{}
	if err := c.checkHasAdmin(); err != nil {
		return results, errors.Trace(err)
	}
	blocks, err := c.state.AllBlocksForController()
	if err != nil {
		return results, errors.Trace(err)
	}
	// Group block type names by model UUID.
	modelBlocks := make(map[string][]string)
	for _, block := range blocks {
		uuid := block.ModelUUID()
		modelBlocks[uuid] = append(modelBlocks[uuid], block.Type().String())
	}
	for uuid, blockTypes := range modelBlocks {
		model, ph, err := c.statePool.GetModel(uuid)
		if err != nil {
			// The model may have gone away; skip it rather than fail.
			logger.Debugf("unable to retrieve model %s: %v", uuid, err)
			continue
		}
		results.Models = append(results.Models, params.ModelBlockInfo{
			UUID:     model.UUID(),
			Name:     model.Name(),
			OwnerTag: model.Owner().String(),
			Blocks:   blockTypes,
		})
		ph.Release()
	}
	// Sort the resulting sequence by model name, then owner.
	sort.Sort(orderedBlockInfo(results.Models))
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q3967
|
ModelConfig
|
train
|
// ModelConfig returns the controller model's configuration as a map of
// name/value pairs. Requires controller admin permission.
func (c *ControllerAPI) ModelConfig() (params.ModelConfigResults, error) {
	result := params.ModelConfigResults{}
	if err := c.checkHasAdmin(); err != nil {
		return result, errors.Trace(err)
	}
	controllerState := c.statePool.SystemState()
	controllerModel, err := controllerState.Model()
	if err != nil {
		return result, errors.Trace(err)
	}
	cfg, err := controllerModel.Config()
	if err != nil {
		return result, errors.Trace(err)
	}
	result.Config = make(map[string]params.ConfigValue)
	for name, val := range cfg.AllAttrs() {
		result.Config[name] = params.ConfigValue{
			Value: val,
		}
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3968
|
HostedModelConfigs
|
train
|
// HostedModelConfigs returns the name, owner, config (or per-model
// error), and cloud spec of every hosted model — that is, every model
// other than the controller model. Requires controller admin permission.
func (c *ControllerAPI) HostedModelConfigs() (params.HostedModelConfigsResults, error) {
	result := params.HostedModelConfigsResults{}
	if err := c.checkHasAdmin(); err != nil {
		return result, errors.Trace(err)
	}
	modelUUIDs, err := c.state.AllModelUUIDs()
	if err != nil {
		return result, errors.Trace(err)
	}
	for _, modelUUID := range modelUUIDs {
		if modelUUID == c.state.ControllerModelUUID() {
			continue
		}
		config, err := c.hostedModelConfig(modelUUID)
		if err != nil {
			// This model could have been removed.
			if errors.IsNotFound(err) {
				continue
			}
			return result, errors.Trace(err)
		}
		result.Models = append(result.Models, config)
	}
	return result, nil
}

// hostedModelConfig assembles the HostedModelConfig for one model. It
// is a separate function so that the deferred Release of the pooled
// state fires per model; the original deferred inside the caller's
// loop, pinning every model's state until the whole request finished.
func (c *ControllerAPI) hostedModelConfig(modelUUID string) (params.HostedModelConfig, error) {
	st, err := c.statePool.Get(modelUUID)
	if err != nil {
		return params.HostedModelConfig{}, err
	}
	defer st.Release()
	model, err := st.Model()
	if err != nil {
		return params.HostedModelConfig{}, err
	}
	config := params.HostedModelConfig{
		Name:     model.Name(),
		OwnerTag: model.Owner().String(),
	}
	modelConf, err := model.Config()
	if err != nil {
		config.Error = common.ServerError(err)
	} else {
		config.Config = modelConf.AllAttrs()
	}
	// Only attempt the cloud spec when the config itself was readable.
	cloudSpec := c.GetCloudSpec(model.ModelTag())
	if config.Error == nil {
		config.CloudSpec = cloudSpec.Result
		config.Error = cloudSpec.Error
	}
	return config, nil
}
|
go
|
{
"resource": ""
}
|
q3969
|
WatchAllModels
|
train
|
// WatchAllModels starts watching events for all models in the
// controller, registering the watcher and returning its id for use with
// subsequent Next calls. Requires controller admin permission.
func (c *ControllerAPI) WatchAllModels() (params.AllWatcherId, error) {
	if err := c.checkHasAdmin(); err != nil {
		return params.AllWatcherId{}, errors.Trace(err)
	}
	w := c.state.WatchAllModels(c.statePool)
	return params.AllWatcherId{
		AllWatcherId: c.resources.Register(w),
	}, nil
}
|
go
|
{
"resource": ""
}
|
q3970
|
GetControllerAccess
|
train
|
func (c *ControllerAPI) GetControllerAccess(req params.Entities) (params.UserAccessResults, error) {
results := params.UserAccessResults{}
isAdmin, err := c.authorizer.HasPermission(permission.SuperuserAccess, c.state.ControllerTag())
if err != nil {
return results, errors.Trace(err)
}
users := req.Entities
results.Results = make([]params.UserAccessResult, len(users))
for i, user := range users {
userTag, err := names.ParseUserTag(user.Tag)
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
if !isAdmin && !c.authorizer.AuthOwner(userTag) {
results.Results[i].Error = common.ServerError(common.ErrPerm)
continue
}
access, err := c.state.UserPermission(userTag, c.state.ControllerTag())
if err != nil {
results.Results[i].Error = common.ServerError(err)
continue
}
results.Results[i].Result = ¶ms.UserAccess{
Access: string(access),
UserTag: userTag.String()}
}
return results, nil
}
|
go
|
{
"resource": ""
}
|
q3971
|
InitiateMigration
|
train
|
// InitiateMigration attempts to begin the migration of one or more
// models to other controllers, returning one result (migration id or
// error) per spec. Requires controller admin permission.
func (c *ControllerAPI) InitiateMigration(reqArgs params.InitiateMigrationArgs) (
	params.InitiateMigrationResults, error,
) {
	out := params.InitiateMigrationResults{
		Results: make([]params.InitiateMigrationResult, len(reqArgs.Specs)),
	}
	if err := c.checkHasAdmin(); err != nil {
		return out, errors.Trace(err)
	}
	for i, spec := range reqArgs.Specs {
		result := &out.Results[i]
		result.ModelTag = spec.ModelTag
		id, err := c.initiateOneMigration(spec)
		if err != nil {
			result.Error = common.ServerError(err)
		} else {
			result.MigrationId = id
		}
	}
	return out, nil
}
|
go
|
{
"resource": ""
}
|
q3972
|
ModifyControllerAccess
|
train
|
// ModifyControllerAccess grants or revokes controller access for the
// users named in the request, one result per change. Only superusers
// may modify access; each change is validated and applied
// independently, with per-change errors reported in the results.
func (c *ControllerAPI) ModifyControllerAccess(args params.ModifyControllerAccessRequest) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Changes)),
	}
	if len(args.Changes) == 0 {
		return result, nil
	}
	hasPermission, err := c.authorizer.HasPermission(permission.SuperuserAccess, c.state.ControllerTag())
	if err != nil {
		return result, errors.Trace(err)
	}
	for i, arg := range args.Changes {
		if !hasPermission {
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		controllerAccess := permission.Access(arg.Access)
		if err := permission.ValidateControllerAccess(controllerAccess); err != nil {
			// TODO(wallyworld) - remove in Juju 3.0
			// Backwards compatibility requires us to accept add-model.
			if controllerAccess != permission.AddModelAccess {
				result.Results[i].Error = common.ServerError(err)
				continue
			}
		}
		targetUserTag, err := names.ParseUserTag(arg.UserTag)
		if err != nil {
			result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not modify controller access"))
			continue
		}
		result.Results[i].Error = common.ServerError(
			ChangeControllerAccess(c.state, c.apiUser, targetUserTag, arg.Action, controllerAccess))
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q3973
|
ConfigSet
|
train
|
// ConfigSet updates the controller configuration with the supplied
// values and publishes the resulting config over the hub so interested
// workers (e.g. the API server) can react to the change. Requires
// controller admin permission.
func (c *ControllerAPI) ConfigSet(args params.ControllerConfigSet) error {
	if err := c.checkHasAdmin(); err != nil {
		return errors.Trace(err)
	}
	if err := c.state.UpdateControllerConfig(args.Config, nil); err != nil {
		return errors.Trace(err)
	}
	// TODO(thumper): add a version to controller config to allow for
	// simultaneous updates and races in publishing, potentially across
	// HA servers.
	cfg, err := c.state.ControllerConfig()
	if err != nil {
		return errors.Trace(err)
	}
	if _, err := c.hub.Publish(
		controller.ConfigChanged,
		controller.ConfigChangedMessage{cfg}); err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3974
|
grantControllerCloudAccess
|
train
|
// grantControllerCloudAccess grants the given access level on the
// controller's cloud to targetUserTag. If the user already has access,
// the level is upgraded only when the new level is strictly greater;
// otherwise an error is returned.
func grantControllerCloudAccess(accessor *state.State, targetUserTag names.UserTag, access permission.Access) error {
	controllerInfo, err := accessor.ControllerInfo()
	if err != nil {
		return errors.Trace(err)
	}
	cloud := controllerInfo.CloudName
	err = accessor.CreateCloudAccess(cloud, targetUserTag, access)
	if errors.IsAlreadyExists(err) {
		// Note: err is shadowed inside this branch; the AlreadyExists
		// error from CreateCloudAccess is deliberately discarded.
		cloudAccess, err := accessor.GetCloudAccess(cloud, targetUserTag)
		if errors.IsNotFound(err) {
			// Conflicts with prior check, must be inconsistent state.
			err = txn.ErrExcessiveContention
		}
		if err != nil {
			return errors.Annotate(err, "could not look up cloud access for user")
		}
		// Only set access if greater access is being granted.
		if cloudAccess.EqualOrGreaterCloudAccessThan(access) {
			return errors.Errorf("user already has %q access or greater", access)
		}
		if _, err = accessor.SetUserAccess(targetUserTag, names.NewCloudTag(cloud), access); err != nil {
			return errors.Annotate(err, "could not set cloud access for user")
		}
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3975
|
ChangeControllerAccess
|
train
|
// ChangeControllerAccess performs the requested grant or revoke of
// controller access for targetUserTag, on behalf of apiUser. Unknown
// actions are rejected with an error.
func ChangeControllerAccess(accessor *state.State, apiUser, targetUserTag names.UserTag, action params.ControllerAction, access permission.Access) error {
	switch action {
	case params.GrantControllerAccess:
		if err := grantControllerAccess(accessor, targetUserTag, apiUser, access); err != nil {
			return errors.Annotate(err, "could not grant controller access")
		}
		return nil
	case params.RevokeControllerAccess:
		return revokeControllerAccess(accessor, targetUserTag, apiUser, access)
	}
	return errors.Errorf("unknown action %q", action)
}
|
go
|
{
"resource": ""
}
|
q3976
|
Check
|
train
|
// Check validates the token's lease key and holder name, then submits a
// check to the manager over the checks channel and waits for the
// response (or for the manager to stop).
func (t token) Check(attempt int, trapdoorKey interface{}) error {
	// This validation, which could be done at Token creation time, is deferred
	// until this point for historical reasons. In particular, this code was
	// extracted from a *leadership* implementation which has a LeadershipCheck
	// method returning a token; if it returned an error as well it would seem
	// to imply that the method implemented a check itself, rather than a check
	// factory.
	//
	// Fixing that would be great but seems out of scope.
	if err := t.secretary.CheckLease(t.leaseKey); err != nil {
		return errors.Annotatef(err, "cannot check lease %q", t.leaseKey.Lease)
	}
	if err := t.secretary.CheckHolder(t.holderName); err != nil {
		return errors.Annotatef(err, "cannot check holder %q", t.holderName)
	}
	return check{
		leaseKey:    t.leaseKey,
		holderName:  t.holderName,
		attempt:     attempt,
		trapdoorKey: trapdoorKey,
		response:    make(chan error),
		stop:        t.stop,
	}.invoke(t.checks)
}
|
go
|
{
"resource": ""
}
|
q3977
|
invoke
|
train
|
// invoke sends the check over ch exactly once and then waits for the
// response. Setting ch to nil after a successful send disables that
// select case, so later iterations only wait for the response or the
// stop signal.
func (c check) invoke(ch chan<- check) error {
	for {
		select {
		case <-c.stop:
			return errStopped
		case ch <- c:
			ch = nil
		case err := <-c.response:
			return errors.Trace(err)
		}
	}
}
|
go
|
{
"resource": ""
}
|
q3978
|
respond
|
train
|
// respond delivers the check result back to the waiting caller, unless
// the manager is stopping (in which case the result is dropped).
func (c check) respond(err error) {
	select {
	case <-c.stop:
	case c.response <- err:
	}
}
|
go
|
{
"resource": ""
}
|
q3979
|
NewDataSource
|
train
|
// NewDataSource returns a simplestreams data source rooted at baseURL
// whose metadata must be signed with the Juju public key.
func NewDataSource(baseURL string) simplestreams.DataSource {
	return simplestreams.NewURLSignedDataSource(
		sourceDescription,
		baseURL,
		keys.JujuPublicKey,
		utils.VerifySSLHostnames,
		simplestreams.DEFAULT_CLOUD_DATA,
		true, // requireSigned
	)
}
|
go
|
{
"resource": ""
}
|
q3980
|
FetchMetadata
|
train
|
// FetchMetadata fetches and returns simplestreams metadata for the
// given stream from the given sources, constrained to the current Juju
// major version, sorted using the byVersion ordering.
func FetchMetadata(stream string, sources ...simplestreams.DataSource) ([]*Metadata, error) {
	params := simplestreams.GetMetadataParams{
		StreamsVersion: streamsVersion,
		LookupConstraint: &constraint{
			LookupParams: simplestreams.LookupParams{Stream: stream},
			majorVersion: jujuversion.Current.Major,
		},
		ValueParams: simplestreams.ValueParams{
			DataType:        downloadType,
			MirrorContentId: contentId(stream),
			FilterFunc:      appendArchives,
			ValueTemplate:   Metadata{},
		},
	}
	items, _, err := simplestreams.GetMetadata(sources, params)
	if err != nil {
		return nil, errors.Annotate(err, "error fetching simplestreams metadata")
	}
	allMeta := make([]*Metadata, len(items))
	for i, item := range items {
		allMeta[i] = item.(*Metadata)
	}
	sort.Sort(byVersion(allMeta))
	return allMeta, nil
}
|
go
|
{
"resource": ""
}
|
q3981
|
appendArchives
|
train
|
// appendArchives is a simplestreams FilterFunc that collects matching
// archive metadata, filtering by the requested Juju major version (when
// set) and filling in each item's full path, parsed version, and source.
func appendArchives(
	source simplestreams.DataSource,
	matchingItems []interface{},
	items map[string]interface{},
	cons simplestreams.LookupConstraint,
) ([]interface{}, error) {
	var majorVersion int
	if guiConstraint, ok := cons.(*constraint); ok {
		majorVersion = guiConstraint.majorVersion
	}
	for _, item := range items {
		meta := item.(*Metadata)
		// A zero majorVersion means "no version filtering".
		if majorVersion != 0 && majorVersion != meta.JujuMajorVersion {
			continue
		}
		fullPath, err := source.URL(meta.Path)
		if err != nil {
			return nil, errors.Annotate(err, "cannot retrieve metadata full path")
		}
		meta.FullPath = fullPath
		vers, err := version.Parse(meta.StringVersion)
		if err != nil {
			return nil, errors.Annotate(err, "cannot parse metadata version")
		}
		meta.Version = vers
		meta.Source = source
		matchingItems = append(matchingItems, meta)
	}
	return matchingItems, nil
}
|
go
|
{
"resource": ""
}
|
q3982
|
NewState
|
train
|
// NewState creates a new client-side StorageProvisioner facade state.
func NewState(caller base.APICaller) (*State, error) {
	return &State{base.NewFacadeCaller(caller, storageProvisionerFacade)}, nil
}
|
go
|
{
"resource": ""
}
|
q3983
|
WatchVolumes
|
train
|
// WatchVolumes watches for lifecycle changes of volumes scoped to the
// entity with the given tag.
func (st *State) WatchVolumes(scope names.Tag) (watcher.StringsWatcher, error) {
	return st.watchStorageEntities("WatchVolumes", scope)
}
|
go
|
{
"resource": ""
}
|
q3984
|
WatchVolumeAttachments
|
train
|
// WatchVolumeAttachments watches for changes to volume attachments
// scoped to the entity with the given tag.
func (st *State) WatchVolumeAttachments(scope names.Tag) (watcher.MachineStorageIdsWatcher, error) {
	return st.watchAttachments("WatchVolumeAttachments", scope, apiwatcher.NewVolumeAttachmentsWatcher)
}
|
go
|
{
"resource": ""
}
|
q3985
|
WatchVolumeAttachmentPlans
|
train
|
// WatchVolumeAttachmentPlans watches for changes to volume attachment
// plans scoped to the entity with the given tag.
func (st *State) WatchVolumeAttachmentPlans(scope names.Tag) (watcher.MachineStorageIdsWatcher, error) {
	return st.watchAttachments("WatchVolumeAttachmentPlans", scope, apiwatcher.NewVolumeAttachmentPlansWatcher)
}
|
go
|
{
"resource": ""
}
|
q3986
|
WatchFilesystemAttachments
|
train
|
// WatchFilesystemAttachments watches for changes to filesystem
// attachments scoped to the entity with the given tag.
func (st *State) WatchFilesystemAttachments(scope names.Tag) (watcher.MachineStorageIdsWatcher, error) {
	return st.watchAttachments("WatchFilesystemAttachments", scope, apiwatcher.NewFilesystemAttachmentsWatcher)
}
|
go
|
{
"resource": ""
}
|
q3987
|
VolumeBlockDevices
|
train
|
// VolumeBlockDevices returns the block device details for the volume
// attachments with the given machine/volume ID pairs, one result per
// id in the same order.
func (st *State) VolumeBlockDevices(ids []params.MachineStorageId) ([]params.BlockDeviceResult, error) {
	var results params.BlockDeviceResults
	if err := st.facade.FacadeCall("VolumeBlockDevices", params.MachineStorageIds{ids}, &results); err != nil {
		return nil, err
	}
	if got := len(results.Results); got != len(ids) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(ids), got)
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3988
|
VolumeParams
|
train
|
// VolumeParams returns the parameters for creating the volumes with the
// given tags, one result per tag in the same order.
func (st *State) VolumeParams(tags []names.VolumeTag) ([]params.VolumeParamsResult, error) {
	args := params.Entities{
		Entities: make([]params.Entity, len(tags)),
	}
	for i, tag := range tags {
		args.Entities[i].Tag = tag.String()
	}
	var results params.VolumeParamsResults
	err := st.facade.FacadeCall("VolumeParams", args, &results)
	if err != nil {
		return nil, err
	}
	// Guard against a short or oversized reply from the server.
	if len(results.Results) != len(tags) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(tags), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3989
|
VolumeAttachmentParams
|
train
|
// VolumeAttachmentParams returns the parameters for creating the volume
// attachments with the given machine/volume ID pairs, one result per id
// in the same order.
func (st *State) VolumeAttachmentParams(ids []params.MachineStorageId) ([]params.VolumeAttachmentParamsResult, error) {
	var results params.VolumeAttachmentParamsResults
	if err := st.facade.FacadeCall("VolumeAttachmentParams", params.MachineStorageIds{ids}, &results); err != nil {
		return nil, err
	}
	if got := len(results.Results); got != len(ids) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(ids), got)
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3990
|
FilesystemAttachmentParams
|
train
|
// FilesystemAttachmentParams returns the parameters for creating the
// filesystem attachments with the given machine/filesystem ID pairs,
// one result per id in the same order.
func (st *State) FilesystemAttachmentParams(ids []params.MachineStorageId) ([]params.FilesystemAttachmentParamsResult, error) {
	args := params.MachineStorageIds{ids}
	var results params.FilesystemAttachmentParamsResults
	err := st.facade.FacadeCall("FilesystemAttachmentParams", args, &results)
	if err != nil {
		return nil, err
	}
	// Guard against a short or oversized reply from the server.
	if len(results.Results) != len(ids) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(ids), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3991
|
Life
|
train
|
// Life requests the life cycle of each entity with the given tags, one
// result per tag in the same order.
func (st *State) Life(tags []names.Tag) ([]params.LifeResult, error) {
	var results params.LifeResults
	args := params.Entities{
		Entities: make([]params.Entity, len(tags)),
	}
	for i, tag := range tags {
		args.Entities[i].Tag = tag.String()
	}
	if err := st.facade.FacadeCall("Life", args, &results); err != nil {
		return nil, err
	}
	// Guard against a short or oversized reply from the server.
	if len(results.Results) != len(tags) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(tags), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3992
|
AttachmentLife
|
train
|
// AttachmentLife requests the life cycle of each attachment with the
// given machine/storage ID pairs, one result per id in the same order.
func (st *State) AttachmentLife(ids []params.MachineStorageId) ([]params.LifeResult, error) {
	var results params.LifeResults
	if err := st.facade.FacadeCall("AttachmentLife", params.MachineStorageIds{ids}, &results); err != nil {
		return nil, err
	}
	if got := len(results.Results); got != len(ids) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(ids), got)
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3993
|
EnsureDead
|
train
|
// EnsureDead progresses the entities with the given tags to Dead, one
// result per tag in the same order.
func (st *State) EnsureDead(tags []names.Tag) ([]params.ErrorResult, error) {
	var results params.ErrorResults
	args := params.Entities{
		Entities: make([]params.Entity, len(tags)),
	}
	for i, tag := range tags {
		args.Entities[i].Tag = tag.String()
	}
	if err := st.facade.FacadeCall("EnsureDead", args, &results); err != nil {
		return nil, err
	}
	// Guard against a short or oversized reply from the server.
	if len(results.Results) != len(tags) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(tags), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3994
|
RemoveAttachments
|
train
|
// RemoveAttachments removes the attachments with the given
// machine/storage ID pairs from state, one result per id in the same
// order.
// NOTE(review): the facade method called is the singular
// "RemoveAttachment" while this method is plural — confirm this matches
// the server-side facade registration.
func (st *State) RemoveAttachments(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
	var results params.ErrorResults
	args := params.MachineStorageIds{ids}
	if err := st.facade.FacadeCall("RemoveAttachment", args, &results); err != nil {
		return nil, err
	}
	if len(results.Results) != len(ids) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(ids), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3995
|
InstanceIds
|
train
|
// InstanceIds returns the provider instance ID for each machine with
// the given tags, one result per tag in the same order.
func (st *State) InstanceIds(tags []names.MachineTag) ([]params.StringResult, error) {
	var results params.StringResults
	args := params.Entities{
		Entities: make([]params.Entity, len(tags)),
	}
	for i, tag := range tags {
		args.Entities[i].Tag = tag.String()
	}
	err := st.facade.FacadeCall("InstanceId", args, &results)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Guard against a short or oversized reply: the server must return
	// exactly one result per requested machine. (The original compared
	// against the literal 1 and had the expected/got values swapped.)
	if len(results.Results) != len(tags) {
		return nil, errors.Errorf("expected %d result(s), got %d", len(tags), len(results.Results))
	}
	return results.Results, nil
}
|
go
|
{
"resource": ""
}
|
q3996
|
SetStatus
|
train
|
// SetStatus sets the status of the given storage entities, combining
// any per-entity errors into the single returned error.
func (st *State) SetStatus(args []params.EntityStatusArgs) error {
	var result params.ErrorResults
	err := st.facade.FacadeCall("SetStatus", params.SetStatus{args}, &result)
	if err != nil {
		return err
	}
	return result.Combine()
}
|
go
|
{
"resource": ""
}
|
q3997
|
newSubordinateRelationsWatcher
|
train
|
// newSubordinateRelationsWatcher returns a strings watcher reporting
// relations of the subordinate application. principalName is presumably
// used by the watcher's loop to select relations relevant to that
// principal — confirm against the loop implementation.
func newSubordinateRelationsWatcher(backend *state.State, subordinateApp *state.Application, principalName string) (
	state.StringsWatcher, error,
) {
	w := &subRelationsWatcher{
		backend:       backend,
		app:           subordinateApp,
		principalName: principalName,
		relations:     make(map[string]bool),
		out:           make(chan []string),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	return w, errors.Trace(err)
}
|
go
|
{
"resource": ""
}
|
q3998
|
OneError
|
train
|
// OneError returns the first error found in the results, or nil if
// every result succeeded.
func (m *MetricResults) OneError() error {
	for i := range m.Results {
		if m.Results[i].Error != nil {
			return m.Results[i].Error
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q3999
|
Validate
|
train
|
// Validate returns an error if the hook Info is not valid: relation
// hooks require a remote unit (and then fall through to the
// generally-valid kinds), storage hooks require a valid storage ID,
// and the deprecated Action kind is rejected outright.
func (hi Info) Validate() error {
	switch hi.Kind {
	case hooks.RelationJoined, hooks.RelationChanged, hooks.RelationDeparted:
		if hi.RemoteUnit == "" {
			return fmt.Errorf("%q hook requires a remote unit", hi.Kind)
		}
		// Deliberate fallthrough: once the remote unit is present,
		// these relation hooks are valid like the kinds below.
		fallthrough
	case hooks.Install, hooks.Start, hooks.ConfigChanged, hooks.UpgradeCharm, hooks.Stop, hooks.RelationBroken,
		hooks.CollectMetrics, hooks.MeterStatusChanged, hooks.UpdateStatus, hooks.PreSeriesUpgrade, hooks.PostSeriesUpgrade:
		return nil
	case hooks.Action:
		return fmt.Errorf("hooks.Kind Action is deprecated")
	case hooks.StorageAttached, hooks.StorageDetaching:
		if !names.IsValidStorage(hi.StorageId) {
			return fmt.Errorf("invalid storage ID %q", hi.StorageId)
		}
		return nil
	// TODO(fwereade): define these in charm/hooks...
	case LeaderElected, LeaderDeposed, LeaderSettingsChanged:
		return nil
	}
	return fmt.Errorf("unknown hook kind %q", hi.Kind)
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.