repo
stringlengths
5
67
sha
stringlengths
40
40
path
stringlengths
4
234
url
stringlengths
85
339
language
stringclasses
6 values
split
stringclasses
3 values
doc
stringlengths
3
51.2k
sign
stringlengths
5
8.01k
problem
stringlengths
13
51.2k
output
stringlengths
0
3.87M
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L187-L194
go
train
// ShouldProvision returns whether provisioning should be attempted for the given // claim.
func (p *nfsProvisioner) ShouldProvision(claim *v1.PersistentVolumeClaim) bool
// ShouldProvision returns whether provisioning should be attempted for the given // claim. func (p *nfsProvisioner) ShouldProvision(claim *v1.PersistentVolumeClaim) bool
{ // As long as the export limit has not been reached we're ok to provision ok := p.checkExportLimit() if !ok { glog.Infof("export limit reached. skipping claim %s/%s", claim.Namespace, claim.Name) } return ok }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L198-L243
go
train
// Provision creates a volume i.e. the storage asset and returns a PV object for
// the volume.
//
// It delegates the actual asset creation (directory, export, quota) to
// createVolume, then records everything needed for later deletion as
// annotations on the returned PV (export/project blocks and IDs, plus the
// identity of this provisioner instance).
func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
	volume, err := p.createVolume(options)
	if err != nil {
		return nil, err
	}

	// The annotations below are what Delete later reads to find and remove
	// the export block / quota project belonging to this PV.
	annotations := make(map[string]string)
	annotations[annCreatedBy] = createdBy
	annotations[annExportBlock] = volume.exportBlock
	annotations[annExportID] = strconv.FormatUint(uint64(volume.exportID), 10)
	annotations[annProjectBlock] = volume.projectBlock
	annotations[annProjectID] = strconv.FormatUint(uint64(volume.projectID), 10)
	// A non-zero supplemental group is surfaced via the GID annotation so the
	// kubelet can apply it at mount time.
	if volume.supGroup != 0 {
		annotations[VolumeGidAnnotationKey] = strconv.FormatUint(volume.supGroup, 10)
	}
	// Only use legacy mount options annotation if StorageClass.MountOptions is empty
	if volume.mountOptions != "" && options.MountOptions == nil {
		annotations[MountOptionAnnotation] = volume.mountOptions
	}
	annotations[annProvisionerID] = string(p.identity)

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:        options.PVName,
			Labels:      map[string]string{},
			Annotations: annotations,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
			AccessModes:                   options.PVC.Spec.AccessModes,
			// Capacity is taken straight from the claim's storage request.
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server:   volume.server,
					Path:     volume.path,
					ReadOnly: false,
				},
			},
			MountOptions: options.MountOptions,
		},
	}

	return pv, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L261-L305
go
train
// createVolume creates a volume i.e. the storage asset. It creates a unique
// directory under /export and exports it. Returns the server IP, the path, a
// zero/non-zero supplemental group, the block it added to either the ganesha
// config or /etc/exports, and the exportID
// TODO return values
func (p *nfsProvisioner) createVolume(options controller.VolumeOptions) (volume, error) {
	// Validate StorageClass parameters before touching any state.
	gid, rootSquash, mountOptions, err := p.validateOptions(options)
	if err != nil {
		return volume{}, fmt.Errorf("error validating options for volume: %v", err)
	}

	server, err := p.getServer()
	if err != nil {
		return volume{}, fmt.Errorf("error getting NFS server IP for volume: %v", err)
	}

	// IgnoredError tells the controller this is a transient refusal, not a
	// hard failure: the claim can be retried once exports free up.
	if ok := p.checkExportLimit(); !ok {
		return volume{}, &controller.IgnoredError{Reason: fmt.Sprintf("export limit of %v has been reached", p.maxExports)}
	}

	path := path.Join(p.exportDir, options.PVName)

	err = p.createDirectory(options.PVName, gid)
	if err != nil {
		return volume{}, fmt.Errorf("error creating directory for volume: %v", err)
	}

	// From here on, failures roll back the directory so no orphaned asset
	// is left behind.
	exportBlock, exportID, err := p.createExport(options.PVName, rootSquash)
	if err != nil {
		os.RemoveAll(path)
		return volume{}, fmt.Errorf("error creating export for volume: %v", err)
	}

	projectBlock, projectID, err := p.createQuota(options.PVName, options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)])
	if err != nil {
		os.RemoveAll(path)
		return volume{}, fmt.Errorf("error creating quota for volume: %v", err)
	}

	return volume{
		server:       server,
		path:         path,
		exportBlock:  exportBlock,
		exportID:     exportID,
		projectBlock: projectBlock,
		projectID:    projectID,
		// supGroup is always 0 here; a supplemental group, if any, is applied
		// via createDirectory's chgrp instead.
		supGroup:     0,
		mountOptions: mountOptions,
	}, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L356-L441
go
train
// getServer gets the server IP to put in a provisioned PV's spec.
func (p *nfsProvisioner) getServer() (string, error)
// getServer gets the server IP to put in a provisioned PV's spec. func (p *nfsProvisioner) getServer() (string, error)
{ if p.outOfCluster { if p.serverHostname != "" { return p.serverHostname, nil } // TODO make this better out, err := exec.Command("hostname", "-i").Output() if err != nil { return "", fmt.Errorf("hostname -i failed with error: %v, output: %s", err, out) } addresses := strings.Fields(string(out)) if len(addresses) > 0 { return addresses[0], nil } return "", fmt.Errorf("hostname -i had bad output %s, no address to use", string(out)) } nodeName := os.Getenv(p.nodeEnv) if nodeName != "" { glog.Infof("using node name %s=%s as NFS server IP", p.nodeEnv, nodeName) return nodeName, nil } podIP := os.Getenv(p.podIPEnv) if podIP == "" { return "", fmt.Errorf("pod IP env %s must be set even if intent is to use service cluster IP as NFS server IP", p.podIPEnv) } serviceName := os.Getenv(p.serviceEnv) if serviceName == "" { glog.Infof("using potentially unstable pod IP %s=%s as NFS server IP (because neither service env %s nor node env %s are set)", p.podIPEnv, podIP, p.serviceEnv, p.nodeEnv) return podIP, nil } // Service env was set, now find and validate it namespace := os.Getenv(p.namespaceEnv) if namespace == "" { return "", fmt.Errorf("service env %s is set but namespace env %s isn't; no way to get the service cluster IP", p.serviceEnv, p.namespaceEnv) } service, err := p.client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("error getting service %s=%s in namespace %s=%s", p.serviceEnv, serviceName, p.namespaceEnv, namespace) } // Do some validation of the service before provisioning useless volumes valid := false type endpointPort struct { port int32 protocol v1.Protocol } expectedPorts := map[endpointPort]bool{ {2049, v1.ProtocolTCP}: true, {20048, v1.ProtocolTCP}: true, {111, v1.ProtocolUDP}: true, {111, v1.ProtocolTCP}: true, } endpoints, err := p.client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) for _, subset := range endpoints.Subsets { // One service can't have multiple 
nfs-provisioner endpoints. If it had, kubernetes would round-robin // the request which would probably go to the wrong instance. if len(subset.Addresses) != 1 { continue } if subset.Addresses[0].IP != podIP { continue } actualPorts := make(map[endpointPort]bool) for _, port := range subset.Ports { actualPorts[endpointPort{port.Port, port.Protocol}] = true } if !reflect.DeepEqual(expectedPorts, actualPorts) { continue } valid = true break } if !valid { return "", fmt.Errorf("service %s=%s is not valid; check that it has for ports %v exactly one endpoint, this pod's IP %s=%s", p.serviceEnv, serviceName, expectedPorts, p.podIPEnv, podIP) } if service.Spec.ClusterIP == v1.ClusterIPNone { return "", fmt.Errorf("service %s=%s is valid but it doesn't have a cluster IP", p.serviceEnv, serviceName) } glog.Infof("using service %s=%s cluster IP %s as NFS server IP", p.serviceEnv, serviceName, service.Spec.ClusterIP) return service.Spec.ClusterIP, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L449-L485
go
train
// createDirectory creates the given directory in exportDir with appropriate
// permissions and ownership according to the given gid parameter string.
//
// gid is either "none" (world-writable 0777+setgid directory) or a numeric
// group ID string (group-only 0071+setgid directory chgrp'd to that group).
// On any failure after the mkdir, the directory is removed again so no
// half-configured export directory is left behind.
func (p *nfsProvisioner) createDirectory(directory, gid string) error {
	// TODO quotas
	path := path.Join(p.exportDir, directory)
	// NOTE(review): a non-ENOENT Stat error also lands here and is reported
	// as "already exists" — confirm that is the intended behavior.
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		return fmt.Errorf("the path already exists")
	}

	perm := os.FileMode(0777 | os.ModeSetgid)
	if gid != "none" {
		// Execute permission is required for stat, which kubelet uses during unmount.
		perm = os.FileMode(0071 | os.ModeSetgid)
	}
	if err := os.MkdirAll(path, perm); err != nil {
		return err
	}
	// Due to umask, need to chmod
	if err := os.Chmod(path, perm); err != nil {
		os.RemoveAll(path)
		return err
	}

	if gid != "none" {
		// Parse and re-format the gid so only a clean numeric value reaches
		// the chgrp command line.
		groupID, err := strconv.ParseUint(gid, 10, 64)
		if err != nil {
			os.RemoveAll(path)
			return fmt.Errorf("strconv.ParseUint failed with error: %v", err)
		}
		cmd := exec.Command("chgrp", strconv.FormatUint(groupID, 10), path)
		out, err := cmd.CombinedOutput()
		if err != nil {
			os.RemoveAll(path)
			return fmt.Errorf("chgrp failed with error: %v, output: %s", err, out)
		}
	}

	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L489-L504
go
train
// createExport creates the export by adding a block to the appropriate config // file and exporting it
func (p *nfsProvisioner) createExport(directory string, rootSquash bool) (string, uint16, error)
// createExport creates the export by adding a block to the appropriate config // file and exporting it func (p *nfsProvisioner) createExport(directory string, rootSquash bool) (string, uint16, error)
{ path := path.Join(p.exportDir, directory) block, exportID, err := p.exporter.AddExportBlock(path, rootSquash, p.exportSubnet) if err != nil { return "", 0, fmt.Errorf("error adding export block for path %s: %v", path, err) } err = p.exporter.Export(path) if err != nil { p.exporter.RemoveExportBlock(block, exportID) return "", 0, fmt.Errorf("error exporting export block %s: %v", block, err) } return block, exportID, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/provision.go#L508-L525
go
train
// createQuota creates a quota for the directory by adding a project to // represent the directory and setting a quota on it
func (p *nfsProvisioner) createQuota(directory string, capacity resource.Quantity) (string, uint16, error)
// createQuota creates a quota for the directory by adding a project to // represent the directory and setting a quota on it func (p *nfsProvisioner) createQuota(directory string, capacity resource.Quantity) (string, uint16, error)
{ path := path.Join(p.exportDir, directory) limit := strconv.FormatInt(capacity.Value(), 10) block, projectID, err := p.quotaer.AddProject(path, limit) if err != nil { return "", 0, fmt.Errorf("error adding project for path %s: %v", path, err) } err = p.quotaer.SetQuota(projectID, path, limit) if err != nil { p.quotaer.RemoveProject(block, projectID) return "", 0, fmt.Errorf("error setting quota for path %s: %v", path, err) } return block, projectID, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/deleter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/deleter.go#L61-L66
go
train
// NewDeleter creates a Deleter object to handle the cleanup and deletion of local PVs // allocated by this provisioner
func NewDeleter(config *common.RuntimeConfig, cleanupTracker *CleanupStatusTracker) *Deleter
// NewDeleter creates a Deleter object to handle the cleanup and deletion of local PVs // allocated by this provisioner func NewDeleter(config *common.RuntimeConfig, cleanupTracker *CleanupStatusTracker) *Deleter
{ return &Deleter{ RuntimeConfig: config, CleanupStatus: cleanupTracker, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/deleter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/deleter.go#L70-L106
go
train
// DeletePVs will scan through all the existing PVs that are released, and cleanup and
// delete them
//
// Only PVs in phase Released are considered. Behavior by reclaim policy:
//   - Retain:  nothing to do.
//   - Recycle: unsupported; a warning event is recorded on the PV.
//   - Delete:  the PV is cleaned up via deletePV; failures are counted in
//     metrics, recorded as events, logged, and skipped (no retry here).
//   - anything else: a warning event for an unrecognized policy.
func (d *Deleter) DeletePVs() {
	for _, pv := range d.Cache.ListPVs() {
		if pv.Status.Phase != v1.VolumeReleased {
			continue
		}
		name := pv.Name
		switch pv.Spec.PersistentVolumeReclaimPolicy {
		case v1.PersistentVolumeReclaimRetain:
			glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", name)
		case v1.PersistentVolumeReclaimRecycle:
			glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle which is not supported", name)
			d.RuntimeConfig.Recorder.Eventf(pv, v1.EventTypeWarning, "VolumeUnsupportedReclaimPolicy", "Volume has unsupported PersistentVolumeReclaimPolicy: Recycle")
		case v1.PersistentVolumeReclaimDelete:
			glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", name)
			// Cleanup volume
			err := d.deletePV(pv)
			if err != nil {
				// Label the failure metric with the volume mode and whether a
				// job- or process-based delete would have been used; the mode
				// falls back to "unknown" if it can't be determined.
				mode, modeErr := d.getVolMode(pv)
				if modeErr != nil {
					mode = "unknown"
				}
				deleteType := metrics.DeleteTypeProcess
				if d.shouldRunJob(mode) {
					deleteType = metrics.DeleteTypeJob
				}
				metrics.PersistentVolumeDeleteFailedTotal.WithLabelValues(string(mode), deleteType).Inc()
				cleaningLocalPVErr := fmt.Errorf("Error cleaning PV %q: %v", name, err.Error())
				d.RuntimeConfig.Recorder.Eventf(pv, v1.EventTypeWarning, common.EventVolumeFailedDelete, cleaningLocalPVErr.Error())
				glog.Error(err)
				continue
			}
		default:
			// Unknown PersistentVolumeReclaimPolicy
			d.RuntimeConfig.Recorder.Eventf(pv, v1.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy")
		}
	}
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/deleter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/deleter.go#L357-L366
go
train
// runJob runs a cleaning job. // The advantages of using a Job to do block cleaning (which is a process that can take several hours) is as follows // 1) By naming the job based on the specific name of the volume, one ensures that only one instance of a cleaning // job will be active for any given volume. Any attempt to create another will fail due to name collision. This // avoids any concurrent cleaning problems. // 2) The above approach also ensures that we don't accidentally create a new PV when a cleaning job is in progress. // Even if a user accidentally deletes the PV, the presence of the cleaning job would prevent the provisioner from // attempting to re-create it. This would be the case even if the Daemonset had two provisioners running on the same // host (which can sometimes happen as the Daemonset controller follows "at least one" semantics). // 3) Admins get transparency on what is going on with a released volume by just running kubectl commands // to check for any corresponding cleaning job for a given volume and looking into its progress or failure. // // To achieve these advantages, the provisioner names the cleaning job with a constant name based on the PV name. // If a job completes successfully, then the job is first deleted and then the cleaned PV (to enable its rediscovery). // A failed Job is left "as is" (after a few retries to execute) for admins to intervene/debug and resolve. This is the // safest thing to do in this scenario as it is even in a non-Job based approach. Please note that for successful jobs, // deleting it does delete the logs of the job run. This is probably an acceptable initial implementation as the logs // of successful run are not as interesting. Long term, we might want to fetch the logs of the successful Jobs too, // before deleting them, but for the initial implementation we will keep things simple and perhaps decide the // enhancement based on user feedback.
func (d *Deleter) runJob(pv *v1.PersistentVolume, volMode v1.PersistentVolumeMode, mountPath string, config common.MountConfig) error
// runJob runs a cleaning job. // The advantages of using a Job to do block cleaning (which is a process that can take several hours) is as follows // 1) By naming the job based on the specific name of the volume, one ensures that only one instance of a cleaning // job will be active for any given volume. Any attempt to create another will fail due to name collision. This // avoids any concurrent cleaning problems. // 2) The above approach also ensures that we don't accidentally create a new PV when a cleaning job is in progress. // Even if a user accidentally deletes the PV, the presence of the cleaning job would prevent the provisioner from // attempting to re-create it. This would be the case even if the Daemonset had two provisioners running on the same // host (which can sometimes happen as the Daemonset controller follows "at least one" semantics). // 3) Admins get transparency on what is going on with a released volume by just running kubectl commands // to check for any corresponding cleaning job for a given volume and looking into its progress or failure. // // To achieve these advantages, the provisioner names the cleaning job with a constant name based on the PV name. // If a job completes successfully, then the job is first deleted and then the cleaned PV (to enable its rediscovery). // A failed Job is left "as is" (after a few retries to execute) for admins to intervene/debug and resolve. This is the // safest thing to do in this scenario as it is even in a non-Job based approach. Please note that for successful jobs, // deleting it does delete the logs of the job run. This is probably an acceptable initial implementation as the logs // of successful run are not as interesting. Long term, we might want to fetch the logs of the successful Jobs too, // before deleting them, but for the initial implementation we will keep things simple and perhaps decide the // enhancement based on user feedback. 
func (d *Deleter) runJob(pv *v1.PersistentVolume, volMode v1.PersistentVolumeMode, mountPath string, config common.MountConfig) error
{ if d.JobContainerImage == "" { return fmt.Errorf("cannot run cleanup job without specifying job image name in the environment variable") } job, err := NewCleanupJob(pv, volMode, d.JobContainerImage, d.Node.Name, d.Namespace, mountPath, config) if err != nil { return err } return d.RuntimeConfig.APIUtil.CreateJob(job) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/deleter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/deleter.go#L375-L380
go
train
// InProgress returns true if the cleaning for the specified PV is in progress.
func (c *CleanupStatusTracker) InProgress(pvName string, isJob bool) bool
// InProgress returns true if the cleaning for the specified PV is in progress. func (c *CleanupStatusTracker) InProgress(pvName string, isJob bool) bool
{ if isJob { return c.JobController.IsCleaningJobRunning(pvName) } return c.ProcTable.IsRunning(pvName) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/deleter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/deleter.go#L384-L389
go
train
// RemoveStatus removes and returns the status and start time of a completed cleaning process. // The method returns an error if the process has not yet completed.
func (c *CleanupStatusTracker) RemoveStatus(pvName string, isJob bool) (CleanupState, *time.Time, error)
// RemoveStatus removes and returns the status and start time of a completed cleaning process. // The method returns an error if the process has not yet completed. func (c *CleanupStatusTracker) RemoveStatus(pvName string, isJob bool) (CleanupState, *time.Time, error)
{ if isJob { return c.JobController.RemoveJob(pvName) } return c.ProcTable.RemoveEntry(pvName) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go#L92-L98
go
train
//NewglusterfileProvisioner create a new provisioner.
func NewglusterfileProvisioner(client kubernetes.Interface, id string) controller.Provisioner
//NewglusterfileProvisioner create a new provisioner. func NewglusterfileProvisioner(client kubernetes.Interface, id string) controller.Provisioner
{ return &glusterfileProvisioner{ client: client, identity: id, allocator: gidallocator.New(client), } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go#L137-L289
go
train
// Provision creates a storage asset and returns a PV object representing it.
func (p *glusterfileProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
// Provision creates a storage asset and returns a PV object representing it. func (p *glusterfileProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
{ sourceVolID := "" volID := "" var glusterfs *v1.GlusterfsVolumeSource smartclone := true if options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim Selector is not supported") } if !util.AccessModesContainedInAll(p.GetAccessModes(), options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", options.PVC.Spec.AccessModes, p.GetAccessModes()) } klog.V(1).Infof("VolumeOptions %v", options) p.options = options gidAllocate := true for k, v := range options.Parameters { switch dstrings.ToLower(k) { case "smartclone": smartclone = dstrings.ToLower(v) == "true" case "gidmin": // Let allocator handle case "gidmax": // Let allocator handle case "gidallocate": b, err := strconv.ParseBool(v) if err != nil { return nil, fmt.Errorf("invalid value %s for parameter %s: %v", v, k, err) } gidAllocate = b } } var gid *int if gidAllocate { allocate, err := p.allocator.AllocateNext(options) if err != nil { return nil, fmt.Errorf("allocator error: %v", err) } gid = &allocate } cfg, parseErr := p.parseClassParameters(options.Parameters, p.client) if parseErr != nil { return nil, fmt.Errorf("failed to parse storage class parameters: %v", parseErr) } klog.V(4).Infof("creating volume with configuration %+v", *cfg) modeAnn := "url:" + cfg.url + "," + "user:" + cfg.user + "," + "secret:" + cfg.secretName + "," + "secretnamespace:" + cfg.secretNamespace klog.V(1).Infof("Allocated GID %d for PVC %s", *gid, options.PVC.Name) gidStr := strconv.FormatInt(int64(*gid), 10) volSize := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := volSize.Value() volszInt := int(util.RoundUpToGiB(volSizeBytes)) if smartclone && (options.PVC.Annotations[CloneRequestAnn] != "") { if sourcePVCRef, ok := options.PVC.Annotations[CloneRequestAnn]; ok { var ns string parts := dstrings.SplitN(sourcePVCRef, "/", 2) if len(parts) < 2 { ns = options.PVC.Namespace } else { ns = parts[0] } sourcePVCName := 
parts[len(parts)-1] sourcePVC, err := p.getPVC(ns, sourcePVCName) if err != nil { return nil, fmt.Errorf("Unable to get PVC %s/%s", ns, sourcePVCName) } if sourceVolID, ok = sourcePVC.Annotations[heketiVolIDAnn]; ok { klog.Infof("Requesting clone of heketi volumeID %s", sourceVolID) cGlusterfs, sizeGiB, cVolID, createCloneErr := p.createVolumeClone(sourceVolID, cfg) if createCloneErr != nil { klog.Errorf("failed to create clone of %v: %v", sourceVolID, createCloneErr) return nil, fmt.Errorf("failed to create clone of %v: %v", sourceVolID, createCloneErr) } if cGlusterfs != nil { glusterfs = cGlusterfs } volID = cVolID klog.Infof("glusterfs volume source %v with size %d retrieved", cGlusterfs, sizeGiB) err = p.annotateClonedPVC(cVolID, options.PVC, sourceVolID) if err != nil { klog.Errorf("Failed to annotate cloned PVC: %v", err) return nil, fmt.Errorf("failed to annotate cloned PVC %s :%v", options.PVC, err) //todo: cleanup? } } else { return nil, fmt.Errorf("PVC %s/%s missing %s annotation", ns, sourcePVCName, heketiVolIDAnn) } } } else { nGlusterfs, sizeGiB, nVolID, createErr := p.CreateVolume(gid, cfg, volszInt) if createErr != nil { klog.Errorf("failed to create volume: %v", createErr) return nil, fmt.Errorf("failed to create volume: %v", createErr) } klog.Infof("glusterfs volume source %v with size %d retrieved", nGlusterfs, sizeGiB) if nGlusterfs != nil { glusterfs = nGlusterfs } volID = nVolID annotations := make(map[string]string, 2) annotations[heketiVolIDAnn] = nVolID annotateErr := p.annotatePVC(options.PVC.Namespace, options.PVC.Name, annotations) if annotateErr != nil { klog.Errorf("annotating PVC %v failed: %v", options.PVC.Name, annotateErr) } klog.V(1).Infof("successfully created Gluster File volume %+v with size %d and volID %s", glusterfs, sizeGiB, volID) } if glusterfs == nil { klog.Errorf("retrieved glusterfs volume source is nil") return nil, fmt.Errorf("retrieved glusterfs volume source is nil") } mode := v1.PersistentVolumeFilesystem pv := 
&v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: options.PVName, Annotations: map[string]string{ gidAnn: gidStr, glusterTypeAnn: "file", "Description": descAnn, heketiVolIDAnn: volID, restStr: modeAnn, v1.MountOptionAnnotation: mountStr, }, }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, AccessModes: options.PVC.Spec.AccessModes, VolumeMode: &mode, Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, PersistentVolumeSource: v1.PersistentVolumeSource{ Glusterfs: glusterfs, }, }, } return pv, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go#L703-L722
go
train
// parseSecret finds a given Secret instance and reads user password from it.
func parseSecret(namespace, secretName string, kubeClient kubernetes.Interface) (string, error)
// parseSecret finds a given Secret instance and reads user password from it. func parseSecret(namespace, secretName string, kubeClient kubernetes.Interface) (string, error)
{ secretMap, err := GetSecretForPV(namespace, secretName, provisionerName, kubeClient) if err != nil { klog.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err) return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err) } if len(secretMap) == 0 { return "", fmt.Errorf("empty secret map") } secret := "" for k, v := range secretMap { if k == secretKeyName { return v, nil } secret = v } // If not found, the last secret in the map wins as done before return secret, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go#L725-L741
go
train
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(restSecretNamespace, restSecretName, volumePluginName string, kubeClient kubernetes.Interface) (map[string]string, error)
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map func GetSecretForPV(restSecretNamespace, restSecretName, volumePluginName string, kubeClient kubernetes.Interface) (map[string]string, error)
{ secret := make(map[string]string) if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } secrets, err := kubeClient.Core().Secrets(restSecretNamespace).Get(restSecretName, metav1.GetOptions{}) if err != nil { return secret, err } if secrets.Type != v1.SecretType(volumePluginName) { return secret, fmt.Errorf("cannot get secret of type %s", volumePluginName) } for name, data := range secrets.Data { secret[name] = string(data) } return secret, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go#L757-L901
go
train
// parseClassParameters parses StorageClass.Parameters
func (p *glusterfileProvisioner) parseClassParameters(params map[string]string, kubeclient kubernetes.Interface) (*provisionerConfig, error)
// parseClassParameters parses StorageClass.Parameters func (p *glusterfileProvisioner) parseClassParameters(params map[string]string, kubeclient kubernetes.Interface) (*provisionerConfig, error)
{ var cfg provisionerConfig var err error authEnabled := true parseVolumeType := "" parseVolumeOptions := "" parseVolumeNamePrefix := "" parseThinPoolSnapFactor := "" cfg.thinPoolSnapFactor = defaultThinPoolSnapFactor for k, v := range params { switch dstrings.ToLower(k) { case "resturl": cfg.url = v case "restuser": cfg.user = v case "restuserkey": cfg.userKey = v case "restsecretname": cfg.secretName = v case "restsecretnamespace": cfg.secretNamespace = v case "clusterid": if len(v) != 0 { cfg.clusterID = v } case "restauthenabled": authEnabled = dstrings.ToLower(v) == "true" case "volumetype": parseVolumeType = v case "volumeoptions": if len(v) != 0 { parseVolumeOptions = v } case "volumenameprefix": if len(v) != 0 { parseVolumeNamePrefix = v } case "snapfactor": if len(v) != 0 { parseThinPoolSnapFactor = v } case "gidmin": case "gidmax": case "smartclone": default: return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, provisionerName) } } if len(cfg.url) == 0 { return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", provisionerName) } if len(parseVolumeType) == 0 { cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}} } else { parseVolumeTypeInfo := dstrings.Split(parseVolumeType, ":") switch parseVolumeTypeInfo[0] { case "replicate": if len(parseVolumeTypeInfo) >= 2 { newReplicaCount, convertErr := convertVolumeParam(parseVolumeTypeInfo[1]) if convertErr != nil { return nil, fmt.Errorf("error %v when parsing value %q of option %s for volume plugin %s", convertErr, parseVolumeTypeInfo[1], "volumetype", provisionerName) } cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}} } else { cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}} } case "disperse": if 
len(parseVolumeTypeInfo) >= 3 { newDisperseData, convertErr := convertVolumeParam(parseVolumeTypeInfo[1]) if err != nil { return nil, fmt.Errorf("error %v when parsing value %q of option %s for volume plugin %s", parseVolumeTypeInfo[1], convertErr, "volumetype", provisionerName) } newDisperseRedundancy, convertErr := convertVolumeParam(parseVolumeTypeInfo[2]) if err != nil { return nil, fmt.Errorf("error %v when parsing value %q of option %s for volume plugin %s", convertErr, parseVolumeTypeInfo[2], "volumetype", provisionerName) } cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}} } else { return nil, fmt.Errorf("StorageClass for provisioner %q must have data:redundancy count set for disperse volumes in storage class option '%s'", provisionerName, "volumetype") } case "none": cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityDistributeOnly} default: return nil, fmt.Errorf("error parsing value for option 'volumetype' for volume plugin %s", provisionerName) } } if !authEnabled { cfg.user = "" cfg.secretName = "" cfg.secretNamespace = "" cfg.userKey = "" cfg.secretValue = "" } if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 { // secretName + Namespace has precedence over userKey if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 { cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, p.client) if err != nil { return nil, err } } else { return nil, fmt.Errorf("StorageClass for provisioner %q must have secretNamespace and secretName either both set or both empty", provisionerName) } } else { cfg.secretValue = cfg.userKey } if len(parseVolumeOptions) != 0 { volOptions := dstrings.Split(parseVolumeOptions, ",") if len(volOptions) == 0 { return nil, fmt.Errorf("StorageClass for provisioner %q must have valid (for e.g.,'client.ssl on') volume option", provisionerName) } cfg.volumeOptions = volOptions } if 
len(parseVolumeNamePrefix) != 0 { if dstrings.Contains(parseVolumeNamePrefix, "_") { return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value") } cfg.volumeNamePrefix = parseVolumeNamePrefix } if len(parseThinPoolSnapFactor) != 0 { thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32) if err != nil { return nil, fmt.Errorf("failed to convert snapfactor value %v to float32: %v", parseThinPoolSnapFactor, err) } if thinPoolSnapFactor < 1.0 || thinPoolSnapFactor > 100.0 { return nil, fmt.Errorf("invalid snapshot factor %v, the value of snapfactor must be between 1 to 100", thinPoolSnapFactor) } cfg.thinPoolSnapFactor = float32(thinPoolSnapFactor) } return &cfg, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L66-L75
go
train
// IsRunning Check if cleanup process is still running
func (v *ProcTableImpl) IsRunning(pvName string) bool
// IsRunning Check if cleanup process is still running func (v *ProcTableImpl) IsRunning(pvName string) bool
{ v.mutex.RLock() defer v.mutex.RUnlock() if entry, ok := v.procTable[pvName]; !ok || entry.Status != CSRunning { return false } return true }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L78-L82
go
train
// IsEmpty Check if any cleanup process is running
func (v *ProcTableImpl) IsEmpty() bool
// IsEmpty Check if any cleanup process is running func (v *ProcTableImpl) IsEmpty() bool
{ v.mutex.RLock() defer v.mutex.RUnlock() return len(v.procTable) == 0 }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L85-L94
go
train
// MarkRunning Indicate that process is running.
func (v *ProcTableImpl) MarkRunning(pvName string) error
// MarkRunning Indicate that process is running. func (v *ProcTableImpl) MarkRunning(pvName string) error
{ v.mutex.Lock() defer v.mutex.Unlock() _, ok := v.procTable[pvName] if ok { return fmt.Errorf("Failed to mark running of %q as it is already running, should never happen", pvName) } v.procTable[pvName] = ProcEntry{StartTime: time.Now(), Status: CSRunning} return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L97-L99
go
train
// MarkFailed records that the cleanup process for pvName ended in failure.
// It delegates to markStatus, which owns the locking and terminal-state
// bookkeeping shared with MarkSucceeded.
func (v *ProcTableImpl) MarkFailed(pvName string) error {
	return v.markStatus(pvName, CSFailed)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L102-L104
go
train
// MarkSucceeded records that the cleanup process for pvName completed
// successfully. It delegates to markStatus, which owns the locking and
// terminal-state bookkeeping shared with MarkFailed.
func (v *ProcTableImpl) MarkSucceeded(pvName string) error {
	return v.markStatus(pvName, CSSucceeded)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L128-L143
go
train
// RemoveEntry Removes proctable entry and returns final state and start time of cleanup. // Must only be called and cleanup that has ended, else error is returned.
func (v *ProcTableImpl) RemoveEntry(pvName string) (CleanupState, *time.Time, error)
// RemoveEntry Removes proctable entry and returns final state and start time of cleanup. // Must only be called and cleanup that has ended, else error is returned. func (v *ProcTableImpl) RemoveEntry(pvName string) (CleanupState, *time.Time, error)
{ v.mutex.Lock() defer v.mutex.Unlock() entry, ok := v.procTable[pvName] if !ok { return CSNotFound, nil, nil } if entry.Status == CSRunning { return CSUnknown, nil, fmt.Errorf("cannot remove proctable entry for %q when it is still running", pvName) } if entry.Status == CSUnknown { return CSUnknown, nil, fmt.Errorf("proctable entry for %q in unexpected unknown state", pvName) } delete(v.procTable, pvName) return entry.Status, &entry.StartTime, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L146-L160
go
train
// Stats returns stats of ProcTable.
func (v *ProcTableImpl) Stats() ProcTableStats
// Stats returns stats of ProcTable. func (v *ProcTableImpl) Stats() ProcTableStats
{ v.mutex.RLock() defer v.mutex.RUnlock() running := 0 for _, entry := range v.procTable { if entry.Status == CSRunning { running++ } } return ProcTableStats{ Running: running, Succeeded: v.succeeded, Failed: v.failed, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L185-L188
go
train
// IsRunning reports whether the cleanup process for pvName is still running.
// The call is counted in IsRunningCount (for test assertions) before being
// delegated to the wrapped real table.
func (f *FakeProcTableImpl) IsRunning(pvName string) bool {
	f.IsRunningCount++
	return f.realTable.IsRunning(pvName)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L196-L199
go
train
// MarkRunning records in MarkRunningCount that a cleanup was marked running
// (for test assertions), then delegates to the wrapped real table.
func (f *FakeProcTableImpl) MarkRunning(pvName string) error {
	f.MarkRunningCount++
	return f.realTable.MarkRunning(pvName)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L202-L205
go
train
// MarkFailed marks the cleanup for pvName as failed via the wrapped real
// table. Note that failures and successes share the single MarkDoneCount
// counter, so tests observe "done" events regardless of outcome.
func (f *FakeProcTableImpl) MarkFailed(pvName string) error {
	f.MarkDoneCount++
	return f.realTable.MarkFailed(pvName)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L208-L211
go
train
// MarkSucceeded marks the cleanup for pvName as succeeded via the wrapped
// real table. Like MarkFailed, it increments the shared MarkDoneCount
// counter rather than a success-specific one.
func (f *FakeProcTableImpl) MarkSucceeded(pvName string) error {
	f.MarkDoneCount++
	return f.realTable.MarkSucceeded(pvName)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/proctable.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/proctable.go#L214-L217
go
train
// RemoveEntry removes the entry for pvName from the proc table, counting
// the call in RemoveCount (for test assertions) and delegating to the
// wrapped real table for the actual removal and state reporting.
func (f *FakeProcTableImpl) RemoveEntry(pvName string) (CleanupState, *time.Time, error) {
	f.RemoveCount++
	return f.realTable.RemoveEntry(pvName)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/apis/crd/v1/register.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/apis/crd/v1/register.go#L50-L59
go
train
// addKnownTypes adds the set of types defined in this package to the
// supplied scheme so they can be encoded/decoded for this group/version.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Register the snapshot CRD types (and their list types) under the
	// package's SchemeGroupVersion.
	scheme.AddKnownTypes(SchemeGroupVersion,
		&VolumeSnapshot{},
		&VolumeSnapshotList{},
		&VolumeSnapshotData{},
		&VolumeSnapshotDataList{},
	)
	// Also register the shared metav1 types (ListOptions etc.) for this
	// group/version.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/client/client.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/client/client.go#L41-L59
go
train
// NewClient creates a new RESTClient
func NewClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error)
// NewClient creates a new RESTClient func NewClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error)
{ scheme := runtime.NewScheme() if err := crdv1.AddToScheme(scheme); err != nil { return nil, nil, err } config := *cfg config.GroupVersion = &crdv1.SchemeGroupVersion config.APIPath = "/apis" config.ContentType = runtime.ContentTypeJSON config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)} client, err := rest.RESTClientFor(&config) if err != nil { return nil, nil, err } return client, scheme, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/client/client.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/client/client.go#L62-L104
go
train
// CreateCRD creates CustomResourceDefinition
func CreateCRD(clientset apiextensionsclient.Interface) error
// CreateCRD creates CustomResourceDefinition func CreateCRD(clientset apiextensionsclient.Interface) error
{ crd := &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: crdv1.VolumeSnapshotDataResourcePlural + "." + crdv1.GroupName, }, Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ Group: crdv1.GroupName, Version: crdv1.SchemeGroupVersion.Version, Scope: apiextensionsv1beta1.ClusterScoped, Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ Plural: crdv1.VolumeSnapshotDataResourcePlural, Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), }, }, } res, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) if err != nil && !apierrors.IsAlreadyExists(err) { glog.Fatalf("failed to create VolumeSnapshotDataResource: %#v, err: %#v", res, err) } crd = &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: crdv1.VolumeSnapshotResourcePlural + "." + crdv1.GroupName, }, Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ Group: crdv1.GroupName, Version: crdv1.SchemeGroupVersion.Version, Scope: apiextensionsv1beta1.NamespaceScoped, Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ Plural: crdv1.VolumeSnapshotResourcePlural, Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), }, }, } res, err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) if err != nil && !apierrors.IsAlreadyExists(err) { glog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", res, err) } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/client/client.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/client/client.go#L107-L119
go
train
// WaitForSnapshotResource waits for the snapshot resource
func WaitForSnapshotResource(snapshotClient *rest.RESTClient) error
// WaitForSnapshotResource waits for the snapshot resource func WaitForSnapshotResource(snapshotClient *rest.RESTClient) error
{ return wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) { _, err := snapshotClient.Get(). Resource(crdv1.VolumeSnapshotDataResourcePlural).DoRaw() if err == nil { return true, nil } if apierrors.IsNotFound(err) { return false, nil } return false, err }) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L38-L49
go
train
// Instances returns an implementation of Instances for OpenStack.
func (os *OpenStack) Instances() (cloudprovider.Instances, bool)
// Instances returns an implementation of Instances for OpenStack. func (os *OpenStack) Instances() (cloudprovider.Instances, bool)
{ glog.V(4).Info("openstack.Instances() called") compute, err := os.NewComputeV2() if err != nil { return nil, false } glog.V(1).Info("Claiming to support Instances") return &Instances{compute}, true }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L52-L80
go
train
// List lists node names
func (i *Instances) List(nameFilter string) ([]types.NodeName, error)
// List lists node names func (i *Instances) List(nameFilter string) ([]types.NodeName, error)
{ glog.V(4).Infof("openstack List(%v) called", nameFilter) opts := servers.ListOpts{ Name: nameFilter, Status: "ACTIVE", } pager := servers.List(i.compute, opts) ret := make([]types.NodeName, 0) err := pager.EachPage(func(page pagination.Page) (bool, error) { sList, err := servers.ExtractServers(page) if err != nil { return false, err } for i := range sList { ret = append(ret, mapServerToNodeName(&sList[i])) } return true, nil }) if err != nil { return nil, err } glog.V(3).Infof("Found %v instances matching %v: %v", len(ret), nameFilter, ret) return ret, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L84-L90
go
train
// CurrentNodeName is an implementation of Instances.CurrentNodeName // Note this is *not* necessarily the same as hostname.
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error)
// CurrentNodeName is an implementation of Instances.CurrentNodeName // Note this is *not* necessarily the same as hostname. func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error)
{ md, err := getMetadata() if err != nil { return "", err } return types.NodeName(md.Name), nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L93-L95
go
train
// AddSSHKeyToAllInstances adds an SSH public key to all instances.
// Not implemented for this OpenStack provider; it always returns an error.
func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return errors.New("unimplemented")
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L98-L108
go
train
// NodeAddresses gets node addresses
func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
// NodeAddresses gets node addresses func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
{ glog.V(4).Infof("NodeAddresses(%v) called", name) addrs, err := getAddressesByName(i.compute, name) if err != nil { return nil, err } glog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs) return addrs, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L113-L115
go
train
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
// Not implemented for this provider; it always returns an empty slice and
// an error.
func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	return []v1.NodeAddress{}, errors.New("unimplemented")
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L118-L127
go
train
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(name types.NodeName) (string, error)
// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (i *Instances) ExternalID(name types.NodeName) (string, error)
{ srv, err := getServerByName(i.compute, name) if err != nil { if err == ErrNotFound { return "", cloudprovider.ErrInstanceNotFound } return "", err } return srv.ID, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack_instances.go#L135-L143
go
train
// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name types.NodeName) (string, error)
// InstanceID returns the cloud provider ID of the specified instance. func (i *Instances) InstanceID(name types.NodeName) (string, error)
{ srv, err := getServerByName(i.compute, name) if err != nil { return "", err } // In the future it is possible to also return an endpoint as: // <endpoint>/<instanceid> return "/" + srv.ID, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/util.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/util.go#L31-L42
go
train
// generateID generates a unique exportID to assign an export
func generateID(mutex *sync.Mutex, ids map[uint16]bool) uint16
// generateID generates a unique exportID to assign an export func generateID(mutex *sync.Mutex, ids map[uint16]bool) uint16
{ mutex.Lock() id := uint16(1) for ; id <= math.MaxUint16; id++ { if _, ok := ids[id]; !ok { break } } ids[id] = true mutex.Unlock() return id }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/volume/util.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/volume/util.go#L52-L74
go
train
// getExistingIDs populates a map with existing ids found in the given config // file using the given regexp. Regexp must have a "digits" submatch.
func getExistingIDs(config string, re *regexp.Regexp) (map[uint16]bool, error)
// getExistingIDs populates a map with existing ids found in the given config // file using the given regexp. Regexp must have a "digits" submatch. func getExistingIDs(config string, re *regexp.Regexp) (map[uint16]bool, error)
{ ids := map[uint16]bool{} digitsRe := "([0-9]+)" if !strings.Contains(re.String(), digitsRe) { return ids, fmt.Errorf("regexp %s doesn't contain digits submatch %s", re.String(), digitsRe) } read, err := ioutil.ReadFile(config) if err != nil { return ids, err } allMatches := re.FindAllSubmatch(read, -1) for _, match := range allMatches { digits := match[1] if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil { ids[uint16(id)] = true } } return ids, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshot-controller/snapshot-controller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshot-controller/snapshot-controller.go#L96-L163
go
train
// NewSnapshotController wires up and returns a SnapshotController. It builds
// an informer watching VolumeSnapshot objects through client/scheme, the
// desired/actual state-of-world caches, the snapshotter (using volumePlugins
// for per-backend operations), the reconciler (resyncing every syncDuration)
// and the desired-state populator. Nothing is started here; call Run.
func NewSnapshotController(client *rest.RESTClient, scheme *runtime.Scheme, clientset kubernetes.Interface, volumePlugins *map[string]volume.Plugin, syncDuration time.Duration) SnapshotController {
	sc := &snapshotController{
		snapshotClient: client,
		snapshotScheme: scheme,
	}
	// Watch snapshot objects across all namespaces.
	source := kcache.NewListWatchFromClient(
		sc.snapshotClient,
		crdv1.VolumeSnapshotResourcePlural,
		apiv1.NamespaceAll,
		fields.Everything())
	sc.snapshotStore, sc.snapshotController = kcache.NewInformer(
		source,
		// The object type.
		&crdv1.VolumeSnapshot{},
		// resyncPeriod
		// Every resyncPeriod, all resources in the kcache will retrigger events.
		// Set to 0 to disable the resync.
		time.Minute*60,
		// Your custom resource event handlers.
		kcache.ResourceEventHandlerFuncs{
			AddFunc:    sc.onSnapshotAdd,
			UpdateFunc: sc.onSnapshotUpdate,
			DeleteFunc: sc.onSnapshotDelete,
		})
	// Event recording is currently disabled; kept for future use.
	//eventBroadcaster := record.NewBroadcaster()
	//eventBroadcaster.StartLogging(glog.Infof)
	//eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client).Events("")})
	// sc.recorder = eventBroadcaster.NewRecorder(api.Scheme, apiv1.EventSource{Component: "volume snapshotting"})
	sc.desiredStateOfWorld = cache.NewDesiredStateOfWorld()
	sc.actualStateOfWorld = cache.NewActualStateOfWorld()
	sc.snapshotter = snapshotter.NewVolumeSnapshotter(
		client,
		scheme,
		clientset,
		sc.actualStateOfWorld,
		volumePlugins)
	sc.reconciler = reconciler.NewReconciler(
		reconcilerLoopPeriod,
		syncDuration,
		false, /* disableReconciliationSync */
		sc.desiredStateOfWorld,
		sc.actualStateOfWorld,
		sc.snapshotter)
	sc.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
		desiredStateOfWorldPopulatorLoopSleepPeriod,
		desiredStateOfWorldPopulatorListSnapshotsRetryDuration,
		sc.snapshotStore,
		sc.desiredStateOfWorld,
	)
	return sc
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshot-controller/snapshot-controller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshot-controller/snapshot-controller.go#L166-L178
go
train
// Run starts an Snapshot resource controller
func (c *snapshotController) Run(ctx <-chan struct{})
// Run starts an Snapshot resource controller func (c *snapshotController) Run(ctx <-chan struct{})
{ glog.Infof("Starting snapshot controller") go c.snapshotController.Run(ctx) if !kcontroller.WaitForCacheSync("snapshot-controller", ctx, c.snapshotController.HasSynced) { return } go c.reconciler.Run(ctx) go c.desiredStateOfWorldPopulator.Run(ctx) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws_loadbalancer.go#L327-L379
go
train
// Makes sure that the health check for an ELB matches the configured listeners
func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error
// Makes sure that the health check for an ELB matches the configured listeners func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error
{ name := aws.StringValue(loadBalancer.LoadBalancerName) actual := loadBalancer.HealthCheck // Default AWS settings expectedHealthyThreshold := int64(2) expectedUnhealthyThreshold := int64(6) expectedTimeout := int64(5) expectedInterval := int64(10) // We only configure a TCP health-check on the first port expectedTarget := "" for _, listener := range listeners { if listener.InstancePort == nil { continue } expectedTarget = "TCP:" + strconv.FormatInt(*listener.InstancePort, 10) break } if expectedTarget == "" { return fmt.Errorf("unable to determine health check port for %q (no valid listeners)", name) } if expectedTarget == orEmpty(actual.Target) && expectedHealthyThreshold == orZero(actual.HealthyThreshold) && expectedUnhealthyThreshold == orZero(actual.UnhealthyThreshold) && expectedTimeout == orZero(actual.Timeout) && expectedInterval == orZero(actual.Interval) { return nil } glog.V(2).Infof("Updating load-balancer health-check for %q", name) healthCheck := &elb.HealthCheck{} healthCheck.HealthyThreshold = &expectedHealthyThreshold healthCheck.UnhealthyThreshold = &expectedUnhealthyThreshold healthCheck.Timeout = &expectedTimeout healthCheck.Interval = &expectedInterval healthCheck.Target = &expectedTarget request := &elb.ConfigureHealthCheckInput{} request.HealthCheck = healthCheck request.LoadBalancerName = loadBalancer.LoadBalancerName _, err := c.elb.ConfigureHealthCheck(request) if err != nil { return fmt.Errorf("error configuring load-balancer health-check for %q: %v", name, err) } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/metrics/metrics.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/metrics/metrics.go#L123-L126
go
train
// CapacityBreakDown breaks capacity down into every 500G, e.g. // [0]: 0G // (0, 500G]: 500G // (500G, 1000G]: 1000G // (1000G, 1500G]: 1500G
func CapacityBreakDown(capacityBytes int64) string
// CapacityBreakDown breaks capacity down into every 500G, e.g. // [0]: 0G // (0, 500G]: 500G // (500G, 1000G]: 1000G // (1000G, 1500G]: 1500G func CapacityBreakDown(capacityBytes int64) string
{
	// Number of 500G buckets needed to hold capacityBytes, rounded up so
	// any partial bucket counts as a whole one.
	buckets := math.Ceil(float64(capacityBytes) / float64(500*esUtil.GiB))
	return fmt.Sprintf("%dG", int64(buckets)*500)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
ceph/rbd/pkg/provision/rbd_util.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/ceph/rbd/pkg/provision/rbd_util.go#L48-L86
go
train
// CreateImage creates a new ceph image with provision and volume options.
func (u *RBDUtil) CreateImage(image string, pOpts *rbdProvisionOptions, options controller.VolumeOptions) (*v1.RBDPersistentVolumeSource, int, error)
// CreateImage creates a new ceph image with provision and volume options. func (u *RBDUtil) CreateImage(image string, pOpts *rbdProvisionOptions, options controller.VolumeOptions) (*v1.RBDPersistentVolumeSource, int, error)
{
	var output []byte
	var err error

	// Requested capacity from the PVC, in bytes.
	capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// convert to MB that rbd defaults on
	sz := int(util.RoundUpSize(volSizeBytes, 1024*1024))
	if sz <= 0 {
		return nil, 0, fmt.Errorf("invalid storage '%s' requested for RBD provisioner, it must greater than zero", capacity.String())
	}
	volSz := fmt.Sprintf("%d", sz)
	// rbd create
	mon := u.kernelRBDMonitorsOpt(pOpts.monitors)
	// Format-2 images additionally log the requested image features.
	if pOpts.imageFormat == rbdImageFormat2 {
		klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSz, pOpts.imageFormat, pOpts.imageFeatures, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
	} else {
		klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSz, pOpts.imageFormat, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
	}
	args := []string{"create", image, "--size", volSz, "--pool", pOpts.pool, "--id", pOpts.adminID, "-m", mon, "--key=" + pOpts.adminSecret, "--image-format", pOpts.imageFormat}
	if pOpts.imageFormat == rbdImageFormat2 {
		// if no image features is provided, it results in empty string
		// which disable all RBD image format 2 features as we expected
		features := strings.Join(pOpts.imageFeatures, ",")
		args = append(args, "--image-feature", features)
	}
	output, err = u.execCommand("rbd", args)
	if err != nil {
		klog.Warningf("failed to create rbd image, output %v", string(output))
		return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
	}

	// Return the volume source plus the provisioned size (in MiB, per the
	// rounding above) for the caller to record.
	return &v1.RBDPersistentVolumeSource{
		CephMonitors: pOpts.monitors,
		RBDImage:     image,
		RBDPool:      pOpts.pool,
		FSType:       pOpts.fsType,
	}, sz, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
ceph/rbd/pkg/provision/rbd_util.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/ceph/rbd/pkg/provision/rbd_util.go#L90-L126
go
train
// rbdStatus checks if there is a watcher on the image. // It returns true if there is a watcher on the image, otherwise returns false.
func (u *RBDUtil) rbdStatus(image string, pOpts *rbdProvisionOptions) (bool, error)
// rbdStatus checks if there is a watcher on the image. // It returns true if there is a watcher on the image, otherwise returns false. func (u *RBDUtil) rbdStatus(image string, pOpts *rbdProvisionOptions) (bool, error)
{
	mon := u.kernelRBDMonitorsOpt(pOpts.monitors)

	// "rbd status" exits 0 and prints a "Watchers:" section whether or not
	// clients are attached ("Watchers: none" when there are none); a
	// non-zero exit — e.g. the image does not exist — surfaces as err.
	klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
	args := []string{"status", image, "--pool", pOpts.pool, "-m", mon, "--id", pOpts.adminID, "--key=" + pOpts.adminSecret}

	raw, err := u.execCommand("rbd", args)
	output := string(raw)
	if err != nil {
		// The command itself failed; propagate its last error.
		return false, err
	}

	if !strings.Contains(output, imageWatcherStr) {
		klog.Warningf("rbd: no watchers on %s", image)
		return false, nil
	}
	klog.V(4).Infof("rbd: watchers on %s: %s", image, output)
	return true, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
ceph/rbd/pkg/provision/rbd_util.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/ceph/rbd/pkg/provision/rbd_util.go#L129-L149
go
train
// DeleteImage deletes a ceph image with provision and volume options.
func (u *RBDUtil) DeleteImage(image string, pOpts *rbdProvisionOptions) error
// DeleteImage deletes a ceph image with provision and volume options. func (u *RBDUtil) DeleteImage(image string, pOpts *rbdProvisionOptions) error
{
	// Refuse to remove an image that still has watchers attached.
	inUse, err := u.rbdStatus(image, pOpts)
	if err != nil {
		return err
	}
	if inUse {
		klog.Info("rbd is still being used ", image)
		return fmt.Errorf("rbd %s is still being used", image)
	}

	// rbd rm
	mon := u.kernelRBDMonitorsOpt(pOpts.monitors)
	klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
	args := []string{"rm", image, "--pool", pOpts.pool, "--id", pOpts.adminID, "-m", mon, "--key=" + pOpts.adminSecret}

	output, err := u.execCommand("rbd", args)
	if err != nil {
		klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
		return err
	}
	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws_routes.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws_routes.go#L68-L111
go
train
// ListRoutes implements Routes.ListRoutes // List all routes that match the filter
func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error)
// ListRoutes implements Routes.ListRoutes // List all routes that match the filter func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error)
{ table, err := c.findRouteTable(clusterName) if err != nil { return nil, err } var routes []*cloudprovider.Route var instanceIDs []*string for _, r := range table.Routes { instanceID := orEmpty(r.InstanceId) if instanceID == "" { continue } instanceIDs = append(instanceIDs, &instanceID) } instances, err := c.getInstancesByIDs(instanceIDs) if err != nil { return nil, err } for _, r := range table.Routes { instanceID := orEmpty(r.InstanceId) destinationCIDR := orEmpty(r.DestinationCidrBlock) if instanceID == "" || destinationCIDR == "" { continue } instance, found := instances[instanceID] if !found { glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID) continue } nodeName := mapInstanceToNodeName(instance) routeName := clusterName + "-" + destinationCIDR routes = append(routes, &cloudprovider.Route{Name: routeName, TargetNode: nodeName, DestinationCIDR: destinationCIDR}) } return routes, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws_routes.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws_routes.go#L188-L204
go
train
// DeleteRoute implements Routes.DeleteRoute // Delete the specified route
func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error
// DeleteRoute implements Routes.DeleteRoute // Delete the specified route func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error
{
	table, err := c.findRouteTable(clusterName)
	if err != nil {
		return err
	}

	// Remove the route matching this CIDR from the cluster's route table.
	request := &ec2.DeleteRouteInput{
		DestinationCidrBlock: aws.String(route.DestinationCIDR),
		RouteTableId:         table.RouteTableId,
	}
	if _, err := c.ec2.DeleteRoute(request); err != nil {
		return fmt.Errorf("error deleting AWS route (%s): %v", route.DestinationCIDR, err)
	}
	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/discovery/discovery.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/discovery/discovery.go#L52-L99
go
train
// NewDiscoverer creates a Discoverer object that will scan through // the configured directories and create local PVs for any new directories found
func NewDiscoverer(config *common.RuntimeConfig, cleanupTracker *deleter.CleanupStatusTracker) (*Discoverer, error)
// NewDiscoverer creates a Discoverer object that will scan through // the configured directories and create local PVs for any new directories found func NewDiscoverer(config *common.RuntimeConfig, cleanupTracker *deleter.CleanupStatusTracker) (*Discoverer, error)
{
	// Register a StorageClass informer so classLister stays in sync.
	sharedInformer := config.InformerFactory.Storage().V1().StorageClasses()
	sharedInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		// We don't need an actual event handler for StorageClasses,
		// but we must pass a non-nil one to cache.NewInformer()
		AddFunc:    nil,
		UpdateFunc: nil,
		DeleteFunc: nil,
	})

	// Copy only the node labels the config asks to propagate onto PVs;
	// labels missing from the node are silently skipped.
	labelMap := make(map[string]string)
	for _, labelName := range config.NodeLabelsForPV {
		labelVal, ok := config.Node.Labels[labelName]
		if ok {
			labelMap[labelName] = labelVal
		}
	}

	if config.UseAlphaAPI {
		// Alpha API path: node affinity is carried as a JSON annotation on
		// the PV rather than as a structured field.
		nodeAffinity, err := generateNodeAffinity(config.Node)
		if err != nil {
			return nil, fmt.Errorf("Failed to generate node affinity: %v", err)
		}
		tmpAnnotations := map[string]string{}
		err = StorageNodeAffinityToAlphaAnnotation(tmpAnnotations, nodeAffinity)
		if err != nil {
			return nil, fmt.Errorf("Failed to convert node affinity to alpha annotation: %v", err)
		}
		return &Discoverer{
			RuntimeConfig:   config,
			Labels:          labelMap,
			CleanupTracker:  cleanupTracker,
			classLister:     sharedInformer.Lister(),
			nodeAffinityAnn: tmpAnnotations[common.AlphaStorageNodeAffinityAnnotation]}, nil
	}

	// Non-alpha path: use the structured VolumeNodeAffinity instead.
	volumeNodeAffinity, err := generateVolumeNodeAffinity(config.Node)
	if err != nil {
		return nil, fmt.Errorf("Failed to generate volume node affinity: %v", err)
	}
	return &Discoverer{
		RuntimeConfig:  config,
		Labels:         labelMap,
		CleanupTracker: cleanupTracker,
		classLister:    sharedInformer.Lister(),
		nodeAffinity:   volumeNodeAffinity}, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/discovery/discovery.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/discovery/discovery.go#L154-L158
go
train
// DiscoverLocalVolumes reads the configured discovery paths, and creates PVs for the new volumes
func (d *Discoverer) DiscoverLocalVolumes()
// DiscoverLocalVolumes reads the configured discovery paths, and creates PVs for the new volumes func (d *Discoverer) DiscoverLocalVolumes()
{
	// Scan every configured discovery path, one storage class at a time.
	for storageClass, mountConfig := range d.DiscoveryMap {
		d.discoverVolumesAtPath(storageClass, mountConfig)
	}
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/discovery/discovery.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/discovery/discovery.go#L339-L353
go
train
// Round down the capacity to an easy to read value.
func roundDownCapacityPretty(capacityBytes int64) int64
// Round down the capacity to an easy to read value. func roundDownCapacityPretty(capacityBytes int64) int64
{
	// Try GiB first, then MiB; pick the first unit that yields at least
	// 10 whole units so the rounded value stays easy to read.
	for _, unit := range []int64{esUtil.GiB, esUtil.MiB} {
		if units := capacityBytes / unit; units >= 10 {
			return units * unit
		}
	}
	// Too small for either unit; report the capacity unchanged.
	return capacityBytes
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/discovery/discovery.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/discovery/discovery.go#L357-L367
go
train
// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations // and converts it to the NodeAffinity type in core.
func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*v1.NodeAffinity, error)
// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations // and converts it to the NodeAffinity type in core. func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*v1.NodeAffinity, error)
{ if len(annotations) > 0 && annotations[common.AlphaStorageNodeAffinityAnnotation] != "" { var affinity v1.NodeAffinity err := json.Unmarshal([]byte(annotations[common.AlphaStorageNodeAffinityAnnotation]), &affinity) if err != nil { return nil, err } return &affinity, nil } return nil, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/discovery/discovery.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/discovery/discovery.go#L370-L381
go
train
// StorageNodeAffinityToAlphaAnnotation converts NodeAffinity type to Alpha annotation for use in PersistentVolumes
func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *v1.NodeAffinity) error
// StorageNodeAffinityToAlphaAnnotation converts NodeAffinity type to Alpha annotation for use in PersistentVolumes func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *v1.NodeAffinity) error
{
	// Nothing to serialize; leave the annotations untouched.
	if affinity == nil {
		return nil
	}

	serialized, err := json.Marshal(*affinity)
	if err != nil {
		return err
	}
	annotations[common.AlphaStorageNodeAffinityAnnotation] = string(serialized)
	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/apis/crd/v1/deepcopy.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/apis/crd/v1/deepcopy.go#L24-L29
go
train
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSnapshotCondition) DeepCopyInto(out *VolumeSnapshotCondition)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotCondition) DeepCopyInto(out *VolumeSnapshotCondition)
{
	// deepcopy-gen output: start with a shallow copy of all fields.
	*out = *in
	// Status is a plain value field; the generator re-assigns it explicitly.
	out.Status = in.Status
	// LastTransitionTime provides its own DeepCopyInto, so use it.
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/apis/crd/v1/deepcopy.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/apis/crd/v1/deepcopy.go#L32-L37
go
train
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSnapshotDataCondition) DeepCopyInto(out *VolumeSnapshotDataCondition)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotDataCondition) DeepCopyInto(out *VolumeSnapshotDataCondition)
{
	// deepcopy-gen output: start with a shallow copy of all fields.
	*out = *in
	// Status is a plain value field; the generator re-assigns it explicitly.
	out.Status = in.Status
	// LastTransitionTime provides its own DeepCopyInto, so use it.
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/apis/crd/v1/deepcopy.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/apis/crd/v1/deepcopy.go#L40-L62
go
train
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSnapshotDataSpec) DeepCopyInto(out *VolumeSnapshotDataSpec)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotDataSpec) DeepCopyInto(out *VolumeSnapshotDataSpec)
{
	// deepcopy-gen output: shallow-copy first, then fix up every field
	// that holds pointers so the copy shares no storage with the source.
	*out = *in
	in.VolumeSnapshotDataSource.DeepCopyInto(&out.VolumeSnapshotDataSource)
	if in.VolumeSnapshotRef != nil {
		// Shadow in/out with the addresses of the pointer fields so the
		// generated template below can work through *in / *out.
		in, out := &in.VolumeSnapshotRef, &out.VolumeSnapshotRef
		if *in == nil {
			*out = nil
		} else {
			// Allocate a fresh ObjectReference and deep-copy into it.
			*out = new(core_v1.ObjectReference)
			(*in).DeepCopyInto(*out)
		}
	}
	if in.PersistentVolumeRef != nil {
		// Same template for the second pointer field.
		in, out := &in.PersistentVolumeRef, &out.PersistentVolumeRef
		if *in == nil {
			*out = nil
		} else {
			*out = new(core_v1.ObjectReference)
			(*in).DeepCopyInto(*out)
		}
	}
	return
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L279-L330
go
train
// newGCECloud creates a new instance of GCECloud.
func newGCECloud(config io.Reader) (*Cloud, error)
// newGCECloud creates a new instance of GCECloud. func newGCECloud(config io.Reader) (*Cloud, error)
{
	// Project ID and zone come from the GCE metadata server.
	projectID, zone, err := getProjectAndZone()
	if err != nil {
		return nil, err
	}
	region, err := GetGCERegion(zone)
	if err != nil {
		return nil, err
	}
	networkName, err := getNetworkNameViaMetadata()
	if err != nil {
		return nil, err
	}
	networkURL := gceNetworkURL(projectID, networkName)

	// By default, Kubernetes clusters only run against one zone
	managedZones := []string{zone}

	tokenSource := google.ComputeTokenSource("")
	var nodeTags []string
	var nodeInstancePrefix string
	if config != nil {
		// An explicit config file may override any of the metadata-derived
		// defaults above.
		var cfg Config
		if err := gcfg.ReadInto(&cfg, config); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		glog.Infof("Using GCE provider config %+v", cfg)
		if cfg.Global.ProjectID != "" {
			projectID = cfg.Global.ProjectID
		}
		if cfg.Global.NetworkName != "" {
			// A value containing "/" is treated as an already-complete URL;
			// otherwise build one from the configured project and name.
			if strings.Contains(cfg.Global.NetworkName, "/") {
				networkURL = cfg.Global.NetworkName
			} else {
				networkURL = gceNetworkURL(cfg.Global.ProjectID, cfg.Global.NetworkName)
			}
		}
		if cfg.Global.TokenURL != "" {
			tokenSource = NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
		}
		nodeTags = cfg.Global.NodeTags
		nodeInstancePrefix = cfg.Global.NodeInstancePrefix
		if cfg.Global.Multizone {
			managedZones = nil // Use all zones in region
		}
	}

	return CreateGCECloud(projectID, region, zone, managedZones, networkURL, nodeTags, nodeInstancePrefix, tokenSource, true /* useMetadataServer */)
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L336-L405
go
train
// CreateGCECloud creates a GCE Cloud object using the specified parameters. // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed.
func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL string, nodeTags []string, nodeInstancePrefix string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*Cloud, error)
// CreateGCECloud creates a GCE Cloud object using the specified parameters. // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL string, nodeTags []string, nodeInstancePrefix string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*Cloud, error)
{
	if tokenSource == nil {
		var err error
		// No token source supplied: fall back to application-default
		// credentials with compute scopes.
		tokenSource, err = google.DefaultTokenSource(
			oauth2.NoContext,
			compute.CloudPlatformScope,
			compute.ComputeScope)
		glog.Infof("Using DefaultTokenSource %#v", tokenSource)
		if err != nil {
			return nil, err
		}
	} else {
		glog.Infof("Using existing Token Source %#v", tokenSource)
	}

	// Verify credentials actually work before building any clients: retry
	// the initial token fetch every 5s for up to 30s.
	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
		if _, err := tokenSource.Token(); err != nil {
			glog.Errorf("error fetching initial token: %v", err)
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, err
	}

	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
	svc, err := compute.New(client)
	if err != nil {
		return nil, err
	}
	containerSvc, err := container.New(client)
	if err != nil {
		return nil, err
	}

	// Resolve the network URL via an API call when the caller did not
	// provide one.
	if networkURL == "" {
		networkName, err := getNetworkNameViaAPICall(svc, projectID)
		if err != nil {
			return nil, err
		}
		networkURL = gceNetworkURL(projectID, networkName)
	}

	// Empty managedZones means "manage every zone in the region".
	if len(managedZones) == 0 {
		managedZones, err = getZonesForRegion(svc, projectID, region)
		if err != nil {
			return nil, err
		}
	}
	if len(managedZones) != 1 {
		glog.Infof("managing multiple zones: %v", managedZones)
	}

	operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(10, 100) // 10 qps, 100 bucket size.

	return &Cloud{
		service:                  svc,
		containerService:         containerSvc,
		projectID:                projectID,
		region:                   region,
		localZone:                zone,
		managedZones:             managedZones,
		networkURL:               networkURL,
		nodeTags:                 nodeTags,
		nodeInstancePrefix:       nodeInstancePrefix,
		useMetadataServer:        useMetadataServer,
		operationPollRateLimiter: operationPollRateLimiter,
	}, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L421-L429
go
train
// ScrubDNS filters DNS settings for pods.
func (gce *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
// ScrubDNS filters DNS settings for pods. func (gce *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
{
	// GCE injects more search paths than pods need; drop every entry
	// matching the known-useless pattern and keep the rest in order.
	for _, search := range searches {
		if uselessDNSSearchRE.MatchString(search) {
			continue
		}
		srchOut = append(srchOut, search)
	}
	// Nameservers pass through unchanged.
	return nameservers, srchOut
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L548-L561
go
train
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (gce *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error)
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer func (gce *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error)
{
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	fwd, err := gce.service.ForwardingRules.Get(gce.projectID, gce.region, loadBalancerName).Do()
	switch {
	case err == nil:
		// Rule exists: report its IP as the load balancer's ingress point.
		return &v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{{IP: fwd.IPAddress}},
		}, true, nil
	case isHTTPErrorCode(err, http.StatusNotFound):
		// Absence is not an error: the load balancer simply does not exist.
		return nil, false, nil
	default:
		return nil, false, err
	}
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L583-L866
go
train
// EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer. // Our load balancers in GCE consist of four separate GCE resources - a static // IP address, a firewall rule, a target pool, and a forwarding rule. This // function has to manage all of them. // Due to an interesting series of design decisions, this handles both creating // new load balancers and updating existing load balancers, recognizing when // each is needed.
func (gce *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
// EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer. // Our load balancers in GCE consist of four separate GCE resources - a static // IP address, a firewall rule, a target pool, and a forwarding rule. This // function has to manage all of them. // Due to an interesting series of design decisions, this handles both creating // new load balancers and updating existing load balancers, recognizing when // each is needed. func (gce *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
{ if len(nodes) == 0 { return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts") } hostNames := nodeNames(nodes) hosts, err := gce.getInstancesByNames(hostNames) if err != nil { return nil, err } loadBalancerName := cloudprovider.GetLoadBalancerName(apiService) loadBalancerIP := apiService.Spec.LoadBalancerIP ports := apiService.Spec.Ports portStr := []string{} for _, p := range apiService.Spec.Ports { portStr = append(portStr, fmt.Sprintf("%s/%d", p.Protocol, p.Port)) } affinityType := apiService.Spec.SessionAffinity serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", loadBalancerName, gce.region, loadBalancerIP, portStr, hostNames, serviceName, apiService.Annotations) // Check if the forwarding rule exists, and if so, what its IP is. fwdRuleExists, fwdRuleNeedsUpdate, fwdRuleIP, err := gce.forwardingRuleNeedsUpdate(loadBalancerName, gce.region, loadBalancerIP, ports) if err != nil { return nil, err } if !fwdRuleExists { glog.Infof("Forwarding rule %v for Service %v/%v doesn't exist", loadBalancerName, apiService.Namespace, apiService.Name) } // Make sure we know which IP address will be used and have properly reserved // it as static before moving forward with the rest of our operations. // // We use static IP addresses when updating a load balancer to ensure that we // can replace the load balancer's other components without changing the // address its service is reachable on. We do it this way rather than always // keeping the static IP around even though this is more complicated because // it makes it less likely that we'll run into quota issues. Only 7 static // IP addresses are allowed per region by default. 
// // We could let an IP be allocated for us when the forwarding rule is created, // but we need the IP to set up the firewall rule, and we want to keep the // forwarding rule creation as the last thing that needs to be done in this // function in order to maintain the invariant that "if the forwarding rule // exists, the LB has been fully created". ipAddress := "" // Through this process we try to keep track of whether it is safe to // release the IP that was allocated. If the user specifically asked for // an IP, we assume they are managing it themselves. Otherwise, we will // release the IP in case of early-terminating failure or upon successful // creating of the LB. // TODO(#36535): boil this logic down into a set of component functions // and key the flag values off of errors returned. isUserOwnedIP := false // if this is set, we never release the IP isSafeToReleaseIP := false defer func() { if isUserOwnedIP { return } if isSafeToReleaseIP { if err = gce.deleteStaticIP(loadBalancerName, gce.region); err != nil { glog.Errorf("failed to release static IP %s for load balancer (%v(%v), %v): %v", ipAddress, loadBalancerName, serviceName, gce.region, err) } glog.V(2).Infof("EnsureLoadBalancer(%v(%v)): released static IP %s", loadBalancerName, serviceName, ipAddress) } else { glog.Warningf("orphaning static IP %s during update of load balancer (%v(%v), %v): %v", ipAddress, loadBalancerName, serviceName, gce.region, err) } }() if loadBalancerIP != "" { // If a specific IP address has been requested, we have to respect the // user's request and use that IP. If the forwarding rule was already using // a different IP, it will be harmlessly abandoned because it was only an // ephemeral IP (or it was a different static IP owned by the user, in which // case we shouldn't delete it anyway). 
if isStatic, addrErr := gce.projectOwnsStaticIP(loadBalancerName, gce.region, loadBalancerIP); err != nil { return nil, fmt.Errorf("failed to test if this GCE project owns the static IP %s: %v", loadBalancerIP, addrErr) } else if isStatic { // The requested IP is a static IP, owned and managed by the user. isUserOwnedIP = true isSafeToReleaseIP = false ipAddress = loadBalancerIP glog.V(4).Infof("EnsureLoadBalancer(%v(%v)): using user-provided static IP %s", loadBalancerName, serviceName, ipAddress) } else if loadBalancerIP == fwdRuleIP { // The requested IP is not a static IP, but is currently assigned // to this forwarding rule, so we can keep it. isUserOwnedIP = false isSafeToReleaseIP = true ipAddress, _, err = gce.ensureStaticIP(loadBalancerName, serviceName.String(), gce.region, fwdRuleIP) if err != nil { return nil, fmt.Errorf("failed to ensure static IP %s: %v", fwdRuleIP, err) } glog.V(4).Infof("EnsureLoadBalancer(%v(%v)): using user-provided non-static IP %s", loadBalancerName, serviceName, ipAddress) } else { // The requested IP is not static and it is not assigned to the // current forwarding rule. It might be attached to a different // rule or it might not be part of this project at all. Either // way, we can't use it. return nil, fmt.Errorf("requested ip %s is neither static nor assigned to LB %s(%v): %v", loadBalancerIP, loadBalancerName, serviceName, err) } } else { // The user did not request a specific IP. isUserOwnedIP = false // This will either allocate a new static IP if the forwarding rule didn't // already have an IP, or it will promote the forwarding rule's current // IP from ephemeral to static, or it will just get the IP if it is // already static. 
existed := false ipAddress, existed, err = gce.ensureStaticIP(loadBalancerName, serviceName.String(), gce.region, fwdRuleIP) if err != nil { return nil, fmt.Errorf("failed to ensure static IP %s: %v", fwdRuleIP, err) } if existed { // If the IP was not specifically requested by the user, but it // already existed, it seems to be a failed update cycle. We can // use this IP and try to run through the process again, but we // should not release the IP unless it is explicitly flagged as OK. isSafeToReleaseIP = false glog.V(4).Infof("EnsureLoadBalancer(%v(%v)): adopting static IP %s", loadBalancerName, serviceName, ipAddress) } else { // For total clarity. The IP did not pre-exist and the user did // not ask for a particular one, so we can release the IP in case // of failure or success. isSafeToReleaseIP = true glog.V(4).Infof("EnsureLoadBalancer(%v(%v)): allocated static IP %s", loadBalancerName, serviceName, ipAddress) } } // Deal with the firewall next. The reason we do this here rather than last // is because the forwarding rule is used as the indicator that the load // balancer is fully created - it's what getLoadBalancer checks for. // Check if user specified the allow source range sourceRanges, err := apiservice.GetLoadBalancerSourceRanges(apiService) if err != nil { return nil, err } firewallExists, firewallNeedsUpdate, err := gce.firewallNeedsUpdate(loadBalancerName, serviceName.String(), gce.region, ipAddress, ports, sourceRanges) if err != nil { return nil, err } if firewallNeedsUpdate { desc := makeFirewallDescription(serviceName.String(), ipAddress) // Unlike forwarding rules and target pools, firewalls can be updated // without needing to be deleted and recreated. 
if firewallExists { glog.Infof("EnsureLoadBalancer(%v(%v)): updating firewall", loadBalancerName, serviceName) if err = gce.updateFirewall(loadBalancerName, gce.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } glog.Infof("EnsureLoadBalancer(%v(%v)): updated firewall", loadBalancerName, serviceName) } else { glog.Infof("EnsureLoadBalancer(%v(%v)): creating firewall", loadBalancerName, serviceName) if err = gce.createFirewall(loadBalancerName, gce.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } glog.Infof("EnsureLoadBalancer(%v(%v)): created firewall", loadBalancerName, serviceName) } } tpExists, tpNeedsUpdate, err := gce.targetPoolNeedsUpdate(loadBalancerName, gce.region, affinityType) if err != nil { return nil, err } if !tpExists { glog.Infof("Target pool %v for Service %v/%v doesn't exist", loadBalancerName, apiService.Namespace, apiService.Name) } // Ensure health checks are created for this target pool to pass to createTargetPool for health check links // Alternately, if the annotation on the service was removed, we need to recreate the target pool without // health checks. This needs to be prior to the forwarding rule deletion below otherwise it is not possible // to delete just the target pool or http health checks later. var hcToCreate *compute.HttpHealthCheck hcExisting, err := gce.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { return nil, fmt.Errorf("Error checking HTTP health check %s: %v", loadBalancerName, err) } if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) if err != nil { // This logic exists to detect a transition for a pre-existing service and turn on // the tpNeedsUpdate flag to delete/recreate fwdrule/tpool adding the health check // to the target pool. 
glog.V(2).Infof("Annotation external-traffic=OnlyLocal added to new or pre-existing service") tpNeedsUpdate = true } hcToCreate, err = gce.ensureHTTPHealthCheck(loadBalancerName, path, healthCheckNodePort) if err != nil { return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %v", loadBalancerName, healthCheckNodePort, err) } } else { glog.V(4).Infof("service %v does not need health checks", apiService.Name) if err == nil { glog.V(2).Infof("Deleting stale health checks for service %v LB %v", apiService.Name, loadBalancerName) tpNeedsUpdate = true } } // Now we get to some slightly more interesting logic. // First, neither target pools nor forwarding rules can be updated in place - // they have to be deleted and recreated. // Second, forwarding rules are layered on top of target pools in that you // can't delete a target pool that's currently in use by a forwarding rule. // Thus, we have to tear down the forwarding rule if either it or the target // pool needs to be updated. if fwdRuleExists && (fwdRuleNeedsUpdate || tpNeedsUpdate) { // Begin critical section. If we have to delete the forwarding rule, // and something should fail before we recreate it, don't release the // IP. That way we can come back to it later. 
isSafeToReleaseIP = false if err := gce.deleteForwardingRule(loadBalancerName, gce.region); err != nil { return nil, fmt.Errorf("failed to delete existing forwarding rule %s for load balancer update: %v", loadBalancerName, err) } glog.Infof("EnsureLoadBalancer(%v(%v)): deleted forwarding rule", loadBalancerName, serviceName) } if tpExists && tpNeedsUpdate { // Generate the list of health checks for this target pool to pass to deleteTargetPool if path, _ := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { var err error hcExisting, err = gce.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { glog.Infof("Failed to retrieve health check %v:%v", loadBalancerName, err) } } // Pass healthchecks to deleteTargetPool to cleanup health checks prior to cleaning up the target pool itself. if err := gce.deleteTargetPool(loadBalancerName, gce.region, hcExisting); err != nil { return nil, fmt.Errorf("failed to delete existing target pool %s for load balancer update: %v", loadBalancerName, err) } glog.Infof("EnsureLoadBalancer(%v(%v)): deleted target pool", loadBalancerName, serviceName) } // Once we've deleted the resources (if necessary), build them back up (or for // the first time if they're new). 
if tpNeedsUpdate { createInstances := hosts if len(hosts) > maxTargetPoolCreateInstances { createInstances = createInstances[:maxTargetPoolCreateInstances] } // Pass healthchecks to createTargetPool which needs them as health check links in the target pool if err := gce.createTargetPool(loadBalancerName, serviceName.String(), gce.region, createInstances, affinityType, hcToCreate); err != nil { return nil, fmt.Errorf("failed to create target pool %s: %v", loadBalancerName, err) } if hcToCreate != nil { glog.Infof("EnsureLoadBalancer(%v(%v)): created health checks for target pool", loadBalancerName, serviceName) } if len(hosts) <= maxTargetPoolCreateInstances { glog.Infof("EnsureLoadBalancer(%v(%v)): created target pool", loadBalancerName, serviceName) } else { glog.Infof("EnsureLoadBalancer(%v(%v)): created initial target pool (now updating with %d hosts)", loadBalancerName, serviceName, len(hosts)-maxTargetPoolCreateInstances) created := sets.NewString() for _, host := range createInstances { created.Insert(host.makeComparableHostPath()) } if err := gce.updateTargetPool(loadBalancerName, created, hosts); err != nil { return nil, fmt.Errorf("failed to update target pool %s: %v", loadBalancerName, err) } glog.Infof("EnsureLoadBalancer(%v(%v)): updated target pool (with %d hosts)", loadBalancerName, serviceName, len(hosts)-maxTargetPoolCreateInstances) } } if tpNeedsUpdate || fwdRuleNeedsUpdate { glog.Infof("EnsureLoadBalancer(%v(%v)): creating forwarding rule, IP %s", loadBalancerName, serviceName, ipAddress) if err := gce.createForwardingRule(loadBalancerName, serviceName.String(), gce.region, ipAddress, ports); err != nil { return nil, fmt.Errorf("failed to create forwarding rule %s: %v", loadBalancerName, err) } // End critical section. It is safe to release the static IP (which // just demotes it to ephemeral) now that it is attached. In the case // of a user-requested IP, the "is user-owned" flag will be set, // preventing it from actually being released. 
isSafeToReleaseIP = true glog.Infof("EnsureLoadBalancer(%v(%v)): created forwarding rule, IP %s", loadBalancerName, serviceName, ipAddress) } status := &v1.LoadBalancerStatus{} status.Ingress = []v1.LoadBalancerIngress{{IP: ipAddress}} return status, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L919-L954
go
train
// Passing nil for requested IP is perfectly fine - it just means that no specific // IP is being requested. // Returns whether the forwarding rule exists, whether it needs to be updated, // what its IP address is (if it exists), and any error we encountered.
func (gce *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error)
// Passing nil for requested IP is perfectly fine - it just means that no specific // IP is being requested. // Returns whether the forwarding rule exists, whether it needs to be updated, // what its IP address is (if it exists), and any error we encountered. func (gce *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error)
{ fwd, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, "", nil } // Err on the side of caution in case of errors. Caller should notice the error and retry. // We never want to end up recreating resources because gce api flaked. return true, false, "", fmt.Errorf("error getting load balancer's forwarding rule: %v", err) } // If the user asks for a specific static ip through the Service spec, // check that we're actually using it. // TODO: we report loadbalancer IP through status, so we want to verify if // that matches the forwarding rule as well. if loadBalancerIP != "" && loadBalancerIP != fwd.IPAddress { glog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP) return true, true, fwd.IPAddress, nil } portRange, err := loadBalancerPortRange(ports) if err != nil { // Err on the side of caution in case of errors. Caller should notice the error and retry. // We never want to end up recreating resources because gce api flaked. return true, false, "", err } if portRange != fwd.PortRange { glog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange) return true, true, fwd.IPAddress, nil } // The service controller verified all the protocols match on the ports, just check the first one if string(ports[0].Protocol) != fwd.IPProtocol { glog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol)) return true, true, fwd.IPAddress, nil } return true, false, fwd.IPAddress, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L981-L1004
go
train
// Doesn't check whether the hosts have changed, since host updating is handled // separately.
func (gce *Cloud) targetPoolNeedsUpdate(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsUpdate bool, err error)
// Doesn't check whether the hosts have changed, since host updating is handled // separately. func (gce *Cloud) targetPoolNeedsUpdate(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsUpdate bool, err error)
{ tp, err := gce.service.TargetPools.Get(gce.projectID, region, name).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, nil } // Err on the side of caution in case of errors. Caller should notice the error and retry. // We never want to end up recreating resources because gce api flaked. return true, false, fmt.Errorf("error getting load balancer's target pool: %v", err) } // TODO: If the user modifies their Service's session affinity, it *should* // reflect in the associated target pool. However, currently not setting the // session affinity on a target pool defaults it to the empty string while // not setting in on a Service defaults it to None. There is a lack of // documentation around the default setting for the target pool, so if we // find it's the undocumented empty string, don't blindly recreate the // target pool (which results in downtime). Fix this when we have formally // defined the defaults on either side. if tp.SessionAffinity != "" && translateAffinityType(affinityType) != tp.SessionAffinity { glog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType) return true, true, nil } return true, false, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1007-L1017
go
train
// translate from what K8s supports to what the cloud provider supports for session affinity.
func translateAffinityType(affinityType v1.ServiceAffinity) string
// translate from what K8s supports to what the cloud provider supports for session affinity. func translateAffinityType(affinityType v1.ServiceAffinity) string
{ switch affinityType { case v1.ServiceAffinityClientIP: return gceAffinityTypeClientIP case v1.ServiceAffinityNone: return gceAffinityTypeNone default: glog.Errorf("Unexpected affinity type: %v", affinityType) return gceAffinityTypeNone } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1222-L1292
go
train
// ComputeHostTags grabs all tags from all instances being added to the pool. // * The longest tag that is a prefix of the instance name is used // * If any instance has no matching prefix tag, return error // Invoking this method to get host tags is risky since it depends on the format // of the host names in the cluster. Only use it as a fallback if gce.nodeTags // is unspecified
// ComputeHostTags grabs all tags from all instances being added to the pool.
// * The longest tag that is a prefix of the instance name is used
// * If any instance has no matching prefix tag, return error
// Invoking this method to get host tags is risky since it depends on the format
// of the host names in the cluster. Only use it as a fallback if gce.nodeTags
// is unspecified
func (gce *Cloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
	// TODO: We could store the tags in gceInstance, so we could have already fetched it
	hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup)
	nodeInstancePrefix := gce.nodeInstancePrefix
	for _, host := range hosts {
		if !strings.HasPrefix(host.Name, gce.nodeInstancePrefix) {
			// One non-conforming host disables the name filter for the whole
			// listing below, so no instance is missed.
			glog.Warningf("instance '%v' does not conform to prefix '%s', ignoring filter", host, gce.nodeInstancePrefix)
			nodeInstancePrefix = ""
		}

		z, ok := hostNamesByZone[host.Zone]
		if !ok {
			z = make(map[string]bool)
			hostNamesByZone[host.Zone] = z
		}
		z[host.Name] = true
	}

	tags := sets.NewString()

	for zone, hostNames := range hostNamesByZone {
		pageToken := ""
		page := 0
		// Walk the paginated Instances.List results; capped at maxPages.
		for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
			listCall := gce.service.Instances.List(gce.projectID, zone)

			if nodeInstancePrefix != "" {
				// Add the filter for hosts
				listCall = listCall.Filter("name eq " + nodeInstancePrefix + ".*")
			}

			// Add the fields we want
			// TODO(zmerlynn): Internal bug 29524655
			// listCall = listCall.Fields("items(name,tags)")

			if pageToken != "" {
				listCall = listCall.PageToken(pageToken)
			}

			res, err := listCall.Do()
			if err != nil {
				return nil, err
			}
			pageToken = res.NextPageToken
			for _, instance := range res.Items {
				// Skip instances that are not in the requested set.
				if !hostNames[instance.Name] {
					continue
				}

				// Pick the longest tag that is a prefix of the instance name.
				longestTag := ""
				for _, tag := range instance.Tags.Items {
					if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longestTag) {
						longestTag = tag
					}
				}
				if len(longestTag) > 0 {
					tags.Insert(longestTag)
				} else {
					return nil, fmt.Errorf("Could not find any tag that is a prefix of instance name for instance %s", instance.Name)
				}
			}
		}
		if page >= maxPages {
			glog.Errorf("computeHostTags exceeded maxPages=%d for Instances.List: truncating.", maxPages)
		}
	}
	if len(tags) == 0 {
		return nil, fmt.Errorf("No instances found")
	}
	return tags.List(), nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1361-L1378
go
train
// UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer.
func (gce *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
// UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer. func (gce *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
{ hosts, err := gce.getInstancesByNames(nodeNames(nodes)) if err != nil { return err } loadBalancerName := cloudprovider.GetLoadBalancerName(service) pool, err := gce.service.TargetPools.Get(gce.projectID, gce.region, loadBalancerName).Do() if err != nil { return err } existing := sets.NewString() for _, instance := range pool.Instances { existing.Insert(hostURLToComparablePath(instance)) } return gce.updateTargetPool(loadBalancerName, existing, hosts) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1432-L1469
go
train
// EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted.
// EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted.
// It tears down the firewall, static IP, forwarding rule, target pool, and
// (if the service used one) the HTTP health check for the load balancer.
func (gce *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	glog.V(2).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, loadBalancerName, gce.region)

	// Look up the HTTP health check (only present when the service declares a
	// health-check path) so it can be cleaned up with the target pool below.
	var hc *compute.HttpHealthCheck
	if path, _ := apiservice.GetServiceHealthCheckPathPort(service); path != "" {
		var err error
		hc, err = gce.GetHTTPHealthCheck(loadBalancerName)
		if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) {
			glog.Infof("Failed to retrieve health check %v:%v", loadBalancerName, err)
			return err
		}
	}

	// Delete the independent resources in parallel; errors are aggregated.
	errs := utilerrors.AggregateGoroutines(
		func() error { return gce.deleteFirewall(loadBalancerName, gce.region) },
		// Even though we don't hold on to static IPs for load balancers, it's
		// possible that EnsureLoadBalancer left one around in a failed
		// creation/update attempt, so make sure we clean it up here just in case.
		func() error { return gce.deleteStaticIP(loadBalancerName, gce.region) },
		func() error {
			// The forwarding rule must be deleted before either the target pool can,
			// unfortunately, so we have to do these two serially.
			if err := gce.deleteForwardingRule(loadBalancerName, gce.region); err != nil {
				return err
			}
			if err := gce.deleteTargetPool(loadBalancerName, gce.region, hc); err != nil {
				return err
			}
			return nil
		},
	)
	if errs != nil {
		return utilerrors.Flatten(errs)
	}
	return nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1488-L1494
go
train
// DeleteForwardingRule deletes the forwarding rule
func (gce *Cloud) DeleteForwardingRule(name string) error
// DeleteForwardingRule deletes the forwarding rule func (gce *Cloud) DeleteForwardingRule(name string) error
{ region, err := GetGCERegion(gce.localZone) if err != nil { return err } return gce.deleteForwardingRule(name, region) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1497-L1503
go
train
// DeleteTargetPool deletes the given target pool.
func (gce *Cloud) DeleteTargetPool(name string, hc *compute.HttpHealthCheck) error
// DeleteTargetPool deletes the given target pool. func (gce *Cloud) DeleteTargetPool(name string, hc *compute.HttpHealthCheck) error
{ region, err := GetGCERegion(gce.localZone) if err != nil { return err } return gce.deleteTargetPool(name, region, hc) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1575-L1577
go
train
// Firewall management: These methods are just passthrough to the existing // internal firewall creation methods used to manage TCPLoadBalancer. // GetFirewall returns the Firewall by name.
func (gce *Cloud) GetFirewall(name string) (*compute.Firewall, error)
// Firewall management: These methods are just passthrough to the existing // internal firewall creation methods used to manage TCPLoadBalancer. // GetFirewall returns the Firewall by name. func (gce *Cloud) GetFirewall(name string) (*compute.Firewall, error)
{ return gce.service.Firewalls.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1604-L1610
go
train
// DeleteFirewall deletes the given firewall rule.
func (gce *Cloud) DeleteFirewall(name string) error
// DeleteFirewall deletes the given firewall rule. func (gce *Cloud) DeleteFirewall(name string) error
{ region, err := GetGCERegion(gce.localZone) if err != nil { return err } return gce.deleteFirewall(name, region) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1614-L1635
go
train
// UpdateFirewall applies the given firewall rule as an update to an existing // firewall rule with the same name.
func (gce *Cloud) UpdateFirewall(name, desc string, sourceRanges netsets.IPNet, ports []int64, hostNames []string) error
// UpdateFirewall applies the given firewall rule as an update to an existing // firewall rule with the same name. func (gce *Cloud) UpdateFirewall(name, desc string, sourceRanges netsets.IPNet, ports []int64, hostNames []string) error
{ region, err := GetGCERegion(gce.localZone) if err != nil { return err } // TODO: This completely breaks modularity in the cloudprovider but the methods // shared with the TCPLoadBalancer take v1.ServicePorts. svcPorts := []v1.ServicePort{} // TODO: Currently the only consumer of this method is the GCE L7 // loadbalancer controller, which never needs a protocol other than TCP. // We should pipe through a mapping of port:protocol and default to TCP // if UDP ports are required. This means the method signature will change, // forcing downstream clients to refactor interfaces. for _, p := range ports { svcPorts = append(svcPorts, v1.ServicePort{Port: int32(p), Protocol: v1.ProtocolTCP}) } hosts, err := gce.getInstancesByNames(hostNames) if err != nil { return err } return gce.updateFirewall(name, region, desc, sourceRanges, svcPorts, hosts) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1643-L1653
go
train
// Global static IP management // ReserveGlobalStaticIP creates a global static IP. // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule.
func (gce *Cloud) ReserveGlobalStaticIP(name, ipAddress string) (address *compute.Address, err error)
// Global static IP management // ReserveGlobalStaticIP creates a global static IP. // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule. func (gce *Cloud) ReserveGlobalStaticIP(name, ipAddress string) (address *compute.Address, err error)
{ op, err := gce.service.GlobalAddresses.Insert(gce.projectID, &compute.Address{Name: name, Address: ipAddress}).Do() if err != nil { return nil, err } if err := gce.waitForGlobalOp(op); err != nil { return nil, err } // We have to get the address to know which IP was allocated for us. return gce.service.GlobalAddresses.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1656-L1662
go
train
// DeleteGlobalStaticIP deletes a global static IP by name.
func (gce *Cloud) DeleteGlobalStaticIP(name string) error
// DeleteGlobalStaticIP deletes a global static IP by name. func (gce *Cloud) DeleteGlobalStaticIP(name string) error
{ op, err := gce.service.GlobalAddresses.Delete(gce.projectID, name).Do() if err != nil { return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1665-L1667
go
train
// GetGlobalStaticIP returns the global static IP by name.
func (gce *Cloud) GetGlobalStaticIP(name string) (address *compute.Address, err error)
// GetGlobalStaticIP returns the global static IP by name. func (gce *Cloud) GetGlobalStaticIP(name string) (address *compute.Address, err error)
{ return gce.service.GlobalAddresses.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1672-L1674
go
train
// UrlMap management // GetURLMap returns the URLMap by name.
func (gce *Cloud) GetURLMap(name string) (*compute.UrlMap, error)
// UrlMap management // GetURLMap returns the URLMap by name. func (gce *Cloud) GetURLMap(name string) (*compute.UrlMap, error)
{ return gce.service.UrlMaps.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1677-L1690
go
train
// CreateURLMap creates an url map, using the given backend service as the default service.
func (gce *Cloud) CreateURLMap(backend *compute.BackendService, name string) (*compute.UrlMap, error)
// CreateURLMap creates an url map, using the given backend service as the default service. func (gce *Cloud) CreateURLMap(backend *compute.BackendService, name string) (*compute.UrlMap, error)
{ urlMap := &compute.UrlMap{ Name: name, DefaultService: backend.SelfLink, } op, err := gce.service.UrlMaps.Insert(gce.projectID, urlMap).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.GetURLMap(name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1693-L1702
go
train
// UpdateURLMap applies the given UrlMap as an update, and returns the new UrlMap.
func (gce *Cloud) UpdateURLMap(urlMap *compute.UrlMap) (*compute.UrlMap, error)
// UpdateURLMap applies the given UrlMap as an update, and returns the new UrlMap. func (gce *Cloud) UpdateURLMap(urlMap *compute.UrlMap) (*compute.UrlMap, error)
{ op, err := gce.service.UrlMaps.Update(gce.projectID, urlMap.Name, urlMap).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.service.UrlMaps.Get(gce.projectID, urlMap.Name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1717-L1720
go
train
// ListURLMaps lists all URLMaps in the project.
func (gce *Cloud) ListURLMaps() (*compute.UrlMapList, error)
// ListURLMaps lists all URLMaps in the project. func (gce *Cloud) ListURLMaps() (*compute.UrlMapList, error)
{ // TODO: use PageToken to list all not just the first 500 return gce.service.UrlMaps.List(gce.projectID).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1725-L1727
go
train
// TargetHttpProxy management // GetTargetHTTPProxy returns the UrlMap by name.
func (gce *Cloud) GetTargetHTTPProxy(name string) (*compute.TargetHttpProxy, error)
// TargetHttpProxy management // GetTargetHTTPProxy returns the UrlMap by name. func (gce *Cloud) GetTargetHTTPProxy(name string) (*compute.TargetHttpProxy, error)
{ return gce.service.TargetHttpProxies.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1730-L1743
go
train
// CreateTargetHTTPProxy creates and returns a TargetHTTPProxy with the given UrlMap.
func (gce *Cloud) CreateTargetHTTPProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error)
// CreateTargetHTTPProxy creates and returns a TargetHTTPProxy with the given UrlMap. func (gce *Cloud) CreateTargetHTTPProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error)
{ proxy := &compute.TargetHttpProxy{ Name: name, UrlMap: urlMap.SelfLink, } op, err := gce.service.TargetHttpProxies.Insert(gce.projectID, proxy).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.GetTargetHTTPProxy(name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1746-L1752
go
train
// SetURLMapForTargetHTTPProxy sets the given URLMap for the given TargetHTTPProxy.
func (gce *Cloud) SetURLMapForTargetHTTPProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error
// SetURLMapForTargetHTTPProxy sets the given URLMap for the given TargetHTTPProxy. func (gce *Cloud) SetURLMapForTargetHTTPProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error
{ op, err := gce.service.TargetHttpProxies.SetUrlMap(gce.projectID, proxy.Name, &compute.UrlMapReference{UrlMap: urlMap.SelfLink}).Do() if err != nil { return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1767-L1770
go
train
// ListTargetHTTPProxies lists all TargetHTTPProxies in the project.
func (gce *Cloud) ListTargetHTTPProxies() (*compute.TargetHttpProxyList, error)
// ListTargetHTTPProxies lists all TargetHTTPProxies in the project. func (gce *Cloud) ListTargetHTTPProxies() (*compute.TargetHttpProxyList, error)
{ // TODO: use PageToken to list all not just the first 500 return gce.service.TargetHttpProxies.List(gce.projectID).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1775-L1777
go
train
// TargetHttpsProxy management // GetTargetHTTPSProxy returns the URLMap by name.
func (gce *Cloud) GetTargetHTTPSProxy(name string) (*compute.TargetHttpsProxy, error)
// TargetHttpsProxy management // GetTargetHTTPSProxy returns the URLMap by name. func (gce *Cloud) GetTargetHTTPSProxy(name string) (*compute.TargetHttpsProxy, error)
{ return gce.service.TargetHttpsProxies.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1780-L1794
go
train
// CreateTargetHTTPSProxy creates and returns a TargetHTTPSProxy with the given URLMap and SslCertificate.
func (gce *Cloud) CreateTargetHTTPSProxy(urlMap *compute.UrlMap, sslCert *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error)
// CreateTargetHTTPSProxy creates and returns a TargetHTTPSProxy with the given URLMap and SslCertificate. func (gce *Cloud) CreateTargetHTTPSProxy(urlMap *compute.UrlMap, sslCert *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error)
{ proxy := &compute.TargetHttpsProxy{ Name: name, UrlMap: urlMap.SelfLink, SslCertificates: []string{sslCert.SelfLink}, } op, err := gce.service.TargetHttpsProxies.Insert(gce.projectID, proxy).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.GetTargetHTTPSProxy(name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1797-L1803
go
train
// SetURLMapForTargetHTTPSProxy sets the given URLMap for the given TargetHTTPSProxy.
func (gce *Cloud) SetURLMapForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error
// SetURLMapForTargetHTTPSProxy sets the given URLMap for the given TargetHTTPSProxy. func (gce *Cloud) SetURLMapForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error
{ op, err := gce.service.TargetHttpsProxies.SetUrlMap(gce.projectID, proxy.Name, &compute.UrlMapReference{UrlMap: urlMap.SelfLink}).Do() if err != nil { return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1806-L1812
go
train
// SetSslCertificateForTargetHTTPSProxy sets the given SslCertificate for the given TargetHTTPSProxy.
func (gce *Cloud) SetSslCertificateForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, sslCert *compute.SslCertificate) error
// SetSslCertificateForTargetHTTPSProxy sets the given SslCertificate for the given TargetHTTPSProxy. func (gce *Cloud) SetSslCertificateForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, sslCert *compute.SslCertificate) error
{ op, err := gce.service.TargetHttpsProxies.SetSslCertificates(gce.projectID, proxy.Name, &compute.TargetHttpsProxiesSetSslCertificatesRequest{SslCertificates: []string{sslCert.SelfLink}}).Do() if err != nil { return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1815-L1824
go
train
// DeleteTargetHTTPSProxy deletes the TargetHTTPSProxy by name.
func (gce *Cloud) DeleteTargetHTTPSProxy(name string) error
// DeleteTargetHTTPSProxy deletes the TargetHTTPSProxy by name. func (gce *Cloud) DeleteTargetHTTPSProxy(name string) error
{ op, err := gce.service.TargetHttpsProxies.Delete(gce.projectID, name).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return nil } return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1827-L1830
go
train
// ListTargetHTTPSProxies lists all TargetHTTPSProxies in the project.
func (gce *Cloud) ListTargetHTTPSProxies() (*compute.TargetHttpsProxyList, error)
// ListTargetHTTPSProxies lists all TargetHTTPSProxies in the project. func (gce *Cloud) ListTargetHTTPSProxies() (*compute.TargetHttpsProxyList, error)
{ // TODO: use PageToken to list all not just the first 500 return gce.service.TargetHttpsProxies.List(gce.projectID).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1835-L1837
go
train
// SSL Certificate management // GetSslCertificate returns the SslCertificate by name.
func (gce *Cloud) GetSslCertificate(name string) (*compute.SslCertificate, error)
// SSL Certificate management // GetSslCertificate returns the SslCertificate by name. func (gce *Cloud) GetSslCertificate(name string) (*compute.SslCertificate, error)
{ return gce.service.SslCertificates.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1840-L1849
go
train
// CreateSslCertificate creates and returns a SslCertificate.
func (gce *Cloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error)
// CreateSslCertificate creates and returns a SslCertificate. func (gce *Cloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error)
{ op, err := gce.service.SslCertificates.Insert(gce.projectID, sslCerts).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.GetSslCertificate(sslCerts.Name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1864-L1867
go
train
// ListSslCertificates lists all SslCertificates in the project.
func (gce *Cloud) ListSslCertificates() (*compute.SslCertificateList, error)
// ListSslCertificates lists all SslCertificates in the project. func (gce *Cloud) ListSslCertificates() (*compute.SslCertificateList, error)
{ // TODO: use PageToken to list all not just the first 500 return gce.service.SslCertificates.List(gce.projectID).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1873-L1889
go
train
// GlobalForwardingRule management // CreateGlobalForwardingRule creates and returns a GlobalForwardingRule that points to the given TargetHttp(s)Proxy. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy.
func (gce *Cloud) CreateGlobalForwardingRule(targetProxyLink, ip, name, portRange string) (*compute.ForwardingRule, error)
// GlobalForwardingRule management // CreateGlobalForwardingRule creates and returns a GlobalForwardingRule that points to the given TargetHttp(s)Proxy. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy. func (gce *Cloud) CreateGlobalForwardingRule(targetProxyLink, ip, name, portRange string) (*compute.ForwardingRule, error)
{ rule := &compute.ForwardingRule{ Name: name, IPAddress: ip, Target: targetProxyLink, PortRange: portRange, IPProtocol: "TCP", } op, err := gce.service.GlobalForwardingRules.Insert(gce.projectID, rule).Do() if err != nil { return nil, err } if err = gce.waitForGlobalOp(op); err != nil { return nil, err } return gce.GetGlobalForwardingRule(name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1893-L1899
go
train
// SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy.
func (gce *Cloud) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, targetProxyLink string) error
// SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy. func (gce *Cloud) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, targetProxyLink string) error
{ op, err := gce.service.GlobalForwardingRules.SetTarget(gce.projectID, fw.Name, &compute.TargetReference{Target: targetProxyLink}).Do() if err != nil { return err } return gce.waitForGlobalOp(op) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1914-L1916
go
train
// GetGlobalForwardingRule returns the GlobalForwardingRule by name.
func (gce *Cloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
// GetGlobalForwardingRule returns the GlobalForwardingRule by name. func (gce *Cloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
{ return gce.service.GlobalForwardingRules.Get(gce.projectID, name).Do() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/gce/gce.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/gce/gce.go#L1919-L1922
go
train
// ListGlobalForwardingRules lists all GlobalForwardingRules in the project.
func (gce *Cloud) ListGlobalForwardingRules() (*compute.ForwardingRuleList, error)
// ListGlobalForwardingRules lists all GlobalForwardingRules in the project. func (gce *Cloud) ListGlobalForwardingRules() (*compute.ForwardingRuleList, error)
{ // TODO: use PageToken to list all not just the first 500 return gce.service.GlobalForwardingRules.List(gce.projectID).Do() }