repo
stringlengths
5
67
sha
stringlengths
40
40
path
stringlengths
4
234
url
stringlengths
85
339
language
stringclasses
6 values
split
stringclasses
3 values
doc
stringlengths
3
51.2k
sign
stringlengths
5
8.01k
problem
stringlengths
13
51.2k
output
stringlengths
0
3.87M
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1388-L1406
go
train
// Gets the full information about this volume from the EC2 API
func (d *awsDisk) describeVolume() (*ec2.Volume, error)
// Gets the full information about this volume from the EC2 API func (d *awsDisk) describeVolume() (*ec2.Volume, error)
{ volumeID := d.awsID request := &ec2.DescribeVolumesInput{ VolumeIds: []*string{volumeID.awsString()}, } volumes, err := d.ec2.DescribeVolumes(request) if err != nil { return nil, fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) } if len(volumes) == 0 { return nil, fmt.Errorf("no volumes found for volume %q", volumeID) } if len(volumes) > 1 { return nil, fmt.Errorf("multiple volumes found for volume %q", volumeID) } return volumes[0], nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1410-L1466
go
train
// waitForAttachmentStatus polls until the attachment status is the expected value
// On success, it returns the last attachment state.
//
// A volume with no attachments at all is treated as status "detached".
// The actual tolerated error streak is volumeAttachmentStatusConsecutiveErrorLimit;
// a single successful describe resets the counter.
func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, error) {
	backoff := wait.Backoff{
		Duration: volumeAttachmentStatusInitialDelay,
		Factor:   volumeAttachmentStatusFactor,
		Steps:    volumeAttachmentStatusSteps,
	}

	// Because of rate limiting, we often see errors from describeVolume
	// So we tolerate a limited number of failures.
	// But once we see more than 10 errors in a row, we return the error
	describeErrorCount := 0
	// attachment is captured by the closure below; it holds the last
	// attachment observed and is what the caller receives.
	var attachment *ec2.VolumeAttachment

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		info, err := d.describeVolume()
		if err != nil {
			describeErrorCount++
			if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit {
				// report the error
				return false, err
			}
			glog.Warningf("Ignoring error from describe volume; will retry: %q", err)
			return false, nil
		}
		// Successful describe: reset the consecutive-error counter.
		describeErrorCount = 0
		if len(info.Attachments) > 1 {
			// Shouldn't happen; log so we know if it is
			glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
		}
		attachmentStatus := ""
		for _, a := range info.Attachments {
			if attachmentStatus != "" {
				// Shouldn't happen; log so we know if it is
				glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
			}
			if a.State != nil {
				// When multiple attachments exist, the last one with a
				// non-nil state wins.
				attachment = a
				attachmentStatus = *a.State
			} else {
				// Shouldn't happen; log so we know if it is
				glog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a)
			}
		}
		if attachmentStatus == "" {
			// No attachments reported at all: treat as detached.
			attachmentStatus = "detached"
		}
		if attachmentStatus == status {
			// Attachment is in requested state, finish waiting
			return true, nil
		}
		// continue waiting
		glog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status)
		return false, nil
	})
	return attachment, err
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1469-L1484
go
train
// Deletes the EBS disk
func (d *awsDisk) deleteVolume() (bool, error)
// Deletes the EBS disk func (d *awsDisk) deleteVolume() (bool, error)
{ request := &ec2.DeleteVolumeInput{VolumeId: d.awsID.awsString()} _, err := d.ec2.DeleteVolume(request) if err != nil { if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "InvalidVolume.NotFound" { return false, nil } if awsError.Code() == "VolumeInUse" { return false, volume.NewDeletedVolumeInUseError(err.Error()) } } return false, fmt.Errorf("error deleting EBS volume %q: %v", d.awsID, err) } return true, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1513-L1527
go
train
// Gets the awsInstance with for the node with the specified nodeName, or the 'self' instance if nodeName == ""
func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error)
// Gets the awsInstance with for the node with the specified nodeName, or the 'self' instance if nodeName == "" func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error)
{ var awsInstance *awsInstance if nodeName == "" { awsInstance = c.selfAWSInstance } else { instance, err := c.getInstanceByNodeName(nodeName) if err != nil { return nil, err } awsInstance = newAWSInstance(c.ec2, instance) } return awsInstance, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1530-L1614
go
train
// AttachDisk implements Volumes.AttachDisk
//
// Returns the host-side device path (/dev/xvdX) of the attached volume.
// Read-only attach is rejected: EBS has no read-only attachment mode.
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, info, err := c.getFullInstance(nodeName)
	if err != nil {
		return "", fmt.Errorf("error finding instance %s: %v", nodeName, err)
	}
	if readOnly {
		// TODO: We could enforce this when we mount the volume (?)
		// TODO: We could also snapshot the volume and attach copies of it
		return "", errors.New("AWS volumes cannot be mounted read-only")
	}
	// mountDevice will hold the device where we should try to attach the disk
	var mountDeviceName mountDevice
	// alreadyAttached is true if we have already called AttachVolume on this disk
	var alreadyAttached bool
	// attachEnded is set to true if the attach operation completed
	// (successfully or not), and is thus no longer in progress
	attachEnded := false
	// The deferred closure captures attachEnded and mountDeviceName by
	// reference, so it sees the values assigned later in this function.
	defer func() {
		if attachEnded {
			if !c.endAttaching(awsInstance, disk.awsID, mountDeviceName) {
				glog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID)
			}
		}
	}()
	mountDeviceName, alreadyAttached, err = c.getMountDevice(awsInstance, info, disk.awsID, true)
	if err != nil {
		return "", err
	}
	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDeviceName)
	// We are using xvd names (so we are HVM only)
	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
	ec2Device := "/dev/xvd" + string(mountDeviceName)
	if !alreadyAttached {
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   disk.awsID.awsString(),
		}
		attachResponse, attachErr := c.ec2.AttachVolume(request)
		if attachErr != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", fmt.Errorf("Error attaching EBS volume %q to instance %q: %v", disk.awsID, awsInstance.awsID, attachErr)
		}
		glog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse)
	}
	attachment, err := disk.waitForAttachmentStatus("attached")
	if err != nil {
		// NOTE(review): attachEnded stays false here, so the deferred
		// endAttaching is intentionally skipped — the attach may still be
		// in progress. Confirm this matches endAttaching's contract.
		return "", err
	}
	// The attach operation has finished
	attachEnded = true
	// Double check the attachment to be 100% sure we attached the correct volume at the correct mountpoint
	// It could happen otherwise that we see the volume attached from a previous/separate AttachVolume call,
	// which could theoretically be against a different device (or even instance).
	if attachment == nil {
		// Impossible?
		return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
	}
	if ec2Device != aws.StringValue(attachment.Device) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
	}
	if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
	}
	return hostDevice, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1617-L1677
go
train
// DetachDisk implements Volumes.DetachDisk
//
// Returns the host-side device path the disk was attached at. If the
// instance no longer exists the detach is treated as a no-op.
func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, info, err := c.getFullInstance(nodeName)
	if err != nil {
		if err == cloudprovider.ErrInstanceNotFound {
			// If instance no longer exists, safe to assume volume is not attached.
			glog.Warningf(
				"Instance %q does not exist. DetachDisk will assume disk %q is not attached to it.",
				nodeName,
				diskName)
			return "", nil
		}
		return "", err
	}
	mountDeviceName, alreadyAttached, err := c.getMountDevice(awsInstance, info, disk.awsID, false)
	if err != nil {
		return "", err
	}
	if !alreadyAttached {
		glog.Warningf("DetachDisk called on non-attached disk: %s", diskName)
		// TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call?
	}
	request := ec2.DetachVolumeInput{
		InstanceId: &awsInstance.awsID,
		VolumeId:   disk.awsID.awsString(),
	}
	response, err := c.ec2.DetachVolume(&request)
	if err != nil {
		return "", fmt.Errorf("error detaching EBS volume %q from %q: %v", disk.awsID, awsInstance.awsID, err)
	}
	if response == nil {
		return "", errors.New("no response from DetachVolume")
	}
	// Block until EC2 reports the volume detached.
	attachment, err := disk.waitForAttachmentStatus("detached")
	if err != nil {
		return "", err
	}
	if attachment != nil {
		// We expect it to be nil, it is (maybe) interesting if it is not
		glog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment)
	}
	if mountDeviceName != "" {
		c.endAttaching(awsInstance, disk.awsID, mountDeviceName)
		// We don't check the return value - we don't really expect the attachment to have been
		// in progress, though it might have been
	}
	// NOTE(review): if mountDeviceName is empty this returns the bare
	// prefix "/dev/xvd" — confirm callers tolerate that.
	hostDevicePath := "/dev/xvd" + string(mountDeviceName)
	// err is nil here (checked above); returned for symmetry.
	return hostDevicePath, err
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1680-L1760
go
train
// CreateDisk implements Volumes.CreateDisk
//
// Chooses an AZ if none was requested, maps the requested volume type to
// an EC2 volume type (computing provisioned IOPS for io1), creates the
// volume, and applies the cluster tags. If tagging fails the volume is
// deleted again so no untagged volume is leaked.
func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, error) {
	allZones, err := c.getCandidateZonesForDynamicVolume()
	if err != nil {
		return "", fmt.Errorf("error querying for all zones: %v", err)
	}
	createAZ := volumeOptions.AvailabilityZone
	if createAZ == "" {
		// No AZ requested: spread volumes across zones by PVC name.
		createAZ = util.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
	}
	var createType string
	var iops int64
	switch volumeOptions.VolumeType {
	case VolumeTypeGP2, VolumeTypeSC1, VolumeTypeST1:
		createType = volumeOptions.VolumeType
	case VolumeTypeIO1:
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html
		// for IOPS constraints. AWS will throw an error if IOPS per GB gets out
		// of supported bounds, no need to check it here.
		createType = volumeOptions.VolumeType
		iops = int64(volumeOptions.CapacityGB * volumeOptions.IOPSPerGB)
		// Cap at min/max total IOPS, AWS would throw an error if it gets too
		// low/high.
		if iops < MinTotalIOPS {
			iops = MinTotalIOPS
		}
		if iops > MaxTotalIOPS {
			iops = MaxTotalIOPS
		}
	case "":
		createType = DefaultVolumeType
	default:
		return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType)
	}
	// TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?)
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = aws.String(createAZ)
	request.Size = aws.Int64(int64(volumeOptions.CapacityGB))
	request.VolumeType = aws.String(createType)
	request.Encrypted = aws.Bool(volumeOptions.Encrypted)
	if len(volumeOptions.KmsKeyID) > 0 {
		request.KmsKeyId = aws.String(volumeOptions.KmsKeyID)
		// A KMS key implies encryption, overriding the Encrypted option.
		request.Encrypted = aws.Bool(true)
	}
	if iops > 0 {
		request.Iops = aws.Int64(iops)
	}
	if len(volumeOptions.SnapshotID) > 0 {
		request.SnapshotId = aws.String(volumeOptions.SnapshotID)
	}
	response, err := c.ec2.CreateVolume(request)
	if err != nil {
		return "", err
	}
	awsID := awsVolumeID(aws.StringValue(response.VolumeId))
	if awsID == "" {
		return "", fmt.Errorf("VolumeID was not returned by CreateVolume")
	}
	volumeName := KubernetesVolumeID("aws://" + aws.StringValue(response.AvailabilityZone) + "/" + string(awsID))
	// apply tags
	if err := c.tagging.createTags(c.ec2, string(awsID), ResourceLifecycleOwned, volumeOptions.Tags); err != nil {
		// delete the volume and hope it succeeds
		_, delerr := c.DeleteDisk(volumeName)
		if delerr != nil {
			// delete did not succeed, we have a stray volume!
			return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %v", volumeName, delerr)
		}
		return "", fmt.Errorf("error tagging volume %s: %v", volumeName, err)
	}
	return volumeName, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1763-L1769
go
train
// DeleteDisk implements Volumes.DeleteDisk
func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error)
// DeleteDisk implements Volumes.DeleteDisk func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error)
{ awsDisk, err := newAWSDisk(c, volumeName) if err != nil { return false, err } return awsDisk.deleteVolume() }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1772-L1795
go
train
// GetVolumeLabels implements Volumes.GetVolumeLabels
func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error)
// GetVolumeLabels implements Volumes.GetVolumeLabels func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error)
{ awsDisk, err := newAWSDisk(c, volumeName) if err != nil { return nil, err } info, err := awsDisk.describeVolume() if err != nil { return nil, err } labels := make(map[string]string) az := aws.StringValue(info.AvailabilityZone) if az == "" { return nil, fmt.Errorf("volume did not have AZ information: %q", *info.VolumeId) } labels[apis.LabelZoneFailureDomain] = az region, err := azToRegion(az) if err != nil { return nil, err } labels[apis.LabelZoneRegion] = region return labels, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1814-L1841
go
train
// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error)
// DiskIsAttached implements Volumes.DiskIsAttached func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error)
{ _, instance, err := c.getFullInstance(nodeName) if err != nil { if err == cloudprovider.ErrInstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. glog.Warningf( "Instance %q does not exist. DiskIsAttached will assume disk %q is not attached to it.", nodeName, diskName) return false, nil } return false, err } diskID, err := diskName.mapToAWSVolumeID() if err != nil { return false, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err) } for _, blockDevice := range instance.BlockDeviceMappings { id := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) if id == diskID { return true, nil } } return false, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1844-L1908
go
train
// DisksAreAttached checks whether disks are attached
//
// For each node in nodeDisks it reports, per disk, whether that disk is
// attached to that specific node. Every requested disk appears in the
// result, initialized to false; entries flip to true only when found in
// the node's block device mappings.
func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) {
	attached := make(map[types.NodeName]map[KubernetesVolumeID]bool)
	if len(nodeDisks) == 0 {
		return attached, nil
	}
	dnsNameSlice := []string{}
	// Pre-populate every (node, disk) pair as not-attached, and collect
	// the DNS names for a single batched instance lookup.
	for nodeName, diskNames := range nodeDisks {
		for _, diskName := range diskNames {
			setNodeDisk(attached, diskName, nodeName, false)
		}
		dnsNameSlice = append(dnsNameSlice, mapNodeNameToPrivateDNSName(nodeName))
	}
	awsInstances, err := c.getInstancesByNodeNames(dnsNameSlice)
	if err != nil {
		// When there is an error fetching instance information
		// it is safer to return nil and let volume information not be touched.
		return nil, err
	}
	if len(awsInstances) == 0 {
		glog.V(2).Infof("DisksAreAttached will assume no disks are attached to any node on AWS cluster.")
		return attached, nil
	}
	awsInstanceMap := make(map[types.NodeName]*ec2.Instance)
	for _, awsInstance := range awsInstances {
		awsInstanceMap[mapInstanceToNodeName(awsInstance)] = awsInstance
	}
	// Note that we check that the volume is attached to the correct node, not that it is attached to _a_ node
	for nodeName, diskNames := range nodeDisks {
		awsInstance := awsInstanceMap[nodeName]
		if awsInstance == nil {
			// If instance no longer exists, safe to assume volume is not attached.
			glog.Warningf(
				"Node %q does not exist. DisksAreAttached will assume disks %v are not attached to it.",
				nodeName,
				diskNames)
			continue
		}
		// Index this node's requested disks by AWS volume ID for O(1)
		// matching against the block device mappings.
		idToDiskName := make(map[awsVolumeID]KubernetesVolumeID)
		for _, diskName := range diskNames {
			volumeID, err := diskName.mapToAWSVolumeID()
			if err != nil {
				return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
			}
			idToDiskName[volumeID] = diskName
		}
		for _, blockDevice := range awsInstance.BlockDeviceMappings {
			// NOTE(review): assumes every mapping has non-nil Ebs — confirm
			// against the EC2 response shape.
			volumeID := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
			diskName, found := idToDiskName[volumeID]
			if found {
				// Disk is still attached to node
				setNodeDisk(attached, diskName, nodeName, true)
			}
		}
	}
	return attached, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1911-L1937
go
train
// CreateSnapshot creates an EBS volume snapshot
func (c *Cloud) CreateSnapshot(snapshotOptions *SnapshotOptions) (snapshotID string, status string, err error)
// CreateSnapshot creates an EBS volume snapshot func (c *Cloud) CreateSnapshot(snapshotOptions *SnapshotOptions) (snapshotID string, status string, err error)
{ request := &ec2.CreateSnapshotInput{} request.VolumeId = aws.String(snapshotOptions.VolumeID) request.DryRun = aws.Bool(false) descriptions := "Created by Kubernetes for volume " + snapshotOptions.VolumeID request.Description = aws.String(descriptions) res, err := c.ec2.CreateSnapshot(request) if err != nil { return "", "", err } if res == nil { return "", "", fmt.Errorf("nil CreateSnapshotResponse") } if snapshotOptions.Tags != nil { awsID := awsVolumeID(aws.StringValue(res.SnapshotId)) // apply tags if err := c.tagging.createTags(c.ec2, string(awsID), ResourceLifecycleOwned, *snapshotOptions.Tags); err != nil { _, delerr := c.DeleteSnapshot(*res.SnapshotId) if delerr != nil { return "", "", fmt.Errorf("error tagging snapshot %s, could not delete the snapshot: %v", *res.SnapshotId, delerr) } return "", "", fmt.Errorf("error tagging snapshot %s: %v", *res.SnapshotId, err) } } return *res.SnapshotId, *res.State, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1940-L1949
go
train
// DeleteSnapshot deletes an EBS volume snapshot
func (c *Cloud) DeleteSnapshot(snapshotID string) (bool, error)
// DeleteSnapshot deletes an EBS volume snapshot func (c *Cloud) DeleteSnapshot(snapshotID string) (bool, error)
{ request := &ec2.DeleteSnapshotInput{} request.SnapshotId = aws.String(snapshotID) _, err := c.ec2.DeleteSnapshot(request) if err != nil { return false, err } return true, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1952-L1978
go
train
// DescribeSnapshot returns the status of the snapshot
func (c *Cloud) DescribeSnapshot(snapshotID string) (status string, isCompleted bool, err error)
// DescribeSnapshot returns the status of the snapshot func (c *Cloud) DescribeSnapshot(snapshotID string) (status string, isCompleted bool, err error)
{ request := &ec2.DescribeSnapshotsInput{ SnapshotIds: []*string{ aws.String(snapshotID), }, } result, err := c.ec2.DescribeSnapshots(request) if err != nil { return "", false, err } if len(result) != 1 { return "", false, fmt.Errorf("wrong result from DescribeSnapshots: %#v", result) } if result[0].State == nil { return "", false, fmt.Errorf("missing state from DescribeSnapshots: %#v", result) } if *result[0].State == ec2.SnapshotStateCompleted { return *result[0].State, true, nil } if *result[0].State == ec2.SnapshotStateError { return *result[0].State, false, fmt.Errorf("snapshot state is error: %s", *result[0].StateMessage) } if *result[0].State == ec2.SnapshotStatePending { return *result[0].State, false, nil } return *result[0].State, false, fmt.Errorf("unknown state") }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L1981-L2004
go
train
// FindSnapshot returns the found snapshot
func (c *Cloud) FindSnapshot(tags map[string]string) ([]string, []string, error)
// FindSnapshot returns the found snapshot func (c *Cloud) FindSnapshot(tags map[string]string) ([]string, []string, error)
{ request := &ec2.DescribeSnapshotsInput{} for k, v := range tags { filter := &ec2.Filter{} filter.SetName(k) filter.SetValues([]*string{&v}) request.Filters = append(request.Filters, filter) } result, err := c.ec2.DescribeSnapshots(request) if err != nil { return nil, nil, err } var snapshotIDs, statuses []string for _, snapshot := range result { id := *snapshot.SnapshotId status := *snapshot.State glog.Infof("found %s, status %s", id, status) snapshotIDs = append(snapshotIDs, id) statuses = append(statuses, status) } return snapshotIDs, statuses, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2054-L2075
go
train
// Retrieves the specified security group from the AWS API, or returns nil if not found
func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, error)
// Retrieves the specified security group from the AWS API, or returns nil if not found func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, error)
{ describeSecurityGroupsRequest := &ec2.DescribeSecurityGroupsInput{ GroupIds: []*string{&securityGroupID}, } // We don't apply our tag filters because we are retrieving by ID groups, err := c.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) if err != nil { glog.Warningf("Error retrieving security group: %q", err) return nil, err } if len(groups) == 0 { return nil, nil } if len(groups) != 1 { // This should not be possible - ids should be unique return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupID) } group := groups[0] return group, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2156-L2221
go
train
// Makes sure the security group ingress is exactly the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) {
	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		glog.Warning("Error retrieving security group", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupID)
	}
	glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions)
	actual := NewIPPermissionSet(group.IpPermissions...)
	// EC2 groups rules together, for example combining:
	//
	// { Port=80, Range=[A] } and { Port=80, Range=[B] }
	//
	// into { Port=80, Range=[A,B] }
	//
	// We have to ungroup them, because otherwise the logic becomes really
	// complicated, and also because if we have Range=[A,B] and we try to
	// add Range=[A] then EC2 complains about a duplicate rule.
	permissions = permissions.Ungroup()
	actual = actual.Ungroup()
	// Compute the set diff in both directions: rules to revoke and rules
	// to authorize.
	remove := actual.Difference(permissions)
	add := permissions.Difference(actual)
	if add.Len() == 0 && remove.Len() == 0 {
		// Already in the desired state: report "no changes made".
		return false, nil
	}
	// TODO: There is a limit in VPC of 100 rules per security group, so we
	// probably should try grouping or combining to fit under this limit.
	// But this is only used on the ELB security group currently, so it
	// would require (ports * CIDRS) > 100.  Also, it isn't obvious exactly
	// how removing single permissions from compound rules works, and we
	// don't want to accidentally open more than intended while we're
	// applying changes.
	if add.Len() != 0 {
		glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List())
		request := &ec2.AuthorizeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = add.List()
		_, err = c.ec2.AuthorizeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error authorizing security group ingress: %v", err)
		}
	}
	if remove.Len() != 0 {
		glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List())
		request := &ec2.RevokeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = remove.List()
		_, err = c.ec2.RevokeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error revoking security group ingress: %v", err)
		}
	}
	return true, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2466-L2534
go
train
// Finds the subnets to use for an ELB we are creating.
// Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets.
// Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that.
//
// At most one subnet is selected per availability zone; ties within an AZ
// are broken by the internal/public ELB role tag, otherwise arbitrarily.
func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) {
	vpcIDFilter := newEc2Filter("vpc-id", c.vpcID)
	subnets, err := c.findSubnets()
	if err != nil {
		return nil, err
	}
	// Route tables are needed to decide whether each subnet is public.
	rRequest := &ec2.DescribeRouteTablesInput{}
	rRequest.Filters = []*ec2.Filter{vpcIDFilter}
	rt, err := c.ec2.DescribeRouteTables(rRequest)
	if err != nil {
		return nil, fmt.Errorf("error describe route table: %v", err)
	}
	subnetsByAZ := make(map[string]*ec2.Subnet)
	for _, subnet := range subnets {
		az := aws.StringValue(subnet.AvailabilityZone)
		id := aws.StringValue(subnet.SubnetId)
		if az == "" || id == "" {
			glog.Warningf("Ignoring subnet with empty az/id: %v", subnet)
			continue
		}
		isPublic, err := isSubnetPublic(rt, id)
		if err != nil {
			return nil, err
		}
		if !internalELB && !isPublic {
			glog.V(2).Infof("Ignoring private subnet for public ELB %q", id)
			continue
		}
		existing := subnetsByAZ[az]
		if existing == nil {
			// First candidate seen for this AZ wins by default.
			subnetsByAZ[az] = subnet
			continue
		}
		// Try to break the tie using a tag
		var tagName string
		if internalELB {
			tagName = TagNameSubnetInternalELB
		} else {
			tagName = TagNameSubnetPublicELB
		}
		_, existingHasTag := findTag(existing.Tags, tagName)
		_, subnetHasTag := findTag(subnet.Tags, tagName)
		if existingHasTag != subnetHasTag {
			// Exactly one of the two is tagged for this ELB role; prefer it.
			if subnetHasTag {
				subnetsByAZ[az] = subnet
			}
			continue
		}
		// TODO: Should this be an error?
		glog.Warningf("Found multiple subnets in AZ %q; making arbitrary choice between subnets %q and %q", az, *existing.SubnetId, *subnet.SubnetId)
		continue
	}
	// Map iteration order is random, so the returned slice order is
	// nondeterministic across calls.
	var subnetIDs []string
	for _, subnet := range subnetsByAZ {
		subnetIDs = append(subnetIDs, aws.StringValue(subnet.SubnetId))
	}
	return subnetIDs, nil
}
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2653-L2923
go
train
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
{ annotations := apiService.Annotations glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, annotations) if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone { // ELB supports sticky sessions, but only when configured for HTTP/HTTPS return nil, fmt.Errorf("unsupported load balancer affinity: %v", apiService.Spec.SessionAffinity) } if len(apiService.Spec.Ports) == 0 { return nil, fmt.Errorf("requested load balancer with no ports") } // Figure out what mappings we want on the load balancer listeners := []*elb.Listener{} portList := getPortSets(annotations[ServiceAnnotationLoadBalancerSSLPorts]) for _, port := range apiService.Spec.Ports { if port.Protocol != v1.ProtocolTCP { return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB") } if port.NodePort == 0 { glog.Errorf("Ignoring port without NodePort defined: %v", port) continue } listener, err := buildListener(port, annotations, portList) if err != nil { return nil, err } listeners = append(listeners, listener) } if apiService.Spec.LoadBalancerIP != "" { return nil, fmt.Errorf("LoadBalancerIP cannot be specified for AWS ELB") } instances, err := c.getInstancesByNodeNamesCached(nodeNames(nodes)) if err != nil { return nil, err } sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService) if err != nil { return nil, err } // Determine if this is tagged as an Internal ELB internalELB := false internalAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerInternal] if internalAnnotation != "" { if internalAnnotation != "0.0.0.0/0" { return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is 0.0.0.0/0", ServiceAnnotationLoadBalancerInternal, internalAnnotation) } if !service.IsAllowAll(sourceRanges) { // TODO: Unify the two annotations return nil, fmt.Errorf("source-range annotation cannot be combined with 
the internal-elb annotation") } internalELB = true } // Determine if we need to set the Proxy protocol policy proxyProtocol := false proxyProtocolAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerProxyProtocol] if proxyProtocolAnnotation != "" { if proxyProtocolAnnotation != "*" { return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is '*'", ServiceAnnotationLoadBalancerProxyProtocol, proxyProtocolAnnotation) } proxyProtocol = true } // Some load balancer attributes are required, so defaults are set. These can be overridden by annotations. loadBalancerAttributes := &elb.LoadBalancerAttributes{ AccessLog: &elb.AccessLog{Enabled: aws.Bool(false)}, ConnectionDraining: &elb.ConnectionDraining{Enabled: aws.Bool(false)}, ConnectionSettings: &elb.ConnectionSettings{IdleTimeout: aws.Int64(60)}, CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{Enabled: aws.Bool(false)}, } // Determine if an access log emit interval has been specified accessLogEmitIntervalAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEmitInterval] if accessLogEmitIntervalAnnotation != "" { accessLogEmitInterval, parseErr := strconv.ParseInt(accessLogEmitIntervalAnnotation, 10, 64) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerAccessLogEmitInterval, accessLogEmitIntervalAnnotation, ) } loadBalancerAttributes.AccessLog.EmitInterval = &accessLogEmitInterval } // Determine if access log enabled/disabled has been specified accessLogEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEnabled] if accessLogEnabledAnnotation != "" { accessLogEnabled, parseErr := strconv.ParseBool(accessLogEnabledAnnotation) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerAccessLogEnabled, accessLogEnabledAnnotation, ) } loadBalancerAttributes.AccessLog.Enabled = &accessLogEnabled } // Determine if access 
log s3 bucket name has been specified accessLogS3BucketNameAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketName] if accessLogS3BucketNameAnnotation != "" { loadBalancerAttributes.AccessLog.S3BucketName = &accessLogS3BucketNameAnnotation } // Determine if access log s3 bucket prefix has been specified accessLogS3BucketPrefixAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix] if accessLogS3BucketPrefixAnnotation != "" { loadBalancerAttributes.AccessLog.S3BucketPrefix = &accessLogS3BucketPrefixAnnotation } // Determine if connection draining enabled/disabled has been specified connectionDrainingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingEnabled] if connectionDrainingEnabledAnnotation != "" { connectionDrainingEnabled, parseErr := strconv.ParseBool(connectionDrainingEnabledAnnotation) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerConnectionDrainingEnabled, connectionDrainingEnabledAnnotation, ) } loadBalancerAttributes.ConnectionDraining.Enabled = &connectionDrainingEnabled } // Determine if connection draining timeout has been specified connectionDrainingTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingTimeout] if connectionDrainingTimeoutAnnotation != "" { connectionDrainingTimeout, parseErr := strconv.ParseInt(connectionDrainingTimeoutAnnotation, 10, 64) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerConnectionDrainingTimeout, connectionDrainingTimeoutAnnotation, ) } loadBalancerAttributes.ConnectionDraining.Timeout = &connectionDrainingTimeout } // Determine if connection idle timeout has been specified connectionIdleTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionIdleTimeout] if connectionIdleTimeoutAnnotation != "" { connectionIdleTimeout, parseErr := 
strconv.ParseInt(connectionIdleTimeoutAnnotation, 10, 64) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerConnectionIdleTimeout, connectionIdleTimeoutAnnotation, ) } loadBalancerAttributes.ConnectionSettings.IdleTimeout = &connectionIdleTimeout } // Determine if cross zone load balancing enabled/disabled has been specified crossZoneLoadBalancingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled] if crossZoneLoadBalancingEnabledAnnotation != "" { crossZoneLoadBalancingEnabled, parseErr := strconv.ParseBool(crossZoneLoadBalancingEnabledAnnotation) if parseErr != nil { return nil, fmt.Errorf("error parsing service annotation: %s=%s", ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled, crossZoneLoadBalancingEnabledAnnotation, ) } loadBalancerAttributes.CrossZoneLoadBalancing.Enabled = &crossZoneLoadBalancingEnabled } // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { glog.Error("Error listing subnets in VPC: ", err) return nil, err } // Bail out early if there are no subnets if len(subnetIDs) == 0 { return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB") } loadBalancerName := cloudprovider.GetLoadBalancerName(apiService) serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} // Create a security group for the load balancer var securityGroupID string { sgName := "k8s-elb-" + loadBalancerName sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription) if err != nil { glog.Error("Error creating load balancer security group: ", err) return nil, err } ec2SourceRanges := []*ec2.IpRange{} for _, sourceRange := range sourceRanges.StringSlice() { ec2SourceRanges = append(ec2SourceRanges, &ec2.IpRange{CidrIp: 
aws.String(sourceRange)}) } permissions := NewIPPermissionSet() for _, port := range apiService.Spec.Ports { portInt64 := int64(port.Port) protocol := strings.ToLower(string(port.Protocol)) permission := &ec2.IpPermission{} permission.FromPort = &portInt64 permission.ToPort = &portInt64 permission.IpRanges = ec2SourceRanges permission.IpProtocol = &protocol permissions.Insert(permission) } // Allow ICMP fragmentation packets, important for MTU discovery { permission := &ec2.IpPermission{ IpProtocol: aws.String("icmp"), FromPort: aws.Int64(3), ToPort: aws.Int64(4), IpRanges: []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}, } permissions.Insert(permission) } _, err = c.setSecurityGroupIngress(securityGroupID, permissions) if err != nil { return nil, err } } securityGroupIDs := []string{securityGroupID} // Build the load balancer itself loadBalancer, err := c.ensureLoadBalancer( serviceName, loadBalancerName, listeners, subnetIDs, securityGroupIDs, internalELB, proxyProtocol, loadBalancerAttributes, ) if err != nil { return nil, err } err = c.ensureLoadBalancerHealthCheck(loadBalancer, listeners) if err != nil { return nil, err } err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { glog.Warningf("Error opening ingress rules for the load balancer to the instances: %v", err) return nil, err } err = c.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { glog.Warningf("Error registering instances with the load balancer: %v", err) return nil, err } glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, orEmpty(loadBalancer.DNSName)) // TODO: Wait for creation? status := toStatus(loadBalancer) return status, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2926-L2939
go
train
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (c *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error)
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer func (c *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error)
{ loadBalancerName := cloudprovider.GetLoadBalancerName(service) lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return nil, false, err } if lb == nil { return nil, false, nil } status := toStatus(lb) return status, true, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L2957-L2995
go
train
// Returns the first security group for an instance, or nil // We only create instances with one security group, so we don't expect multiple security groups. // However, if there are multiple security groups, we will choose the one tagged with our cluster filter. // Otherwise we will return an error.
func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error)
// Returns the first security group for an instance, or nil // We only create instances with one security group, so we don't expect multiple security groups. // However, if there are multiple security groups, we will choose the one tagged with our cluster filter. // Otherwise we will return an error. func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error)
{ instanceID := aws.StringValue(instance.InstanceId) var tagged []*ec2.GroupIdentifier var untagged []*ec2.GroupIdentifier for _, group := range instance.SecurityGroups { groupID := aws.StringValue(group.GroupId) if groupID == "" { glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) continue } _, isTagged := taggedSecurityGroups[groupID] if isTagged { tagged = append(tagged, group) } else { untagged = append(untagged, group) } } if len(tagged) > 0 { // We create instances with one SG // If users create multiple SGs, they must tag one of them as being k8s owned if len(tagged) != 1 { return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged", instanceID) } return tagged[0], nil } if len(untagged) > 0 { // For back-compat, we will allow a single untagged SG if len(untagged) != 1 { return nil, fmt.Errorf("Multiple untagged security groups found for instance %s; ensure the k8s security group is tagged", instanceID) } return untagged[0], nil } glog.Warningf("No security group found for instance %q", instanceID) return nil, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3024-L3153
go
train
// Open security group ingress rules on the instances so that the load balancer can talk to them // Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances
func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error
// Open security group ingress rules on the instances so that the load balancer can talk to them // Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error
{ if c.cfg.Global.DisableSecurityGroupIngress { return nil } // Determine the load balancer security group id loadBalancerSecurityGroupID := "" for _, securityGroup := range lb.SecurityGroups { if isNilOrEmpty(securityGroup) { continue } if loadBalancerSecurityGroupID != "" { // We create LBs with one SG glog.Warningf("Multiple security groups for load balancer: %q", orEmpty(lb.LoadBalancerName)) } loadBalancerSecurityGroupID = *securityGroup } if loadBalancerSecurityGroupID == "" { return fmt.Errorf("Could not determine security group for load balancer: %s", orEmpty(lb.LoadBalancerName)) } // Get the actual list of groups that allow ingress from the load-balancer var actualGroups []*ec2.SecurityGroup { describeRequest := &ec2.DescribeSecurityGroupsInput{} filters := []*ec2.Filter{ newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupID), } describeRequest.Filters = c.tagging.addFilters(filters) response, err := c.ec2.DescribeSecurityGroups(describeRequest) if err != nil { return fmt.Errorf("error querying security groups for ELB: %v", err) } for _, sg := range response { if !c.tagging.hasClusterTag(sg.Tags) { continue } actualGroups = append(actualGroups, sg) } } taggedSecurityGroups, err := c.getTaggedSecurityGroups() if err != nil { return fmt.Errorf("error querying for tagged security groups: %v", err) } // Open the firewall from the load balancer to the instance // We don't actually have a trivial way to know in advance which security group the instance is in // (it is probably the node security group, but we don't easily have that). // However, we _do_ have the list of security groups on the instance records. 
// Map containing the changes we want to make; true to add, false to remove instanceSecurityGroupIds := map[string]bool{} // Scan instances for groups we want open for _, instance := range allInstances { securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups) if err != nil { return err } if securityGroup == nil { glog.Warning("Ignoring instance without security group: ", orEmpty(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { glog.Warningf("found security group without id: %v", securityGroup) continue } instanceSecurityGroupIds[id] = true } // Compare to actual groups for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { glog.Warning("Ignoring group without ID: ", actualGroup) continue } adding, found := instanceSecurityGroupIds[actualGroupID] if found && adding { // We don't need to make a change; the permission is already in place delete(instanceSecurityGroupIds, actualGroupID) } else { // This group is not needed by allInstances; delete it instanceSecurityGroupIds[actualGroupID] = false } } for instanceSecurityGroupID, add := range instanceSecurityGroupIds { if add { glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } else { glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } sourceGroupID := &ec2.UserIdGroupPair{} sourceGroupID.GroupId = &loadBalancerSecurityGroupID allProtocols := "-1" permission := &ec2.IpPermission{} permission.IpProtocol = &allProtocols permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupID} permissions := []*ec2.IpPermission{permission} if add { changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, permissions) if err != nil { return err } if !changed { glog.Warning("Allowing ingress was not 
needed; concurrent change? groupId=", instanceSecurityGroupID) } } else { changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions) if err != nil { return err } if !changed { glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3156-L3249
go
train
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted. func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
{ loadBalancerName := cloudprovider.GetLoadBalancerName(service) lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err } if lb == nil { glog.Info("Load balancer already deleted: ", loadBalancerName) return nil } { // De-authorize the load balancer security group from the instances security group err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { glog.Error("Error deregistering load balancer from instance security groups: ", err) return err } } { // Delete the load balancer itself request := &elb.DeleteLoadBalancerInput{} request.LoadBalancerName = lb.LoadBalancerName _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted glog.Error("Error deleting load balancer: ", err) return err } } { // Delete the security group(s) for the load balancer // Note that this is annoying: the load balancer disappears from the API immediately, but it is still // deleting in the background. 
We get a DependencyViolation until the load balancer has deleted itself // Collect the security groups to delete securityGroupIDs := map[string]struct{}{} for _, securityGroupID := range lb.SecurityGroups { if isNilOrEmpty(securityGroupID) { glog.Warning("Ignoring empty security group in ", service.Name) continue } securityGroupIDs[*securityGroupID] = struct{}{} } // Loop through and try to delete them timeoutAt := time.Now().Add(time.Second * 600) for { for securityGroupID := range securityGroupIDs { request := &ec2.DeleteSecurityGroupInput{} request.GroupId = &securityGroupID _, err := c.ec2.DeleteSecurityGroup(request) if err == nil { delete(securityGroupIDs, securityGroupID) } else { ignore := false if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "DependencyViolation" { glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) ignore = true } } if !ignore { return fmt.Errorf("error while deleting load balancer security group (%s): %v", securityGroupID, err) } } } if len(securityGroupIDs) == 0 { glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) break } if time.Now().After(timeoutAt) { ids := []string{} for id := range securityGroupIDs { ids = append(ids, id) } return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ",")) } glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) time.Sleep(10 * time.Second) } } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3252-L3279
go
train
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer
func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
{ instances, err := c.getInstancesByNodeNamesCached(nodeNames(nodes)) if err != nil { return err } loadBalancerName := cloudprovider.GetLoadBalancerName(service) lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err } if lb == nil { return fmt.Errorf("Load balancer not found") } err = c.ensureLoadBalancerInstances(orEmpty(lb.LoadBalancerName), lb.Instances, instances) if err != nil { return nil } err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, instances) if err != nil { return err } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3282-L3296
go
train
// Returns the instance with the specified ID
func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error)
// Returns the instance with the specified ID func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error)
{ instances, err := c.getInstancesByIDs([]*string{&instanceID}) if err != nil { return nil, err } if len(instances) == 0 { return nil, cloudprovider.ErrInstanceNotFound } if len(instances) > 1 { return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID) } return instances[instanceID], nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3328-L3353
go
train
// Fetches and caches instances by node names; returns an error if any cannot be found. // This is implemented with a multi value filter on the node names, fetching the desired instances with a single query. // TODO(therc): make all the caching more rational during the 1.4 timeframe
func (c *Cloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Instance, error)
// Fetches and caches instances by node names; returns an error if any cannot be found. // This is implemented with a multi value filter on the node names, fetching the desired instances with a single query. // TODO(therc): make all the caching more rational during the 1.4 timeframe func (c *Cloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Instance, error)
{ c.mutex.Lock() defer c.mutex.Unlock() if nodeNames.Equal(c.lastNodeNames) { if len(c.lastInstancesByNodeNames) > 0 { // We assume that if the list of nodes is the same, the underlying // instances have not changed. Later we might guard this with TTLs. glog.V(2).Infof("Returning cached instances for %v", nodeNames) return c.lastInstancesByNodeNames, nil } } instances, err := c.getInstancesByNodeNames(nodeNames.List()) if err != nil { return nil, err } if len(instances) == 0 { return nil, nil } glog.V(2).Infof("Caching instances for %v", nodeNames) c.lastNodeNames = nodeNames c.lastInstancesByNodeNames = instances return instances, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/aws.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/aws.go#L3437-L3443
go
train
// Returns the instance with the specified node name // Like findInstanceByNodeName, but returns error if node not found
func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error)
// Returns the instance with the specified node name // Like findInstanceByNodeName, but returns error if node not found func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error)
{ instance, err := c.findInstanceByNodeName(nodeName) if err == nil && instance == nil { return nil, cloudprovider.ErrInstanceNotFound } return instance, err }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/glusterfs/pkg/volume/provision.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/glusterfs/pkg/volume/provision.go#L44-L47
go
train
// NewGlusterfsProvisioner creates a new glusterfs simple provisioner
func NewGlusterfsProvisioner(config *rest.Config, client kubernetes.Interface) controller.Provisioner
// NewGlusterfsProvisioner creates a new glusterfs simple provisioner func NewGlusterfsProvisioner(config *rest.Config, client kubernetes.Interface) controller.Provisioner
{ klog.Infof("Creating NewGlusterfsProvisioner.") return newGlusterfsProvisionerInternal(config, client) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/cache/cache.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/cache/cache.go#L41-L47
go
train
// GetPV returns the PV object given the PV name
func (cache *VolumeCache) GetPV(pvName string) (*v1.PersistentVolume, bool)
// GetPV returns the PV object given the PV name func (cache *VolumeCache) GetPV(pvName string) (*v1.PersistentVolume, bool)
{ cache.mutex.Lock() defer cache.mutex.Unlock() pv, exists := cache.pvs[pvName] return pv, exists }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/cache/cache.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/cache/cache.go#L50-L56
go
train
// AddPV adds the PV object to the cache
func (cache *VolumeCache) AddPV(pv *v1.PersistentVolume)
// AddPV adds the PV object to the cache func (cache *VolumeCache) AddPV(pv *v1.PersistentVolume)
{ cache.mutex.Lock() defer cache.mutex.Unlock() cache.pvs[pv.Name] = pv glog.Infof("Added pv %q to cache", pv.Name) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/cache/cache.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/cache/cache.go#L68-L74
go
train
// DeletePV deletes the PV object from the cache
func (cache *VolumeCache) DeletePV(pvName string)
// DeletePV deletes the PV object from the cache func (cache *VolumeCache) DeletePV(pvName string)
{ cache.mutex.Lock() defer cache.mutex.Unlock() delete(cache.pvs, pvName) glog.Infof("Deleted pv %q from cache", pvName) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/cache/cache.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/cache/cache.go#L77-L86
go
train
// ListPVs returns a list of all the PVs in the cache
func (cache *VolumeCache) ListPVs() []*v1.PersistentVolume
// ListPVs returns a list of all the PVs in the cache func (cache *VolumeCache) ListPVs() []*v1.PersistentVolume
{ cache.mutex.Lock() defer cache.mutex.Unlock() pvs := []*v1.PersistentVolume{} for _, pv := range cache.pvs { pvs = append(pvs, pv) } return pvs }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/server/server.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/server/server.go#L77-L120
go
train
// Setup sets up various prerequisites and settings for the server. If an error // is encountered at any point it returns it instantly
func Setup(ganeshaConfig string, gracePeriod uint) error
// Setup sets up various prerequisites and settings for the server. If an error // is encountered at any point it returns it instantly func Setup(ganeshaConfig string, gracePeriod uint) error
{ // Start rpcbind if it is not started yet cmd := exec.Command("/usr/sbin/rpcinfo", "127.0.0.1") if err := cmd.Run(); err != nil { cmd = exec.Command("/usr/sbin/rpcbind", "-w") if out, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("Starting rpcbind failed with error: %v, output: %s", err, out) } } cmd = exec.Command("/usr/sbin/rpc.statd") if out, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("rpc.statd failed with error: %v, output: %s", err, out) } // Start dbus, needed for ganesha dynamic exports cmd = exec.Command("dbus-daemon", "--system") if out, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("dbus-daemon failed with error: %v, output: %s", err, out) } err := setRlimitNOFILE() if err != nil { glog.Warningf("Error setting RLIMIT_NOFILE, there may be 'Too many open files' errors later: %v", err) } // Use defaultGaneshaConfigContents if the ganeshaConfig doesn't exist yet if _, err = os.Stat(ganeshaConfig); os.IsNotExist(err) { err = ioutil.WriteFile(ganeshaConfig, defaultGaneshaConfigContents, 0600) if err != nil { return fmt.Errorf("error writing ganesha config %s: %v", ganeshaConfig, err) } } err = setGracePeriod(ganeshaConfig, gracePeriod) if err != nil { return fmt.Errorf("error setting grace period to ganesha config: %v", err) } err = setFsidDevice(ganeshaConfig, true) if err != nil { return fmt.Errorf("error setting fsid device to ganesha config: %v", err) } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs/pkg/server/server.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs/pkg/server/server.go#L127-L136
go
train
// Run : run the NFS server in the foreground until it exits // Ideally, it should never exit when run in foreground mode // We force foreground to allow the provisioner process to restart // the server if it crashes - daemonization prevents us from using Wait() // for this purpose
func Run(ganeshaLog, ganeshaPid, ganeshaConfig string) error
// Run : run the NFS server in the foreground until it exits // Ideally, it should never exit when run in foreground mode // We force foreground to allow the provisioner process to restart // the server if it crashes - daemonization prevents us from using Wait() // for this purpose func Run(ganeshaLog, ganeshaPid, ganeshaConfig string) error
{ // Start ganesha.nfsd glog.Infof("Running NFS server!") cmd := exec.Command("ganesha.nfsd", "-F", "-L", ganeshaLog, "-p", ganeshaPid, "-f", ganeshaConfig) if out, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("ganesha.nfsd failed with error: %v, output: %s", err, out) } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L150-L155
go
train
//NewGlusterBlockProvisioner create a new provisioner.
func NewGlusterBlockProvisioner(client kubernetes.Interface, id string) controller.Provisioner
//NewGlusterBlockProvisioner create a new provisioner. func NewGlusterBlockProvisioner(client kubernetes.Interface, id string) controller.Provisioner
{ return &glusterBlockProvisioner{ client: client, identity: id, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L167-L302
go
train
// Provision creates a storage asset and returns a PV object representing it.
func (p *glusterBlockProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
// Provision creates a storage asset and returns a PV object representing it. func (p *glusterBlockProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
{ var err error if options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim Selector is not supported") } if !util.AccessModesContainedInAll(p.GetAccessModes(), options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", options.PVC.Spec.AccessModes, p.GetAccessModes()) } klog.V(4).Infof("VolumeOptions %v", options) cfg, parseErr := parseClassParameters(options.Parameters, p.client) if parseErr != nil { return nil, fmt.Errorf("failed to parse storage class parameters: %v", parseErr) } klog.V(4).Infof("creating volume with configuration %+v", *cfg) // Calculate the size volSize := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := volSize.Value() volszInt := int(util.RoundUpToGiB(volSizeBytes)) // Create gluster block Volume blockVolName := "" if cfg.opMode == glusterBlockOpmode { blockVolName = blockVolPrefix + string(uuid.NewUUID()) } if cfg.volumeNamePrefix != "" { blockVolName = fmt.Sprintf("%s_%s_%s_%s", cfg.volumeNamePrefix, options.PVC.Namespace, options.PVC.Name, uuid.NewUUID()) } blockVol, createErr := p.createVolume(volszInt, blockVolName, cfg) if createErr != nil { return nil, fmt.Errorf("failed to create volume: %v", createErr) } iscsiVol := &iscsiSpec{} if blockVol != nil { blockVol.iscsiSpec = iscsiVol } storeErr := p.storeFieldsFromResponse(blockVolName, cfg, blockVol, iscsiVol) if storeErr != nil { return nil, fmt.Errorf("failed to store response fields to iscsi volume spec: %v", storeErr) } sortErr := p.sortTargetPortal(iscsiVol) if sortErr != nil { return nil, fmt.Errorf("failed to fetch Target Portal %v from iscsi volume spec", sortErr) } if iscsiVol.TargetPortal == "" || iscsiVol.Iqn == "" { return nil, fmt.Errorf("failed to create volume, Target portal/IQN is nil in iscsi volume spec") } klog.V(1).Infof("Volume configuration %+v", blockVol) secretRef := &v1.SecretReference{} if cfg.chapAuthEnabled && iscsiVol.User != "" && iscsiVol.AuthKey != "" 
{ nameSpace := options.PVC.Namespace secretName := "glusterblk-" + iscsiVol.User + "-secret" secretRef, err = p.createSecretRef(nameSpace, secretName, iscsiVol.User, iscsiVol.AuthKey) if err != nil { klog.Errorf("failed to create CHAP auth credentials for pv: %v", err) return nil, fmt.Errorf("failed to create CHAP auth credentials for pv: %v", err) } iscsiVol.SessionCHAPAuth = cfg.chapAuthEnabled iscsiVol.BlockSecret = secretName iscsiVol.BlockSecretNs = nameSpace } else if !(cfg.chapAuthEnabled) { klog.V(1).Infof("CHAP authentication is not requested for this PV") iscsiVol.SessionCHAPAuth = false secretRef = nil } else { klog.Errorf("chapauth enabled - but CHAP credentials are missing in the %v response", cfg.opMode) return nil, fmt.Errorf("chapauth enabled - but CHAP credentials are missing in the %v response", cfg.opMode) } var blockString []string modeAnn := "" if cfg.opMode == glusterBlockOpmode { for k, v := range cfg.blockModeArgs { blockString = append(blockString, k+":"+v) modeAnn = dstrings.Join(blockString, ",") } } else { blockString = nil modeAnn = "url:" + cfg.url + "," + "user:" + cfg.user + "," + "secret:" + cfg.restSecretName + "," + "secretnamespace:" + cfg.restSecretNamespace } volMode := options.PVC.Spec.VolumeMode pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: options.PVName, Annotations: map[string]string{ provisionerIDAnn: p.identity, provisionerVersion: provisionerVersion, shareIDAnn: iscsiVol.BlockVolName, creatorAnn: cfg.opMode, volumeTypeAnn: "block", "Description": descAnn, "Blockstring": modeAnn, "AccessKey": iscsiVol.BlockSecret, "AccessKeyNs": iscsiVol.BlockSecretNs, volIDAnn: iscsiVol.VolumeID, }, }, Spec: v1.PersistentVolumeSpec{ VolumeMode: volMode, PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, AccessModes: options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, 
PersistentVolumeSource: v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIPersistentVolumeSource{ TargetPortal: iscsiVol.TargetPortal, Portals: iscsiVol.Portals, IQN: iscsiVol.Iqn, Lun: 0, FSType: "xfs", ReadOnly: false, SessionCHAPAuth: iscsiVol.SessionCHAPAuth, SecretRef: secretRef, }, }, }, } klog.V(1).Infof("successfully created Gluster Block volume %+v", pv.Spec.PersistentVolumeSource.ISCSI) return pv, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L336-L371
go
train
//createSecretRef() creates a secret reference.
func (p *glusterBlockProvisioner) createSecretRef(nameSpace string, secretName string, user string, password string) (*v1.SecretReference, error)
//createSecretRef() creates a secret reference. func (p *glusterBlockProvisioner) createSecretRef(nameSpace string, secretName string, user string, password string) (*v1.SecretReference, error)
{ var err error secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: nameSpace, Name: secretName, }, Data: map[string][]byte{ "node.session.auth.username": []byte(user), "node.session.auth.password": []byte(password), }, Type: chapType, } secretRef := &v1.SecretReference{} if secret != nil { _, err = p.client.CoreV1().Secrets(nameSpace).Create(secret) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("secret %s already exist in namespace %s", secret, nameSpace) err = nil } if err != nil { return nil, fmt.Errorf("failed to create secret %s, %v", secret, err) } if secretRef != nil { secretRef.Name = secretName klog.V(1).Infof("secret %v and secretRef %v", secret, secretRef) } } else { return nil, fmt.Errorf("secret is nil") } return secretRef, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L374-L405
go
train
// createVolume creates a gluster block volume i.e. the storage asset.
func (p *glusterBlockProvisioner) createVolume(volSizeInt int, blockVol string, config *provisionerConfig) (*glusterBlockVolume, error)
// createVolume creates a gluster block volume i.e. the storage asset. func (p *glusterBlockProvisioner) createVolume(volSizeInt int, blockVol string, config *provisionerConfig) (*glusterBlockVolume, error)
{ blockRes := &glusterBlockVolume{} sizeStr := strconv.Itoa(volSizeInt) haCountStr := strconv.Itoa(config.haCount) klog.V(2).Infof("create block volume of size %d and configuration %+v", volSizeInt, config) // Possible opModes are gluster-block and heketi: switch config.opMode { // An experimental/Test Mode: case glusterBlockOpmode: gBlockCreateErr := p.glusterBlockExecCreate(blockRes, config, sizeStr, haCountStr, blockVol) if gBlockCreateErr != nil { klog.Errorf("gluster block volume creation failed: %v", gBlockCreateErr) return nil, fmt.Errorf("gluster block volume creation failed: %v", gBlockCreateErr) } case heketiOpmode: hBlockCreateErr := p.heketiBlockVolCreate(blockRes, config, volSizeInt, haCountStr, blockVol) if hBlockCreateErr != nil { klog.Errorf("heketi block volume creation failed: %v", hBlockCreateErr) return nil, fmt.Errorf("heketi block volume creation failed: %v", hBlockCreateErr) } default: return nil, fmt.Errorf("error parsing value for 'opmode' for volume plugin %s", provisionerName) } return blockRes, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L529-L637
go
train
// Delete removes the storage asset that was created by Provision represented // by the given PV.
func (p *glusterBlockProvisioner) Delete(volume *v1.PersistentVolume) error
// Delete removes the storage asset that was created by Provision represented // by the given PV. func (p *glusterBlockProvisioner) Delete(volume *v1.PersistentVolume) error
{ config := &provisionerConfig{} config.blockModeArgs = make(map[string]string) heketiModeArgs := make(map[string]string) ann, ok := volume.Annotations[provisionerIDAnn] if !ok { return fmt.Errorf("identity annotation not found on PV") } if ann != p.identity { return &controller.IgnoredError{Reason: "identity annotation on PV does not match this provisioners identity"} } delBlockVolName, ok := volume.Annotations[shareIDAnn] if !ok { return fmt.Errorf("share annotation not found on PV") } delBlockString, ok := volume.Annotations["Blockstring"] delBlockStrSlice := dstrings.Split(delBlockString, ",") config.opMode = volume.Annotations[creatorAnn] for _, v := range delBlockStrSlice { if v != "" { s := dstrings.Split(v, ":") if config.opMode == glusterBlockOpmode { config.blockModeArgs[s[0]] = s[1] } else { if s[0] == "url" { heketiModeArgs[s[0]] = dstrings.Join(s[1:], ":") } else { heketiModeArgs[s[0]] = s[1] } } } } // Delete this blockVol klog.V(1).Infof("blockVolume %v to be deleted", delBlockVolName) //Call subjected volume delete operation. 
switch config.opMode { case glusterBlockOpmode: klog.V(1).Infof("Deleting Volume %v ", delBlockVolName) deleteCmd := exec.Command( config.opMode, "delete", config.blockModeArgs["glustervol"]+"/"+delBlockVolName, "--json") _, cmdErr := deleteCmd.CombinedOutput() if cmdErr != nil { klog.Errorf("error %v when running gluster-block command %v", cmdErr, deleteCmd) return cmdErr } klog.V(1).Infof("successfully deleted Volume %v", delBlockVolName) case heketiOpmode: klog.V(1).Infof("opmode[heketi]: deleting Volume %v", delBlockVolName) heketiModeArgs["restsecretvalue"] = "" if heketiModeArgs["secret"] != "" && heketiModeArgs["secretnamespace"] != "" { var err error heketiModeArgs["restsecretvalue"], err = parseSecret(heketiModeArgs["secretnamespace"], heketiModeArgs["secret"], p.client) if err != nil { klog.Errorf("[heketi]: failed to parse secret %s : Error, %v", heketiModeArgs["secret"], err) return err } } cli := gcli.NewClient(heketiModeArgs["url"], heketiModeArgs["user"], heketiModeArgs["restsecretvalue"]) if cli == nil { klog.Errorf("[heketi]: failed to create REST client") return fmt.Errorf("[heketi]: failed to create REST client, REST server authentication failed") } volumeID, err := getVolumeID(volume, delBlockVolName) if err != nil { return fmt.Errorf("failed to get volumeID, err: %v", err) } deleteErr := cli.BlockVolumeDelete(volumeID) if deleteErr != nil { if dstrings.Contains(deleteErr.Error(), errIDNotFound) { klog.Errorf("[heketi]: failed to find volume ID %v in database, manual intervention required", volumeID) return fmt.Errorf("[heketi]: failed to find volume ID %v in database : %v", volumeID, deleteErr) } klog.Errorf("[heketi]: failed to delete gluster block volume %v: %v", delBlockVolName, deleteErr) return fmt.Errorf("[heketi]: failed to delete glusterblock volume %v: %v", delBlockVolName, deleteErr) } klog.V(1).Infof("[heketi]: successfully deleted Volume %v", delBlockVolName) default: klog.Errorf("Unknown OpMode, failed to delete volume %v", 
delBlockVolName) } if volume.Annotations["AccessKey"] != "" && volume.Annotations["AccessKeyNs"] != "" { deleteSecErr := p.client.CoreV1().Secrets(volume.Annotations["AccessKeyNs"]).Delete(volume.Annotations["AccessKey"], nil) if deleteSecErr != nil && errors.IsNotFound(deleteSecErr) { klog.V(1).Infof("secret %s does not exist in namespace %s", volume.Annotations["AccessKey"], volume.Annotations["AccessKeyNs"]) deleteSecErr = nil } if deleteSecErr != nil { klog.Errorf("failed to delete secret %v/%v: %v", volume.Annotations["AccessKey"], volume.Annotations["AccessKeyNs"], deleteSecErr) return fmt.Errorf("failed to delete secret %v/%v: %v", volume.Annotations["AccessKey"], volume.Annotations["AccessKeyNs"], deleteSecErr) } } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L640-L657
go
train
// getVolumeID returns volumeID from the PV or volumename.
func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error)
// getVolumeID returns volumeID from the PV or volumename. func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error)
{ volumeID := "" // Get volID from pvspec if available, else fill it from volumename. if pv != nil { if pv.Annotations[volIDAnn] != "" { volumeID = pv.Annotations[volIDAnn] } else { volumeID = dstrings.TrimPrefix(volumeName, blockVolPrefix) } } else { return volumeID, fmt.Errorf("provided PV spec is nil") } if volumeID == "" { return volumeID, fmt.Errorf("volume ID is empty") } return volumeID, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go#L660-L681
go
train
//sortTargetPortal extract TP
func (p *glusterBlockProvisioner) sortTargetPortal(vol *iscsiSpec) error
//sortTargetPortal extract TP func (p *glusterBlockProvisioner) sortTargetPortal(vol *iscsiSpec) error
{ if len(vol.Portals) == 0 { return fmt.Errorf("portal is empty") } if len(vol.Portals) == 1 && vol.Portals[0] != "" { vol.TargetPortal = vol.Portals[0] vol.Portals = nil } else { portals := vol.Portals vol.Portals = nil for _, v := range portals { if v != "" && vol.TargetPortal == "" { vol.TargetPortal = v continue } else { vol.Portals = append(vol.Portals, v) } } } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/util/volume_util_unsupported.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/util/volume_util_unsupported.go#L32-L34
go
train
// IsBlock for unsupported platform returns error.
func (u *volumeUtil) IsBlock(fullPath string) (bool, error)
// IsBlock for unsupported platform returns error. func (u *volumeUtil) IsBlock(fullPath string) (bool, error)
{ return false, fmt.Errorf("IsBlock is unsupported in this build") }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/volumes.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/volumes.go#L46-L84
go
train
// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error)
// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error)
{ // name looks like aws://availability-zone/awsVolumeId // The original idea of the URL-style name was to put the AZ into the // host, so we could find the AZ immediately from the name without // querying the API. But it turns out we don't actually need it for // multi-AZ clusters, as we put the AZ into the labels on the PV instead. // However, if in future we want to support multi-AZ cluster // volume-awareness without using PersistentVolumes, we likely will // want the AZ in the host. s := string(name) if !strings.HasPrefix(s, "aws://") { // Assume a bare aws volume id (vol-1234...) // Build a URL with an empty host (AZ) s = "aws://" + "" + "/" + s } url, err := url.Parse(s) if err != nil { // TODO: Maybe we should pass a URL into the Volume functions return "", fmt.Errorf("Invalid disk name (%s): %v", name, err) } if url.Scheme != "aws" { return "", fmt.Errorf("Invalid scheme for AWS volume (%s)", name) } awsID := url.Path awsID = strings.Trim(awsID, "/") // We sanity check the resulting volume; the two known formats are // vol-12345678 and vol-12345678abcdef01 // TODO: Regex match? if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") { return "", fmt.Errorf("Invalid format for AWS volume (%s)", name) } return awsVolumeID(awsID), nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/volume/awsebs/processor.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/volume/awsebs/processor.go#L110-L115
go
train
// FindSnapshot finds a VolumeSnapshot by matching metadata
func (a *awsEBSPlugin) FindSnapshot(tags *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error)
// FindSnapshot finds a VolumeSnapshot by matching metadata func (a *awsEBSPlugin) FindSnapshot(tags *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error)
{ glog.Infof("FindSnapshot by tags: %#v", *tags) // TODO: Implement FindSnapshot return nil, nil, fmt.Errorf("Snapshot not found") }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack.go#L250-L294
go
train
// LoadBalancer returns a load balancer
func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool)
// LoadBalancer returns a load balancer func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool)
{ glog.V(4).Info("openstack.LoadBalancer() called") // TODO: Search for and support Rackspace loadbalancer API, and others. network, err := os.NewNetworkV2() if err != nil { return nil, false } compute, err := os.NewComputeV2() if err != nil { return nil, false } lbVersion := os.lbOpts.LBVersion if lbVersion == "" { // No version specified, try newest supported by server netExts, err := networkExtensions(network) if err != nil { glog.Warningf("Failed to list neutron extensions: %v", err) return nil, false } if netExts["lbaasv2"] { lbVersion = "v2" } else if netExts["lbaas"] { lbVersion = "v1" } else { glog.Warningf("Failed to find neutron LBaaS extension (v1 or v2)") return nil, false } glog.V(3).Infof("Using LBaaS extension %v", lbVersion) } glog.V(1).Info("Claiming to support LoadBalancer") if lbVersion == "v2" { return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true } else if lbVersion == "v1" { return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true } else { glog.Warningf("Config error: unrecognised lb-version \"%v\"", lbVersion) return nil, false } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack.go#L297-L299
go
train
// ScrubDNS filters DNS settings for pods.
func (os *OpenStack) ScrubDNS(nameServers, searches []string) ([]string, []string)
// ScrubDNS filters DNS settings for pods. func (os *OpenStack) ScrubDNS(nameServers, searches []string) ([]string, []string)
{ return nameServers, searches }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack.go#L497-L530
go
train
// Routes returns cloud provider routes
func (os *OpenStack) Routes() (cloudprovider.Routes, bool)
// Routes returns cloud provider routes func (os *OpenStack) Routes() (cloudprovider.Routes, bool)
{ glog.V(4).Info("openstack.Routes() called") network, err := os.NewNetworkV2() if err != nil { return nil, false } netExts, err := networkExtensions(network) if err != nil { glog.Warningf("Failed to list neutron extensions: %v", err) return nil, false } if !netExts["extraroute"] { glog.V(3).Infof("Neutron extraroute extension not found, required for Routes support") return nil, false } compute, err := os.NewComputeV2() if err != nil { return nil, false } r, err := NewRoutes(compute, network, os.routeOpts) if err != nil { glog.Warningf("Error initialising Routes support: %v", err) return nil, false } glog.V(1).Info("Claiming to support Routes") return r, true }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack.go#L533-L537
go
train
// Zones returns cloud provider zones
func (os *OpenStack) Zones() (cloudprovider.Zones, bool)
// Zones returns cloud provider zones func (os *OpenStack) Zones() (cloudprovider.Zones, bool)
{ glog.V(1).Info("Claiming to support Zones") return os, true }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/openstack/openstack.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/openstack/openstack.go#L540-L553
go
train
// GetZone gets a zone from cloud provider
func (os *OpenStack) GetZone() (cloudprovider.Zone, error)
// GetZone gets a zone from cloud provider func (os *OpenStack) GetZone() (cloudprovider.Zone, error)
{ md, err := getMetadata() if err != nil { return cloudprovider.Zone{}, err } zone := cloudprovider.Zone{ FailureDomain: md.AvailabilityZone, Region: os.region, } glog.V(1).Infof("Current zone is %v", zone) return zone, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L100-L114
go
train
// NewVolumeSnapshotter create a new VolumeSnapshotter
func NewVolumeSnapshotter( restClient *rest.RESTClient, scheme *runtime.Scheme, clientset kubernetes.Interface, asw cache.ActualStateOfWorld, volumePlugins *map[string]volume.Plugin) VolumeSnapshotter
// NewVolumeSnapshotter create a new VolumeSnapshotter func NewVolumeSnapshotter( restClient *rest.RESTClient, scheme *runtime.Scheme, clientset kubernetes.Interface, asw cache.ActualStateOfWorld, volumePlugins *map[string]volume.Plugin) VolumeSnapshotter
{ return &volumeSnapshotter{ restClient: restClient, coreClient: clientset, scheme: scheme, actualStateOfWorld: asw, runningOperation: goroutinemap.NewGoRoutineMap(defaultExponentialBackOffOnError), volumePlugins: volumePlugins, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L117-L137
go
train
// Helper function to get PV from VolumeSnapshot
func (vs *volumeSnapshotter) getPVFromVolumeSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolume, error)
// Helper function to get PV from VolumeSnapshot func (vs *volumeSnapshotter) getPVFromVolumeSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolume, error)
{ pvcName := snapshot.Spec.PersistentVolumeClaimName if pvcName == "" { return nil, fmt.Errorf("The PVC name is not specified in snapshot %s", uniqueSnapshotName) } pvc, err := vs.coreClient.CoreV1().PersistentVolumeClaims(snapshot.Metadata.Namespace).Get(pvcName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("Failed to retrieve PVC %s from the API server: %q", pvcName, err) } if pvc.Status.Phase != v1.ClaimBound { return nil, fmt.Errorf("The PVC %s not yet bound to a PV, will not attempt to take a snapshot yet", pvcName) } pvName := pvc.Spec.VolumeName pv, err := vs.getPVFromName(pvName) if err != nil { return nil, fmt.Errorf("Failed to retrieve PV %s from the API server: %q", pvName, err) } return pv, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L140-L142
go
train
// Helper function to get PV from PV name
func (vs *volumeSnapshotter) getPVFromName(pvName string) (*v1.PersistentVolume, error)
// Helper function to get PV from PV name func (vs *volumeSnapshotter) getPVFromName(pvName string) (*v1.PersistentVolume, error)
{ return vs.coreClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L147-L179
go
train
// TODO: cache the VolumeSnapshotData list since this is only needed when controller restarts, checks // whether there is existing VolumeSnapshotData refers to the snapshot already. // Helper function that looks up VolumeSnapshotData for a VolumeSnapshot named snapshotName
func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(uniqueSnapshotName string) *crdv1.VolumeSnapshotData
// TODO: cache the VolumeSnapshotData list since this is only needed when controller restarts, checks // whether there is existing VolumeSnapshotData refers to the snapshot already. // Helper function that looks up VolumeSnapshotData for a VolumeSnapshot named snapshotName func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(uniqueSnapshotName string) *crdv1.VolumeSnapshotData
{ var snapshotDataList crdv1.VolumeSnapshotDataList var snapshotDataObj crdv1.VolumeSnapshotData var found bool err := vs.restClient.Get(). Resource(crdv1.VolumeSnapshotDataResourcePlural). Do().Into(&snapshotDataList) if err != nil { glog.Errorf("Error retrieving the VolumeSnapshotData objects from API server: %v", err) return nil } if len(snapshotDataList.Items) == 0 { glog.Infof("No VolumeSnapshotData objects found on the API server") return nil } for _, snapData := range snapshotDataList.Items { if snapData.Spec.VolumeSnapshotRef != nil { name := snapData.Spec.VolumeSnapshotRef.Namespace + "/" + snapData.Spec.VolumeSnapshotRef.Name if name == uniqueSnapshotName || snapData.Spec.VolumeSnapshotRef.Name == uniqueSnapshotName { snapshotDataObj = snapData found = true break } } } if !found { glog.V(4).Infof("Error: no VolumeSnapshotData for VolumeSnapshot %s found", uniqueSnapshotName) return nil } return &snapshotDataObj }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L182-L197
go
train
// Helper function that looks up VolumeSnapshotData from a VolumeSnapshot
func (vs *volumeSnapshotter) getSnapshotDataFromSnapshot(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotData, error)
// Helper function that looks up VolumeSnapshotData from a VolumeSnapshot func (vs *volumeSnapshotter) getSnapshotDataFromSnapshot(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotData, error)
{ var snapshotDataObj crdv1.VolumeSnapshotData snapshotDataName := snapshot.Spec.SnapshotDataName if snapshotDataName == "" { return nil, fmt.Errorf("Could not find snapshot data object: SnapshotDataName in snapshot spec is empty") } err := vs.restClient.Get(). Name(snapshotDataName). Resource(crdv1.VolumeSnapshotDataResourcePlural). Do().Into(&snapshotDataObj) if err != nil { glog.Errorf("Error retrieving the VolumeSnapshotData objects from API server: %v", err) return nil, fmt.Errorf("Could not get snapshot data object %s: %v", snapshotDataName, err) } return &snapshotDataObj, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L201-L253
go
train
// Query status of the snapshot from plugin and update the status of VolumeSnapshot and VolumeSnapshotData // if needed. Finish waiting when the snapshot becomes available/ready or error.
func (vs *volumeSnapshotter) waitForSnapshot(uniqueSnapshotName string, snapshotObj *crdv1.VolumeSnapshot, snapshotDataObj *crdv1.VolumeSnapshotData) error
// Query status of the snapshot from plugin and update the status of VolumeSnapshot and VolumeSnapshotData // if needed. Finish waiting when the snapshot becomes available/ready or error. func (vs *volumeSnapshotter) waitForSnapshot(uniqueSnapshotName string, snapshotObj *crdv1.VolumeSnapshot, snapshotDataObj *crdv1.VolumeSnapshotData) error
{ glog.Infof("In waitForSnapshot: snapshot %s snapshot data %s", uniqueSnapshotName, snapshotObj.Spec.SnapshotDataName) if snapshotDataObj == nil { return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s: no VolumeSnapshotData", uniqueSnapshotName) } spec := &snapshotDataObj.Spec volumeType := crdv1.GetSupportedVolumeFromSnapshotDataSpec(spec) if len(volumeType) == 0 { return fmt.Errorf("unsupported volume type found in snapshot %#v", spec) } plugin, ok := (*vs.volumePlugins)[volumeType] if !ok { return fmt.Errorf("%s is not supported volume for %#v", volumeType, spec) } backoff := wait.Backoff{ Duration: volumeSnapshotInitialDelay, Factor: volumeSnapshotFactor, Steps: volumeSnapshotSteps, } // Wait until the snapshot is successfully created by the plugin or an error occurs that // fails the snapshot creation. err := wait.ExponentialBackoff(backoff, func() (bool, error) { conditions, _, err := plugin.DescribeSnapshot(snapshotDataObj) if err != nil { glog.Warningf("failed to get snapshot %v, err: %v", uniqueSnapshotName, err) //continue waiting return false, nil } newstatus := vs.getSimplifiedSnapshotStatus(*conditions) condition := *conditions lastCondition := condition[len(condition)-1] newSnapshot, err := vs.UpdateVolumeSnapshotStatus(snapshotObj, &lastCondition) if err != nil { glog.Errorf("Error updating volume snapshot %s: %v", uniqueSnapshotName, err) } if newstatus == statusReady { glog.Infof("waitForSnapshot: Snapshot %s created successfully. Adding it to Actual State of World.", uniqueSnapshotName) vs.actualStateOfWorld.AddSnapshot(newSnapshot) // Break out of the for loop return true, nil } else if newstatus == statusError { glog.Errorf("waitForSnapshot: Snapshot %s returns error", uniqueSnapshotName) return true, fmt.Errorf("Failed to create snapshot %s", uniqueSnapshotName) } return false, nil }) return err }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L257-L280
go
train
// This is the function responsible for determining the correct volume plugin to use, // asking it to make a snapshot and assigning it some name that it returns to the caller.
func (vs *volumeSnapshotter) takeSnapshot( snapshot *crdv1.VolumeSnapshot, pv *v1.PersistentVolume, tags *map[string]string, ) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error)
// This is the function responsible for determining the correct volume plugin to use, // asking it to make a snapshot and assigning it some name that it returns to the caller. func (vs *volumeSnapshotter) takeSnapshot( snapshot *crdv1.VolumeSnapshot, pv *v1.PersistentVolume, tags *map[string]string, ) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error)
{ spec := &pv.Spec volumeType := crdv1.GetSupportedVolumeFromPVSpec(spec) if len(volumeType) == 0 { return nil, nil, fmt.Errorf("unsupported volume type found in PV %#v", spec) } plugin, ok := (*vs.volumePlugins)[volumeType] if !ok { return nil, nil, fmt.Errorf("%s is not supported volume for %#v", volumeType, spec) } snapDataSource, snapConditions, err := plugin.SnapshotCreate(snapshot, pv, tags) if err != nil { glog.Warningf("failed to snapshot %#v, err: %v", spec, err) } else { glog.Infof("snapshot created: %v. Conditions: %#v", snapDataSource, snapConditions) return snapDataSource, snapConditions, nil } return nil, nil, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L284-L305
go
train
// This is the function responsible for determining the correct volume plugin to use, // asking it to make a snapshot and assigning it some name that it returns to the caller.
func (vs *volumeSnapshotter) deleteSnapshot(spec *crdv1.VolumeSnapshotDataSpec) error
// This is the function responsible for determining the correct volume plugin to use, // asking it to make a snapshot and assigning it some name that it returns to the caller. func (vs *volumeSnapshotter) deleteSnapshot(spec *crdv1.VolumeSnapshotDataSpec) error
{ volumeType := crdv1.GetSupportedVolumeFromSnapshotDataSpec(spec) if len(volumeType) == 0 { return fmt.Errorf("unsupported volume type found in VolumeSnapshotData %#v", spec) } plugin, ok := (*vs.volumePlugins)[volumeType] if !ok { return fmt.Errorf("%s is not supported volume for %#v", volumeType, spec) } source := spec.VolumeSnapshotDataSource pv, err := vs.getPVFromName(spec.PersistentVolumeRef.Name) if err != nil { glog.Warningf("failed to retrieve PV %s from the API server: %q", spec.PersistentVolumeRef.Name, err) } err = plugin.SnapshotDelete(&source, pv) if err != nil { return fmt.Errorf("failed to delete snapshot %#v, err: %v", source, err) } glog.Infof("snapshot %#v deleted", source) return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L365-L409
go
train
// Examine the given snapshot in detail and then return the status
func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (string, *crdv1.VolumeSnapshot, error)
// Examine the given snapshot in detail and then return the status func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (string, *crdv1.VolumeSnapshot, error)
{ snapshotName := snapshot.Metadata.Name var snapshotDataObj *crdv1.VolumeSnapshotData var snapshotDataSource *crdv1.VolumeSnapshotDataSource var conditions *[]crdv1.VolumeSnapshotCondition var err error tags := vs.findVolumeSnapshotMetadata(snapshot) // If there is no tag returned, snapshotting is not triggered yet, return new state if tags == nil { glog.Infof("No tag can be found in snapshot metadata %s", uniqueSnapshotName) return statusNew, snapshot, nil } // Check whether snapshotData object is already created or not. If yes, snapshot is already // triggered through cloud provider, bind it and return pending state if snapshotDataObj = vs.getSnapshotDataFromSnapshotName(uniqueSnapshotName); snapshotDataObj != nil { glog.Infof("Find snapshot data object %s from snapshot %s", snapshotDataObj.Metadata.Name, uniqueSnapshotName) snapshotObj, err := vs.bindandUpdateVolumeSnapshot(snapshot, snapshotDataObj.Metadata.Name, nil) if err != nil { return statusError, snapshot, err } return statusPending, snapshotObj, nil } // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) if err != nil { return statusNew, snapshot, nil } // Snapshot is found. Create VolumeSnapshotData, bind VolumeSnapshotData to VolumeSnapshot, and update VolumeSnapshot status glog.Infof("updateSnapshotIfExists: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) pvName, ok := snapshot.Metadata.Labels[pvNameLabel] if !ok { return statusError, snapshot, fmt.Errorf("Could not find pv name from snapshot, this should not happen.") } snapshotDataObj, err = vs.createVolumeSnapshotData(snapshotName, pvName, snapshotDataSource, conditions) if err != nil { return statusError, snapshot, err } glog.Infof("updateSnapshotIfExists: update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", uniqueSnapshotName) snapshotObj, err := vs.bindandUpdateVolumeSnapshot(snapshot, snapshotDataObj.Metadata.Name, conditions) if err != nil { return statusError, nil, err } return statusPending, snapshotObj, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L413-L455
go
train
// Below are the closures meant to build the functions for the GoRoutineMap operations. // syncSnapshot is the main controller method to decide what to do to create a snapshot.
func (vs *volumeSnapshotter) syncSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) func() error
// Below are the closures meant to build the functions for the GoRoutineMap operations. // syncSnapshot is the main controller method to decide what to do to create a snapshot. func (vs *volumeSnapshotter) syncSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) func() error
{ return func() error { snapshotObj := snapshot status := vs.getSimplifiedSnapshotStatus(snapshot.Status.Conditions) var err error // When the condition is new, it is still possible that snapshot is already triggered but has not yet updated the condition. // Check the metadata and available VolumeSnapshotData objects and update the snapshot accordingly if status == statusNew { status, snapshotObj, err = vs.updateSnapshotIfExists(uniqueSnapshotName, snapshot) if err != nil { glog.Errorf("updateSnapshotIfExists has error %v", err) } } switch status { case statusReady: glog.Infof("Snapshot %s created successfully. Adding it to Actual State of World.", uniqueSnapshotName) vs.actualStateOfWorld.AddSnapshot(snapshot) return nil case statusError: glog.Infof("syncSnapshot: Error creating snapshot %s.", uniqueSnapshotName) return fmt.Errorf("Error creating snapshot %s", uniqueSnapshotName) case statusPending: glog.V(4).Infof("syncSnapshot: Snapshot %s is Pending.", uniqueSnapshotName) // Query the volume plugin for the status of the snapshot with snapshot id // from VolumeSnapshotData object. snapshotDataObj, err := vs.getSnapshotDataFromSnapshot(snapshotObj) if err != nil { return fmt.Errorf("Failed to find snapshot %v", err) } err = vs.waitForSnapshot(uniqueSnapshotName, snapshotObj, snapshotDataObj) if err != nil { return fmt.Errorf("Failed to check snapshot state %s with error %v", uniqueSnapshotName, err) } glog.Infof("syncSnapshot: Snapshot %s created successfully.", uniqueSnapshotName) return nil case statusNew: glog.Infof("syncSnapshot: Creating snapshot %s ...", uniqueSnapshotName) err = vs.createSnapshot(uniqueSnapshotName, snapshotObj) return err } return fmt.Errorf("Error occurred when creating snapshot %s, unknown status %s", uniqueSnapshotName, status) } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L486-L528
go
train
// The function goes through the whole snapshot creation process. // 1. Update VolumeSnapshot metadata to include the snapshotted PV name, timestamp and snapshot uid, also generate tag for cloud provider // 2. Trigger the snapshot through cloud provider and attach the tag to the snapshot. // 3. Create the VolumeSnapshotData object with the snapshot id information returned from step 2. // 4. Bind the VolumeSnapshot and VolumeSnapshotData object // 5. Query the snapshot status through cloud provider and update the status until snapshot is ready or fails.
func (vs *volumeSnapshotter) createSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) error
// The function goes through the whole snapshot creation process. // 1. Update VolumeSnapshot metadata to include the snapshotted PV name, timestamp and snapshot uid, also generate tag for cloud provider // 2. Trigger the snapshot through cloud provider and attach the tag to the snapshot. // 3. Create the VolumeSnapshotData object with the snapshot id information returned from step 2. // 4. Bind the VolumeSnapshot and VolumeSnapshotData object // 5. Query the snapshot status through cloud provider and update the status until snapshot is ready or fails. func (vs *volumeSnapshotter) createSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) error
{ var snapshotDataSource *crdv1.VolumeSnapshotDataSource var snapStatus *[]crdv1.VolumeSnapshotCondition var err error var tags *map[string]string glog.Infof("createSnapshot: Creating snapshot %s through the plugin ...", uniqueSnapshotName) pv, err := vs.getPVFromVolumeSnapshot(uniqueSnapshotName, snapshot) if err != nil { return err } glog.Infof("createSnapshot: Creating metadata for snapshot %s.", uniqueSnapshotName) tags, err = vs.updateVolumeSnapshotMetadata(snapshot, pv.Name) if err != nil { return fmt.Errorf("Failed to update metadata for volume snapshot %s: %q", uniqueSnapshotName, err) } snapshotDataSource, snapStatus, err = vs.takeSnapshot(snapshot, pv, tags) if err != nil || snapshotDataSource == nil { return fmt.Errorf("Failed to take snapshot of the volume %s: %q", pv.Name, err) } glog.Infof("createSnapshot: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) snapshotDataObj, err := vs.createVolumeSnapshotData(uniqueSnapshotName, pv.Name, snapshotDataSource, snapStatus) if err != nil { return err } glog.Infof("createSnapshot: Update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", uniqueSnapshotName) snapshotObj, err := vs.bindandUpdateVolumeSnapshot(snapshot, snapshotDataObj.Metadata.Name, snapStatus) if err != nil { glog.Errorf("createSnapshot: Error updating volume snapshot %s: %v", uniqueSnapshotName, err) return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s", uniqueSnapshotName) } // Waiting for snapshot to be ready err = vs.waitForSnapshot(uniqueSnapshotName, snapshotObj, snapshotDataObj) if err != nil { return fmt.Errorf("Failed to create snapshot %s with error %v", uniqueSnapshotName, err) } glog.Infof("createSnapshot: Snapshot %s created successfully.", uniqueSnapshotName) return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L700-L744
go
train
// Update VolumeSnapshot object with current timestamp and associated PersistentVolume name in object's metadata
func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.VolumeSnapshot, pvName string) (*map[string]string, error)
// Update VolumeSnapshot object with current timestamp and associated PersistentVolume name in object's metadata func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.VolumeSnapshot, pvName string) (*map[string]string, error)
{ glog.Infof("In updateVolumeSnapshotMetadata") var snapshotObj crdv1.VolumeSnapshot // Need to get a fresh copy of the VolumeSnapshot from the API server err := vs.restClient.Get(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Do().Into(&snapshotObj) if err != nil { return nil, fmt.Errorf("Error retrieving VolumeSnapshot %s from API server: %v", snapshot.Metadata.Name, err) } // Copy the snapshot object before updating it snapshotCopy := snapshotObj.DeepCopy() if snapshotCopy.Metadata.Labels == nil { snapshotCopy.Metadata.Labels = make(map[string]string) } snapshotCopy.Metadata.Labels[snapshotMetadataTimeStamp] = fmt.Sprintf("%d", time.Now().UnixNano()) snapshotCopy.Metadata.Labels[snapshotMetadataPVName] = pvName glog.Infof("updateVolumeSnapshotMetadata: Metadata UID: %s Metadata Name: %s Metadata Namespace: %s Setting tags in Metadata Labels: %#v.", snapshotCopy.Metadata.UID, snapshotCopy.Metadata.Name, snapshotCopy.Metadata.Namespace, snapshotCopy.Metadata.Labels) // TODO: Use Patch instead of Put to update the object? var result crdv1.VolumeSnapshot err = vs.restClient.Put(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Body(snapshotCopy). Do().Into(&result) if err != nil { return nil, fmt.Errorf("Error updating snapshot object %s/%s on the API server: %v", snapshot.Metadata.Namespace, snapshot.Metadata.Name, err) } cloudTags := make(map[string]string) cloudTags[CloudSnapshotCreatedForVolumeSnapshotNamespaceTag] = result.Metadata.Namespace cloudTags[CloudSnapshotCreatedForVolumeSnapshotNameTag] = result.Metadata.Name cloudTags[CloudSnapshotCreatedForVolumeSnapshotUIDTag] = fmt.Sprintf("%v", result.Metadata.UID) cloudTags[CloudSnapshotCreatedForVolumeSnapshotTimestampTag] = result.Metadata.Labels[snapshotMetadataTimeStamp] glog.Infof("updateVolumeSnapshotMetadata: returning cloudTags [%#v]", cloudTags) return &cloudTags, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L747-L800
go
train
// Propagates the VolumeSnapshot condition to VolumeSnapshotData
func (vs *volumeSnapshotter) propagateVolumeSnapshotCondition(snapshotDataName string, condition *crdv1.VolumeSnapshotCondition) error
// Propagates the VolumeSnapshot condition to VolumeSnapshotData func (vs *volumeSnapshotter) propagateVolumeSnapshotCondition(snapshotDataName string, condition *crdv1.VolumeSnapshotCondition) error
{ var snapshotDataObj crdv1.VolumeSnapshotData err := vs.restClient.Get(). Name(snapshotDataName). Resource(crdv1.VolumeSnapshotDataResourcePlural). Do().Into(&snapshotDataObj) if err != nil { return err } newCondition := &crdv1.VolumeSnapshotDataCondition{ Type: (crdv1.VolumeSnapshotDataConditionType)(condition.Type), Status: condition.Status, Message: condition.Message, LastTransitionTime: condition.LastTransitionTime, } oldStatus := snapshotDataObj.Status.DeepCopy() status := snapshotDataObj.Status isEqual := false if oldStatus.Conditions == nil || len(oldStatus.Conditions) == 0 || newCondition.Type != oldStatus.Conditions[len(oldStatus.Conditions)-1].Type { status.Conditions = append(status.Conditions, *newCondition) } else { oldCondition := oldStatus.Conditions[len(oldStatus.Conditions)-1] if newCondition.Status == oldCondition.Status { newCondition.LastTransitionTime = oldCondition.LastTransitionTime } status.Conditions[len(status.Conditions)-1] = *newCondition isEqual = newCondition.Type == oldCondition.Type && newCondition.Status == oldCondition.Status && newCondition.Reason == oldCondition.Reason && newCondition.Message == oldCondition.Message && newCondition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) } if !isEqual { var newSnapshotDataObj crdv1.VolumeSnapshotData snapshotDataObj.Status = status if snapshotDataObj.Status.CreationTimestamp.IsZero() && newCondition.Type == crdv1.VolumeSnapshotDataConditionReady { snapshotDataObj.Status.CreationTimestamp = newCondition.LastTransitionTime } err = vs.restClient.Put(). Name(snapshotDataName). Resource(crdv1.VolumeSnapshotDataResourcePlural). Body(&snapshotDataObj). Do().Into(&newSnapshotDataObj) if err != nil { return err } glog.Infof("VolumeSnapshot status propagated to VolumeSnapshotData") return nil } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L803-L854
go
train
// Update VolumeSnapshot status if the condition is changed.
func (vs *volumeSnapshotter) UpdateVolumeSnapshotStatus(snapshot *crdv1.VolumeSnapshot, condition *crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error)
// Update VolumeSnapshot status if the condition is changed. func (vs *volumeSnapshotter) UpdateVolumeSnapshotStatus(snapshot *crdv1.VolumeSnapshot, condition *crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error)
{ var snapshotObj crdv1.VolumeSnapshot err := vs.restClient.Get(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Do().Into(&snapshotObj) if err != nil { return nil, err } oldStatus := snapshotObj.Status.DeepCopy() status := snapshotObj.Status isEqual := false if oldStatus.Conditions == nil || len(oldStatus.Conditions) == 0 || condition.Type != oldStatus.Conditions[len(oldStatus.Conditions)-1].Type { status.Conditions = append(status.Conditions, *condition) } else { oldCondition := oldStatus.Conditions[len(oldStatus.Conditions)-1] if condition.Status == oldCondition.Status { condition.LastTransitionTime = oldCondition.LastTransitionTime } status.Conditions[len(status.Conditions)-1] = *condition isEqual = condition.Type == oldCondition.Type && condition.Status == oldCondition.Status && condition.Reason == oldCondition.Reason && condition.Message == oldCondition.Message && condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) } if !isEqual { var newSnapshotObj crdv1.VolumeSnapshot snapshotObj.Status = status err = vs.restClient.Put(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Body(&snapshotObj). Do().Into(&newSnapshotObj) if err != nil { return nil, err } glog.Infof("UpdateVolumeSnapshotStatus finishes %+v", newSnapshotObj) err = vs.propagateVolumeSnapshotCondition(snapshotObj.Spec.SnapshotDataName, &snapshotObj.Status.Conditions[len(snapshotObj.Status.Conditions)-1]) if err != nil { return nil, err } return &newSnapshotObj, nil } return snapshot, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/snapshotter/snapshotter.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/snapshotter/snapshotter.go#L857-L891
go
train
// Bind the VolumeSnapshot and VolumeSnapshotData and update the status
func (vs *volumeSnapshotter) bindandUpdateVolumeSnapshot(snapshot *crdv1.VolumeSnapshot, snapshotDataName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error)
// Bind the VolumeSnapshot and VolumeSnapshotData and update the status func (vs *volumeSnapshotter) bindandUpdateVolumeSnapshot(snapshot *crdv1.VolumeSnapshot, snapshotDataName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error)
{ var snapshotObj crdv1.VolumeSnapshot glog.Infof("In bindVolumeSnapshotDataToVolumeSnapshot") // Get a fresh copy of the VolumeSnapshot from the API server glog.Infof("bindVolumeSnapshotDataToVolumeSnapshot: Namespace %s Name %s", snapshot.Metadata.Namespace, snapshot.Metadata.Name) err := vs.restClient.Get(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Do().Into(&snapshotObj) uniqueSnapshotName := cache.MakeSnapshotName(snapshot) // TODO: Is copy needed here? snapshotCopy := snapshotObj.DeepCopy() snapshotCopy.Spec.SnapshotDataName = snapshotDataName if status != nil { snapshotCopy.Status.Conditions = *status } glog.Infof("bindVolumeSnapshotDataToVolumeSnapshot: Updating VolumeSnapshot object [%#v]", snapshotCopy) // TODO: Make diff of the two objects and then use restClient.Patch to update it var result crdv1.VolumeSnapshot err = vs.restClient.Put(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Body(snapshotCopy). Do().Into(&result) if err != nil { return nil, fmt.Errorf("Error updating snapshot object %s on the API server: %v", uniqueSnapshotName, err) } return &result, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
nfs-client/cmd/nfs-client-provisioner/provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/nfs-client/cmd/nfs-client-provisioner/provisioner.go#L133-L146
go
train
// getClassForVolume returns StorageClass
func (p *nfsProvisioner) getClassForVolume(pv *v1.PersistentVolume) (*storage.StorageClass, error)
// getClassForVolume returns StorageClass func (p *nfsProvisioner) getClassForVolume(pv *v1.PersistentVolume) (*storage.StorageClass, error)
{ if p.client == nil { return nil, fmt.Errorf("Cannot get kube client") } className := helper.GetPersistentVolumeClass(pv) if className == "" { return nil, fmt.Errorf("Volume has no storage class") } class, err := p.client.StorageV1().StorageClasses().Get(className, metav1.GetOptions{}) if err != nil { return nil, err } return class, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/populator/desired_state_of_world_populator.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/populator/desired_state_of_world_populator.go#L42-L53
go
train
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator. // loopSleepDuration - the amount of time the populator loop sleeps between // successive executions // desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator( loopSleepDuration time.Duration, listSnapshotsRetryDuration time.Duration, snapshotStore k8scache.Store, desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator. // loopSleepDuration - the amount of time the populator loop sleeps between // successive executions // desiredStateOfWorld - the cache to populate func NewDesiredStateOfWorldPopulator( loopSleepDuration time.Duration, listSnapshotsRetryDuration time.Duration, snapshotStore k8scache.Store, desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator
{ return &desiredStateOfWorldPopulator{ loopSleepDuration: loopSleepDuration, listSnapshotsRetryDuration: listSnapshotsRetryDuration, desiredStateOfWorld: desiredStateOfWorld, snapshotStore: snapshotStore, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/populator/desired_state_of_world_populator.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/populator/desired_state_of_world_populator.go#L87-L99
go
train
// Iterate through all pods in desired state of world, and remove if they no // longer exist in the informer
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedSnapshots()
// Iterate through all pods in desired state of world, and remove if they no // longer exist in the informer func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedSnapshots()
{ for snapshotUID, snapshot := range dswp.desiredStateOfWorld.GetSnapshots() { _, exists, err := dswp.snapshotStore.Get(snapshot) if err != nil { glog.Errorf("get snapshot %s failed: %v", snapshotUID, err) continue } if !exists { glog.V(1).Infof("Removing snapshot %s from dsw because it does not exist in snapshot informer.", snapshotUID) dswp.desiredStateOfWorld.DeleteSnapshot(cache.MakeSnapshotName(snapshot)) } } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
flex/pkg/volume/driver-call.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/flex/pkg/volume/driver-call.go#L57-L59
go
train
// NewDriverCall initialize the DriverCall
func (plugin *flexProvisioner) NewDriverCall(execPath, command string) *DriverCall
// NewDriverCall initialize the DriverCall func (plugin *flexProvisioner) NewDriverCall(execPath, command string) *DriverCall
{ return plugin.NewDriverCallWithTimeout(execPath, command, 0) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
flex/pkg/volume/driver-call.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/flex/pkg/volume/driver-call.go#L62-L70
go
train
//NewDriverCallWithTimeout return the DriverCall with timeout
func (plugin *flexProvisioner) NewDriverCallWithTimeout(execPath, command string, timeout time.Duration) *DriverCall
//NewDriverCallWithTimeout return the DriverCall with timeout func (plugin *flexProvisioner) NewDriverCallWithTimeout(execPath, command string, timeout time.Duration) *DriverCall
{ return &DriverCall{ Execpath: execPath, Command: command, Timeout: timeout, plugin: plugin, args: []string{command}, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
flex/pkg/volume/driver-call.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/flex/pkg/volume/driver-call.go#L78-L91
go
train
//AppendSpec add all option parameters to DriverCall
func (dc *DriverCall) AppendSpec(volumeOptions, extraOptions map[string]string) error
//AppendSpec add all option parameters to DriverCall func (dc *DriverCall) AppendSpec(volumeOptions, extraOptions map[string]string) error
{ optionsForDriver, err := NewOptionsForDriver(volumeOptions, extraOptions) if err != nil { return err } jsonBytes, err := json.Marshal(optionsForDriver) if err != nil { return fmt.Errorf("Failed to marshal spec, error: %s", err.Error()) } dc.Append(string(jsonBytes)) return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
flex/pkg/volume/driver-call.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/flex/pkg/volume/driver-call.go#L94-L127
go
train
//Run the command with option parameters
func (dc *DriverCall) Run() (*DriverStatus, error)
//Run the command with option parameters func (dc *DriverCall) Run() (*DriverStatus, error)
{ cmd := dc.plugin.runner.Command(dc.Execpath, dc.args...) timeout := false if dc.Timeout > 0 { timer := time.AfterFunc(dc.Timeout, func() { timeout = true cmd.Stop() }) defer timer.Stop() } output, execErr := cmd.CombinedOutput() if execErr != nil { if timeout { return nil, ErrorTimeout } _, err := handleCmdResponse(dc.Command, output) if err == nil { klog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", dc.Execpath, execErr) return nil, execErr } klog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", dc.Execpath, dc.args, execErr.Error(), output) return nil, err } status, err := handleCmdResponse(dc.Command, output) if err != nil { return nil, err } return status, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
flex/pkg/volume/driver-call.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/flex/pkg/volume/driver-call.go#L133-L145
go
train
// NewOptionsForDriver assemble all option parameters
func NewOptionsForDriver(volumeOptions, extraOptions map[string]string) (OptionsForDriver, error)
// NewOptionsForDriver assemble all option parameters func NewOptionsForDriver(volumeOptions, extraOptions map[string]string) (OptionsForDriver, error)
{ options := map[string]string{} for key, value := range extraOptions { options[key] = value } for key, value := range volumeOptions { options[key] = value } return OptionsForDriver(options), nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/regions.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/regions.go#L61-L79
go
train
// RecognizeRegion is called for each AWS region we know about. // It currently registers a credential provider for that region. // There are two paths to discovering a region: // * we hard-code some well-known regions // * if a region is discovered from instance metadata, we add that
func RecognizeRegion(region string)
// RecognizeRegion is called for each AWS region we know about. // It currently registers a credential provider for that region. // There are two paths to discovering a region: // * we hard-code some well-known regions // * if a region is discovered from instance metadata, we add that func RecognizeRegion(region string)
{ awsRegionsMutex.Lock() defer awsRegionsMutex.Unlock() if awsRegions == nil { awsRegions = sets.NewString() } if awsRegions.Has(region) { glog.V(6).Infof("found AWS region %q again - ignoring", region) return } glog.V(4).Infof("found AWS region %q", region) awscredentialprovider.RegisterCredentialsProvider(region) awsRegions.Insert(region) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/cloudprovider/providers/aws/device_allocator.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/cloudprovider/providers/aws/device_allocator.go#L53-L65
go
train
// NewDeviceAllocator allocates device names according to scheme ba..bz, ca..cz // it moves along the ring and always picks next device until // device list is exhausted.
func NewDeviceAllocator(lastIndex int) DeviceAllocator
// NewDeviceAllocator allocates device names according to scheme ba..bz, ca..cz // it moves along the ring and always picks next device until // device list is exhausted. func NewDeviceAllocator(lastIndex int) DeviceAllocator
{ possibleDevices := []mountDevice{} for _, firstChar := range []rune{'b', 'c'} { for i := 'a'; i <= 'z'; i++ { dev := mountDevice([]rune{firstChar, i}) possibleDevices = append(possibleDevices, dev) } } return &deviceAllocator{ possibleDevices: possibleDevices, lastIndex: lastIndex, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go#L96-L163
go
train
// Provision creates a storage asset and returns a PV object representing it.
func (p *snapshotProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
// Provision creates a storage asset and returns a PV object representing it. func (p *snapshotProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
{ if options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim Selector is not supported") } snapshotName, ok := options.PVC.Annotations[crdclient.SnapshotPVCAnnotation] if !ok { return nil, fmt.Errorf("snapshot annotation not found on PV") } var snapshot crdv1.VolumeSnapshot err := p.crdclient.Get(). Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(options.PVC.Namespace). Name(snapshotName). Do().Into(&snapshot) if err != nil { return nil, fmt.Errorf("failed to retrieve VolumeSnapshot %s: %v", snapshotName, err) } // FIXME: should also check if any VolumeSnapshotData points to this VolumeSnapshot if len(snapshot.Spec.SnapshotDataName) == 0 { return nil, fmt.Errorf("VolumeSnapshot %s is not bound to any VolumeSnapshotData", snapshotName) } var snapshotData crdv1.VolumeSnapshotData err = p.crdclient.Get(). Resource(crdv1.VolumeSnapshotDataResourcePlural). Name(snapshot.Spec.SnapshotDataName). Do().Into(&snapshotData) if err != nil { return nil, fmt.Errorf("failed to retrieve VolumeSnapshotData %s: %v", snapshot.Spec.SnapshotDataName, err) } glog.V(3).Infof("restore from VolumeSnapshotData %s", snapshot.Spec.SnapshotDataName) pvSrc, labels, err := p.snapshotRestore(snapshot.Spec.SnapshotDataName, snapshotData, options) if err != nil || pvSrc == nil { return nil, fmt.Errorf("failed to create a PV from snapshot %s: %v", snapshotName, err) } pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: options.PVName, Annotations: map[string]string{ provisionerIDAnn: p.identity, }, }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, AccessModes: options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, PersistentVolumeSource: *pvSrc, }, } if len(labels) != 0 { if pv.Labels == nil { pv.Labels = make(map[string]string) } for k, v := range labels { pv.Labels[k] = v } } glog.Infof("successfully 
created Snapshot share %#v", pv) return pv, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go#L167-L187
go
train
// Delete removes the storage asset that was created by Provision represented // by the given PV.
func (p *snapshotProvisioner) Delete(volume *v1.PersistentVolume) error
// Delete removes the storage asset that was created by Provision represented // by the given PV. func (p *snapshotProvisioner) Delete(volume *v1.PersistentVolume) error
{ ann, ok := volume.Annotations[provisionerIDAnn] if !ok { return errors.New("identity annotation not found on PV") } if ann != p.identity { return &controller.IgnoredError{Reason: "identity annotation on PV does not match ours"} } volumeType := crdv1.GetSupportedVolumeFromPVSpec(&volume.Spec) if len(volumeType) == 0 { return fmt.Errorf("unsupported volume type found in PV %#v", *volume) } plugin, ok := volumePlugins[volumeType] if !ok { return fmt.Errorf("%s is not supported volume for %#v", volumeType, *volume) } // delete PV return plugin.VolumeDelete(volume) }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L76-L125
go
train
// NewJobController instantiates a new job controller.
func NewJobController(labelmap map[string]string, config *common.RuntimeConfig) (JobController, error)
// NewJobController instantiates a new job controller. func NewJobController(labelmap map[string]string, config *common.RuntimeConfig) (JobController, error)
{ namespace := config.Namespace queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) labelset := labels.Set(labelmap) optionsModifier := func(options *meta_v1.ListOptions) { options.LabelSelector = labels.SelectorFromSet(labelset).String() } informer := config.InformerFactory.InformerFor(&batch_v1.Job{}, func(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( cache.NewFilteredListWatchFromClient(client.BatchV1().RESTClient(), "jobs", namespace, optionsModifier), &batch_v1.Job{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) }) informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) if err == nil { queue.Add(key) } return }, UpdateFunc: func(oldObj, newObj interface{}) { key, err := cache.MetaNamespaceKeyFunc(newObj) if err == nil { glog.Infof("Got update notification for %s", key) queue.Add(key) } return }, DeleteFunc: func(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err == nil { glog.Infof("Got delete notification for %s", key) queue.Add(key) } }, }) return &jobController{ RuntimeConfig: config, namespace: namespace, queue: queue, jobLister: batchlisters.NewJobLister(informer.GetIndexer()), }, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L146-L170
go
train
// processNextItem serially handles the events provided by the informer.
func (c *jobController) processNextItem() bool
// processNextItem serially handles the events provided by the informer. func (c *jobController) processNextItem() bool
{ key, quit := c.queue.Get() if quit { return false } defer c.queue.Done(key) err := c.processItem(key.(string)) if err == nil { // No error, tell the queue to stop tracking history c.queue.Forget(key) } else if c.queue.NumRequeues(key) < maxRetries { glog.Errorf("Error processing %s (will retry): %v", key, err) // requeue the item to work on later c.queue.AddRateLimited(key) } else { // err != nil and too many retries glog.Errorf("Error processing %s (giving up): %v", key, err) c.queue.Forget(key) utilruntime.HandleError(err) } return true }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L205-L219
go
train
// IsCleaningJobRunning returns true if a cleaning job is running for the specified PV.
func (c *jobController) IsCleaningJobRunning(pvName string) bool
// IsCleaningJobRunning returns true if a cleaning job is running for the specified PV. func (c *jobController) IsCleaningJobRunning(pvName string) bool
{ jobName := generateCleaningJobName(pvName) job, err := c.jobLister.Jobs(c.namespace).Get(jobName) if errors.IsNotFound(err) { return false } if err != nil { glog.Warningf("Failed to check whether job %s is running (%s). Assuming its still running.", jobName, err) return true } return job.Status.Succeeded <= 0 }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L222-L253
go
train
// RemoveJob returns true and deletes the job if the cleaning job has completed.
func (c *jobController) RemoveJob(pvName string) (CleanupState, *time.Time, error)
// RemoveJob returns true and deletes the job if the cleaning job has completed. func (c *jobController) RemoveJob(pvName string) (CleanupState, *time.Time, error)
{ jobName := generateCleaningJobName(pvName) job, err := c.jobLister.Jobs(c.namespace).Get(jobName) if err != nil { if errors.IsNotFound(err) { return CSNotFound, nil, nil } return CSUnknown, nil, fmt.Errorf("Failed to check whether job %s has succeeded. Error - %s", jobName, err.Error()) } var startTime *time.Time if startTimeStr, ok := job.Annotations[StartTimeAnnotation]; ok { parsedStartTime, err := time.Parse(time.RFC3339Nano, startTimeStr) if err == nil { startTime = &parsedStartTime } else { glog.Errorf("Failed to parse start time %s: %v", startTimeStr, err) } } if job.Status.Succeeded == 0 { // Jobs has not yet succeeded. We assume failed jobs to be still running, until addressed by admin. return CSUnknown, nil, fmt.Errorf("Error deleting Job %q: Cannot remove job that has not succeeded", job.Name) } if err := c.RuntimeConfig.APIUtil.DeleteJob(job.Name, c.namespace); err != nil { return CSUnknown, nil, fmt.Errorf("Error deleting Job %q: %s", job.Name, err.Error()) } return CSSucceeded, startTime, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L256-L325
go
train
// NewCleanupJob creates manifest for a cleaning job.
func NewCleanupJob(pv *apiv1.PersistentVolume, volMode apiv1.PersistentVolumeMode, imageName string, nodeName string, namespace string, mountPath string, config common.MountConfig) (*batch_v1.Job, error)
// NewCleanupJob creates manifest for a cleaning job. func NewCleanupJob(pv *apiv1.PersistentVolume, volMode apiv1.PersistentVolumeMode, imageName string, nodeName string, namespace string, mountPath string, config common.MountConfig) (*batch_v1.Job, error)
{ priv := true // Container definition jobContainer := apiv1.Container{ Name: JobContainerName, Image: imageName, SecurityContext: &apiv1.SecurityContext{ Privileged: &priv, }, } if volMode == apiv1.PersistentVolumeBlock { jobContainer.Command = config.BlockCleanerCommand jobContainer.Env = []apiv1.EnvVar{{Name: common.LocalPVEnv, Value: mountPath}} } else if volMode == apiv1.PersistentVolumeFilesystem { // We only have one way to clean filesystem, so no need to customize // filesystem cleaner command. jobContainer.Command = []string{"/scripts/fsclean.sh"} jobContainer.Env = []apiv1.EnvVar{{Name: common.LocalFilesystemEnv, Value: mountPath}} } else { return nil, fmt.Errorf("unknown PersistentVolume mode: %v", volMode) } mountName := common.GenerateMountName(&config) volumes := []apiv1.Volume{ { Name: mountName, VolumeSource: apiv1.VolumeSource{ HostPath: &apiv1.HostPathVolumeSource{ Path: config.HostDir, }, }, }, } jobContainer.VolumeMounts = []apiv1.VolumeMount{{ Name: mountName, MountPath: config.MountDir}, } // Make job query-able by some useful labels for admins. labels := map[string]string{ common.NodeNameLabel: nodeName, PVLabel: pv.Name, PVUuidLabel: string(pv.UID), } // Annotate job with useful information that cannot be set as labels due to label name restrictions. annotations := map[string]string{ DeviceAnnotation: mountPath, StartTimeAnnotation: time.Now().Format(time.RFC3339Nano), } podTemplate := apiv1.Pod{} podTemplate.Spec = apiv1.PodSpec{ Containers: []apiv1.Container{jobContainer}, Volumes: volumes, NodeSelector: map[string]string{common.NodeNameLabel: nodeName}, } podTemplate.ObjectMeta = meta_v1.ObjectMeta{ Name: generateCleaningJobName(pv.Name), Namespace: namespace, Labels: labels, Annotations: annotations, } job := &batch_v1.Job{} job.ObjectMeta = podTemplate.ObjectMeta job.Spec.Template.Spec = podTemplate.Spec job.Spec.Template.Spec.RestartPolicy = apiv1.RestartPolicyOnFailure return job, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L361-L365
go
train
// IsCleaningJobRunning mocks the interface method.
func (c *FakeJobController) IsCleaningJobRunning(pvName string) bool
// IsCleaningJobRunning mocks the interface method. func (c *FakeJobController) IsCleaningJobRunning(pvName string) bool
{ c.IsRunningCount++ _, exists := c.pvCleanupRunning[pvName] return exists }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
local-volume/provisioner/pkg/deleter/jobcontroller.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/local-volume/provisioner/pkg/deleter/jobcontroller.go#L368-L378
go
train
// RemoveJob mocks the interface method.
func (c *FakeJobController) RemoveJob(pvName string) (CleanupState, *time.Time, error)
// RemoveJob mocks the interface method. func (c *FakeJobController) RemoveJob(pvName string) (CleanupState, *time.Time, error)
{ c.RemoveCompletedCount++ status, exists := c.pvCleanupRunning[pvName] if !exists { return CSNotFound, nil, nil } if status != CSSucceeded { return CSUnknown, nil, fmt.Errorf("cannot remove job that has not yet completed %s status %d", pvName, status) } return CSSucceeded, nil, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
openebs/openebs-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/openebs/openebs-provisioner.go#L53-L77
go
train
// NewOpenEBSProvisioner creates a new openebs provisioner
func NewOpenEBSProvisioner(client kubernetes.Interface) controller.Provisioner
// NewOpenEBSProvisioner creates a new openebs provisioner func NewOpenEBSProvisioner(client kubernetes.Interface) controller.Provisioner
{ nodeName := os.Getenv("NODE_NAME") if nodeName == "" { glog.Errorf("ENV variable 'NODE_NAME' is not set") } var openebsObj mApiv1.OpenEBSVolume //Get maya-apiserver IP address from cluster addr, err := openebsObj.GetMayaClusterIP(client) if err != nil { glog.Errorf("Error getting maya-apiserver IP Address: %v", err) return nil } mayaServiceURI := "http://" + addr + ":5656" //Set maya-apiserver IP address along with default port os.Setenv("MAPI_ADDR", mayaServiceURI) return &openEBSProvisioner{ mapiURI: mayaServiceURI, identity: nodeName, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
openebs/openebs-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/openebs/openebs-provisioner.go#L82-L152
go
train
// Provision creates a storage asset and returns a PV object representing it.
func (p *openEBSProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
// Provision creates a storage asset and returns a PV object representing it. func (p *openEBSProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error)
{ //Issue a request to Maya API Server to create a volume var volume mayav1.Volume var openebsVol mApiv1.OpenEBSVolume volumeSpec := mayav1.VolumeSpec{} volSize := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volumeSpec.Metadata.Labels.Storage = volSize.String() volumeSpec.Metadata.Labels.StorageClass = *options.PVC.Spec.StorageClassName volumeSpec.Metadata.Labels.Namespace = options.PVC.Namespace volumeSpec.Metadata.Name = options.PVName _, err := openebsVol.CreateVolume(volumeSpec) if err != nil { glog.Errorf("Error creating volume: %v", err) return nil, err } err = openebsVol.ListVolume(options.PVName, &volume) if err != nil { glog.Errorf("Error getting volume details: %v", err) return nil, err } var iqn, targetPortal string for key, value := range volume.Metadata.Annotations.(map[string]interface{}) { switch key { case "vsm.openebs.io/iqn": iqn = value.(string) case "vsm.openebs.io/targetportals": targetPortal = value.(string) } } glog.V(2).Infof("Volume IQN: %v , Volume Target: %v", iqn, targetPortal) if !util.AccessModesContainedInAll(p.GetAccessModes(), options.PVC.Spec.AccessModes) { glog.V(1).Infof("Invalid Access Modes: %v, Supported Access Modes: %v", options.PVC.Spec.AccessModes, p.GetAccessModes()) return nil, fmt.Errorf("Invalid Access Modes: %v, Supported Access Modes: %v", options.PVC.Spec.AccessModes, p.GetAccessModes()) } pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: options.PVName, Annotations: map[string]string{ "openEBSProvisionerIdentity": p.identity, }, }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, AccessModes: options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, PersistentVolumeSource: v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIPersistentVolumeSource{ TargetPortal: targetPortal, IQN: iqn, Lun: 1, FSType: "ext4", ReadOnly: 
false, }, }, }, } return pv, nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
openebs/openebs-provisioner.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/openebs/openebs-provisioner.go#L156-L176
go
train
// Delete removes the storage asset that was created by Provision represented // by the given PV.
func (p *openEBSProvisioner) Delete(volume *v1.PersistentVolume) error
// Delete removes the storage asset that was created by Provision represented // by the given PV. func (p *openEBSProvisioner) Delete(volume *v1.PersistentVolume) error
{ var openebsVol mApiv1.OpenEBSVolume ann, ok := volume.Annotations["openEBSProvisionerIdentity"] if !ok { return errors.New("identity annotation not found on PV") } if ann != p.identity { return &controller.IgnoredError{Reason: "identity annotation on PV does not match ours"} } // Issue a delete request to Maya API Server err := openebsVol.DeleteVolume(volume.Name) if err != nil { glog.Errorf("Error while deleting volume: %v", err) return err } return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L61-L66
go
train
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld() ActualStateOfWorld
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld. func NewActualStateOfWorld() ActualStateOfWorld
{ m := make(map[string]*crdv1.VolumeSnapshot) return &actualStateOfWorld{ snapshots: m, } }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L69-L77
go
train
// Adds a snapshot to the list of snapshots to be created.
func (asw *actualStateOfWorld) AddSnapshot(snapshot *crdv1.VolumeSnapshot) error
// Adds a snapshot to the list of snapshots to be created. func (asw *actualStateOfWorld) AddSnapshot(snapshot *crdv1.VolumeSnapshot) error
{ asw.Lock() defer asw.Unlock() snapshotName := MakeSnapshotName(snapshot) glog.Infof("Adding new snapshot to actual state of world: %s", snapshotName) asw.snapshots[snapshotName] = snapshot return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L80-L87
go
train
// Removes the snapshot from the list of existing snapshots.
func (asw *actualStateOfWorld) DeleteSnapshot(snapshotName string) error
// Removes the snapshot from the list of existing snapshots. func (asw *actualStateOfWorld) DeleteSnapshot(snapshotName string) error
{ asw.Lock() defer asw.Unlock() glog.Infof("Deleting snapshot from actual state of world: %s", snapshotName) delete(asw.snapshots, snapshotName) return nil }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L90-L101
go
train
// Returns a copy of the list of the snapshots known to the actual state of world.
func (asw *actualStateOfWorld) GetSnapshots() map[string]*crdv1.VolumeSnapshot
// Returns a copy of the list of the snapshots known to the actual state of world. func (asw *actualStateOfWorld) GetSnapshots() map[string]*crdv1.VolumeSnapshot
{ asw.RLock() defer asw.RUnlock() snapshots := make(map[string]*crdv1.VolumeSnapshot) for snapName, snapshot := range asw.snapshots { snapshots[snapName] = snapshot } return snapshots }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L104-L110
go
train
// Get snapshot
func (asw *actualStateOfWorld) GetSnapshot(snapshotName string) *crdv1.VolumeSnapshot
// Get snapshot func (asw *actualStateOfWorld) GetSnapshot(snapshotName string) *crdv1.VolumeSnapshot
{ asw.RLock() defer asw.RUnlock() snapshot, _ := asw.snapshots[snapshotName] return snapshot }
kubernetes-incubator/external-storage
fbfedbf60da4e5ee25a3151bfe8504f3e3281319
snapshot/pkg/controller/cache/actual_state_of_world.go
https://github.com/kubernetes-incubator/external-storage/blob/fbfedbf60da4e5ee25a3151bfe8504f3e3281319/snapshot/pkg/controller/cache/actual_state_of_world.go#L113-L119
go
train
// Checks for the existence of the snapshot
func (asw *actualStateOfWorld) SnapshotExists(snapshotName string) bool
// Checks for the existence of the snapshot func (asw *actualStateOfWorld) SnapshotExists(snapshotName string) bool
{ asw.RLock() defer asw.RUnlock() _, snapshotExists := asw.snapshots[snapshotName] return snapshotExists }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
arrays.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/arrays.go#L13-L17
go
train
// Each iterates over the slice and apply Iterator to every item
func Each(array []interface{}, iterator Iterator)
// Each iterates over the slice and apply Iterator to every item func Each(array []interface{}, iterator Iterator)
{ for index, data := range array { iterator(data, index) } }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
arrays.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/arrays.go#L20-L26
go
train
// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result.
func Map(array []interface{}, iterator ResultIterator) []interface{}
// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. func Map(array []interface{}, iterator ResultIterator) []interface{}
{ var result = make([]interface{}, len(array)) for index, data := range array { result[index] = iterator(data, index) } return result }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
arrays.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/arrays.go#L29-L36
go
train
// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise.
func Find(array []interface{}, iterator ConditionIterator) interface{}
// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. func Find(array []interface{}, iterator ConditionIterator) interface{}
{ for index, data := range array { if iterator(data, index) { return data } } return nil }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
arrays.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/arrays.go#L39-L47
go
train
// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice.
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. func Filter(array []interface{}, iterator ConditionIterator) []interface{}
{ var result = make([]interface{}, 0) for index, data := range array { if iterator(data, index) { result = append(result, data) } } return result }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
arrays.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/arrays.go#L50-L58
go
train
// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator.
func Count(array []interface{}, iterator ConditionIterator) int
// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. func Count(array []interface{}, iterator ConditionIterator) int
{ count := 0 for index, data := range array { if iterator(data, index) { count = count + 1 } } return count }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
utils.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/utils.go#L29-L35
go
train
// LeftTrim trim characters from the left-side of the input. // If second argument is empty, it's will be remove leading spaces.
func LeftTrim(str, chars string) string
// LeftTrim trim characters from the left-side of the input. // If second argument is empty, it's will be remove leading spaces. func LeftTrim(str, chars string) string
{ if chars == "" { return strings.TrimLeftFunc(str, unicode.IsSpace) } r, _ := regexp.Compile("^[" + chars + "]+") return r.ReplaceAllString(str, "") }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
utils.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/utils.go#L39-L45
go
train
// RightTrim trim characters from the right-side of the input. // If second argument is empty, it's will be remove spaces.
func RightTrim(str, chars string) string
// RightTrim trim characters from the right-side of the input. // If second argument is empty, it's will be remove spaces. func RightTrim(str, chars string) string
{ if chars == "" { return strings.TrimRightFunc(str, unicode.IsSpace) } r, _ := regexp.Compile("[" + chars + "]+$") return r.ReplaceAllString(str, "") }
asaskevich/govalidator
f61b66f89f4a311bef65f13e575bcf1a2ffadda6
utils.go
https://github.com/asaskevich/govalidator/blob/f61b66f89f4a311bef65f13e575bcf1a2ffadda6/utils.go#L49-L51
go
train
// Trim trim characters from both sides of the input. // If second argument is empty, it's will be remove spaces.
func Trim(str, chars string) string
// Trim trim characters from both sides of the input. // If second argument is empty, it's will be remove spaces. func Trim(str, chars string) string
{ return LeftTrim(RightTrim(str, chars), chars) }