repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/apimachinery/pkg/fields/doc.go
vendor/k8s.io/apimachinery/pkg/fields/doc.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package fields implements a simple field system, parsing and matching // selectors with sets of fields. package fields // import "k8s.io/apimachinery/pkg/fields"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/apimachinery/pkg/fields/fields.go
vendor/k8s.io/apimachinery/pkg/fields/fields.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fields import ( "sort" "strings" ) // Fields allows you to present fields independently from their storage. type Fields interface { // Has returns whether the provided field exists. Has(field string) (exists bool) // Get returns the value for the provided field. Get(field string) (value string) } // Set is a map of field:value. It implements Fields. type Set map[string]string // String returns all fields listed as a human readable string. // Conveniently, exactly the format that ParseSelector takes. func (ls Set) String() string { selector := make([]string, 0, len(ls)) for key, value := range ls { selector = append(selector, key+"="+value) } // Sort for determinism. sort.StringSlice(selector).Sort() return strings.Join(selector, ",") } // Has returns whether the provided field exists in the map. func (ls Set) Has(field string) bool { _, exists := ls[field] return exists } // Get returns the value in the map for the provided field. func (ls Set) Get(field string) string { return ls[field] } // AsSelector converts fields into a selectors. func (ls Set) AsSelector() Selector { return SelectorFromSet(ls) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/discovery/helper.go
vendor/k8s.io/client-go/discovery/helper.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package discovery import ( "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" apimachineryversion "k8s.io/apimachinery/pkg/version" ) // IsResourceEnabled queries the server to determine if the resource specified is present on the server. // This is particularly helpful when writing a controller or an e2e test that requires a particular resource to function. func IsResourceEnabled(client DiscoveryInterface, resourceToCheck schema.GroupVersionResource) (bool, error) { // this is a single request. The ServerResourcesForGroupVersion handles the core v1 group as legacy. resourceList, err := client.ServerResourcesForGroupVersion(resourceToCheck.GroupVersion().String()) if apierrors.IsNotFound(err) { // if the discovery endpoint isn't present, then the resource isn't present. return false, nil } if err != nil { return false, err } for _, actualResource := range resourceList.APIResources { if actualResource.Name == resourceToCheck.Resource { return true, nil } } return false, nil } // MatchesServerVersion queries the server to compares the build version // (git hash) of the client with the server's build version. It returns an error // if it failed to contact the server or if the versions are not an exact match. 
func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { return fmt.Errorf("couldn't read version from server: %v", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) } return nil } // ServerSupportsVersion returns an error if the server doesn't have the required version func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error { groups, err := client.ServerGroups() if err != nil { // This is almost always a connection error, and higher level code should treat this as a generic error, // not a negotiation specific error. return err } versions := metav1.ExtractGroupVersions(groups) serverVersions := sets.String{} for _, v := range versions { serverVersions.Insert(v) } if serverVersions.Has(requiredGV.String()) { return nil } // If the server supports no versions, then we should pretend it has the version because of old servers. // This can happen because discovery fails due to 403 Forbidden errors if len(serverVersions) == 0 { return nil } return fmt.Errorf("server does not support API version %q", requiredGV) } // GroupVersionResources converts APIResourceLists to the GroupVersionResources. 
func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { gvrs := map[schema.GroupVersionResource]struct{}{} for _, rl := range rls { gv, err := schema.ParseGroupVersion(rl.GroupVersion) if err != nil { return nil, err } for i := range rl.APIResources { gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} } } return gvrs, nil } // FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { result := []*metav1.APIResourceList{} for _, rl := range rls { filtered := *rl filtered.APIResources = nil for i := range rl.APIResources { if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) } } if filtered.APIResources != nil { result = append(result, &filtered) } } return result } // ResourcePredicate has a method to check if a resource matches a given condition. type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } // ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool // Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } // SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. type SupportsAllVerbs struct { Verbs []string } // Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/discovery/discovery_client.go
vendor/k8s.io/client-go/discovery/discovery_client.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package discovery import ( "context" "encoding/json" goerrors "errors" "fmt" "mime" "net/http" "net/url" "sort" "strings" "sync" "time" //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" openapi_v2 "github.com/google/gnostic-models/openapiv2" apidiscoveryv2 "k8s.io/api/apidiscovery/v2" apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/openapi" restclient "k8s.io/client-go/rest" ) const ( // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. CustomResourceDefinitions). defaultRetries = 2 // protobuf mime type openAPIV2mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. 
defaultTimeout = 32 * time.Second // defaultBurst is the default burst to be used with the discovery client's token bucket rate limiter defaultBurst = 300 AcceptV1 = runtime.ContentTypeJSON // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient // to ordering when comparing returned values in "Content-Type" header). AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" AcceptV2 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList" // Prioritize aggregated discovery by placing first in the order of discovery accept types. acceptDiscoveryFormats = AcceptV2 + "," + AcceptV2Beta1 + "," + AcceptV1 ) // Aggregated discovery content-type GVK. var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} var v2GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2", Kind: "APIGroupDiscoveryList"} // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. type DiscoveryInterface interface { RESTClient() restclient.Interface ServerGroupsInterface ServerResourcesInterface ServerVersionInterface OpenAPISchemaInterface OpenAPIV3SchemaInterface // Returns copy of current discovery client that will only // receive the legacy discovery format, or pointer to current // discovery client if it does not support legacy-only discovery. WithLegacy() DiscoveryInterface } // AggregatedDiscoveryInterface extends DiscoveryInterface to include a method to possibly // return discovery resources along with the discovery groups, which is what the newer // aggregated discovery format does (APIGroupDiscoveryList). 
type AggregatedDiscoveryInterface interface { DiscoveryInterface GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. // Note that If the ServerResourcesForGroupVersion method returns a cache miss // error, the user needs to explicitly call Invalidate to clear the cache, // otherwise the same cache miss error will be returned next time. type CachedDiscoveryInterface interface { DiscoveryInterface // Fresh is supposed to tell the caller whether or not to retry if the cache // fails to find something (false = retry, true = no need to retry). // // TODO: this needs to be revisited, this interface can't be locked properly // and doesn't make a lot of sense. Fresh() bool // Invalidate enforces that no cached data that is older than the current time // is used. Invalidate() } // ServerGroupsInterface has methods for obtaining supported groups on the API server type ServerGroupsInterface interface { // ServerGroups returns the supported groups, with information like supported versions and the // preferred version. ServerGroups() (*metav1.APIGroupList, error) } // ServerResourcesInterface has methods for obtaining supported resources on the API server type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) // ServerGroupsAndResources returns the supported groups and resources for all groups and versions. // // The returned group and resource lists might be non-nil with partial results even in the // case of non-nil error. ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) // ServerPreferredResources returns the supported resources with the version preferred by the // server. 
// // The returned group and resource lists might be non-nil with partial results even in the // case of non-nil error. ServerPreferredResources() ([]*metav1.APIResourceList, error) // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. // // The returned resource list might be non-nil with partial results even in the case of // non-nil error. ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) } // ServerVersionInterface has a method for retrieving the server's version. type ServerVersionInterface interface { // ServerVersion retrieves and parses the server's version (git version). ServerVersion() (*version.Info, error) } // OpenAPISchemaInterface has a method to retrieve the open API schema. type OpenAPISchemaInterface interface { // OpenAPISchema retrieves and parses the swagger API schema the server supports. OpenAPISchema() (*openapi_v2.Document, error) } type OpenAPIV3SchemaInterface interface { OpenAPIV3() openapi.Client } // DiscoveryClient implements the functions that discover server-supported API groups, // versions and resources. type DiscoveryClient struct { restClient restclient.Interface LegacyPrefix string // Forces the client to request only "unaggregated" (legacy) discovery. UseLegacyDiscovery bool } var _ AggregatedDiscoveryInterface = &DiscoveryClient{} // Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so // group would be "". 
func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { groupVersions := []metav1.GroupVersionForDiscovery{} for _, version := range apiVersions.Versions { groupVersion := metav1.GroupVersionForDiscovery{ GroupVersion: version, Version: version, } groupVersions = append(groupVersions, groupVersion) } apiGroup.Versions = groupVersions // There should be only one groupVersion returned at /api apiGroup.PreferredVersion = groupVersions[0] return } // GroupsAndMaybeResources returns the discovery groups, and (if new aggregated // discovery format) the resources keyed by group/version. Merges discovery groups // and resources from /api and /apis (either aggregated or not). Legacy groups // must be ordered first. The server will either return both endpoints (/api, /apis) // as aggregated discovery format or legacy format. For safety, resources will only // be returned if both endpoints returned resources. Returned "failedGVs" can be // empty, but will only be nil in the case an error is returned. func (d *DiscoveryClient) GroupsAndMaybeResources() ( *metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { // Legacy group ordered first (there is only one -- core/v1 group). Returned groups must // be non-nil, but it could be empty. Returned resources, apiResources map could be nil. groups, resources, failedGVs, err := d.downloadLegacy() if err != nil { return nil, nil, nil, err } // Discovery groups and (possibly) resources downloaded from /apis. apiGroups, apiResources, failedApisGVs, aerr := d.downloadAPIs() if aerr != nil { return nil, nil, nil, aerr } // Merge apis groups into the legacy groups. for _, group := range apiGroups.Groups { groups.Groups = append(groups.Groups, group) } // For safety, only return resources if both endpoints returned resources. 
if resources != nil && apiResources != nil { for gv, resourceList := range apiResources { resources[gv] = resourceList } } else if resources != nil { resources = nil } // Merge failed GroupVersions from /api and /apis for gv, err := range failedApisGVs { failedGVs[gv] = err } return groups, resources, failedGVs, err } // downloadLegacy returns the discovery groups and possibly resources // for the legacy v1 GVR at /api, or an error if one occurred. It is // possible for the resource map to be nil if the server returned // the unaggregated discovery. Returned "failedGVs" can be empty, but // will only be nil in the case of a returned error. func (d *DiscoveryClient) downloadLegacy() ( *metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { accept := acceptDiscoveryFormats if d.UseLegacyDiscovery { accept = AcceptV1 } var responseContentType string body, err := d.restClient.Get(). AbsPath("/api"). SetHeader("Accept", accept). Do(context.TODO()). ContentType(&responseContentType). Raw() apiGroupList := &metav1.APIGroupList{} failedGVs := map[schema.GroupVersion]error{} if err != nil { // Tolerate 404, since aggregated api servers can return it. if errors.IsNotFound(err) { // Return empty structures and no error. emptyGVMap := map[schema.GroupVersion]*metav1.APIResourceList{} return apiGroupList, emptyGVMap, failedGVs, nil } else { return nil, nil, nil, err } } var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Based on the content-type server responded with: aggregated or unaggregated. 
if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) } else { // Default is unaggregated discovery v1. var v metav1.APIVersions err = json.Unmarshal(body, &v) if err != nil { return nil, nil, nil, err } apiGroup := metav1.APIGroup{} if len(v.Versions) != 0 { apiGroup = apiVersionsToAPIGroup(&v) } apiGroupList.Groups = []metav1.APIGroup{apiGroup} } return apiGroupList, resourcesByGV, failedGVs, nil } // downloadAPIs returns the discovery groups and (if aggregated format) the // discovery resources. The returned groups will always exist, but the // resources map may be nil. Returned "failedGVs" can be empty, but will // only be nil in the case of a returned error. func (d *DiscoveryClient) downloadAPIs() ( *metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { accept := acceptDiscoveryFormats if d.UseLegacyDiscovery { accept = AcceptV1 } var responseContentType string body, err := d.restClient.Get(). AbsPath("/apis"). SetHeader("Accept", accept). Do(context.TODO()). ContentType(&responseContentType). Raw() if err != nil { return nil, nil, nil, err } apiGroupList := &metav1.APIGroupList{} failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Based on the content-type server responded with: aggregated or unaggregated. 
if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) } else { // Default is unaggregated discovery v1. err = json.Unmarshal(body, apiGroupList) if err != nil { return nil, nil, nil, err } } return apiGroupList, resourcesByGV, failedGVs, nil } // ContentTypeIsGVK checks of the content-type string is both // "application/json" and matches the provided GVK. An error // is returned if the content type string is malformed. // NOTE: This function is resilient to the ordering of the // content-type parameters, as well as parameters added by // intermediaries such as proxies or gateways. 
Examples: // // ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) // ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) // ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) // ("application/json", any GVK) = (false, nil) // ("application/json; charset=UTF-8", any GVK) = (false, nil) // ("malformed content type string", any GVK) = (false, error) func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) { base, params, err := mime.ParseMediaType(contentType) if err != nil { return false, err } gvkMatch := runtime.ContentTypeJSON == base && params["g"] == gvk.Group && params["v"] == gvk.Version && params["as"] == gvk.Kind return gvkMatch, nil } // ServerGroups returns the supported groups, with information like supported versions and the // preferred version. func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { groups, _, _, err := d.GroupsAndMaybeResources() if err != nil { return nil, err } return groups, nil } // ServerResourcesForGroupVersion returns the supported resources for a group and version. 
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { url := url.URL{} if len(groupVersion) == 0 { return nil, fmt.Errorf("groupVersion shouldn't be empty") } if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { url.Path = d.LegacyPrefix + "/" + groupVersion } else { url.Path = "/apis/" + groupVersion } resources = &metav1.APIResourceList{ GroupVersion: groupVersion, } err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources) if err != nil { // Tolerate core/v1 not found response by returning empty resource list; // this probably should not happen. But we should verify all callers are // not depending on this toleration before removal. if groupVersion == "v1" && errors.IsNotFound(err) { return resources, nil } return nil, err } return resources, nil } // ServerGroupsAndResources returns the supported resources for all groups and versions. func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { return ServerGroupsAndResources(d) }) } // ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. type ErrGroupDiscoveryFailed struct { // Groups is a list of the groups that failed to load and the error cause Groups map[schema.GroupVersion]error } // Error implements the error interface func (e *ErrGroupDiscoveryFailed) Error() string { var groups []string for k, v := range e.Groups { groups = append(groups, fmt.Sprintf("%s: %v", k, v)) } sort.Strings(groups) return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } // Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with ErrGroupDiscoveryFailed error. 
func (e *ErrGroupDiscoveryFailed) Is(target error) bool { _, ok := target.(*ErrGroupDiscoveryFailed) return ok } // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { _, ok := err.(*ErrGroupDiscoveryFailed) return err != nil && ok } // GroupDiscoveryFailedErrorGroups returns true if the error is an ErrGroupDiscoveryFailed error, // along with the map of group versions that failed discovery. func GroupDiscoveryFailedErrorGroups(err error) (map[schema.GroupVersion]error, bool) { var groupDiscoveryError *ErrGroupDiscoveryFailed if err != nil && goerrors.As(err, &groupDiscoveryError) { return groupDiscoveryError.Groups, true } return nil, false } func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var sgs *metav1.APIGroupList var resources []*metav1.APIResourceList var failedGVs map[schema.GroupVersion]error var err error // If the passed discovery object implements the wider AggregatedDiscoveryInterface, // then attempt to retrieve aggregated discovery with both groups and the resources. if ad, ok := d.(AggregatedDiscoveryInterface); ok { var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList sgs, resourcesByGV, failedGVs, err = ad.GroupsAndMaybeResources() for _, resourceList := range resourcesByGV { resources = append(resources, resourceList) } } else { sgs, err = d.ServerGroups() } if sgs == nil { return nil, nil, err } resultGroups := []*metav1.APIGroup{} for i := range sgs.Groups { resultGroups = append(resultGroups, &sgs.Groups[i]) } // resources is non-nil if aggregated discovery succeeded. if resources != nil { // Any stale Group/Versions returned by aggregated discovery // must be surfaced to the caller as failed Group/Versions. 
var ferr error if len(failedGVs) > 0 { ferr = &ErrGroupDiscoveryFailed{Groups: failedGVs} } return resultGroups, resources, ferr } groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) // order results by group/version discovery order result := []*metav1.APIResourceList{} for _, apiGroup := range sgs.Groups { for _, version := range apiGroup.Versions { gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} if resources, ok := groupVersionResources[gv]; ok { result = append(result, resources) } } } if len(failedGroups) == 0 { return resultGroups, result, nil } return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // ServerPreferredResources uses the provided discovery interface to look up preferred resources func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { var serverGroupList *metav1.APIGroupList var failedGroups map[schema.GroupVersion]error var groupVersionResources map[schema.GroupVersion]*metav1.APIResourceList var err error // If the passed discovery object implements the wider AggregatedDiscoveryInterface, // then it is attempt to retrieve both the groups and the resources. "failedGroups" // are Group/Versions returned as stale in AggregatedDiscovery format. ad, ok := d.(AggregatedDiscoveryInterface) if ok { serverGroupList, groupVersionResources, failedGroups, err = ad.GroupsAndMaybeResources() } else { serverGroupList, err = d.ServerGroups() } if err != nil { return nil, err } // Non-aggregated discovery must fetch resources from Groups. 
if groupVersionResources == nil { groupVersionResources, failedGroups = fetchGroupVersionResources(d, serverGroupList) } result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} apiResourceList, ok := groupVersionResources[groupVersion] if !ok { continue } // create empty list which is filled later in another loop emptyAPIResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } gvAPIResourceLists[groupVersion] = &emptyAPIResourceList result = append(result, &emptyAPIResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] if strings.Contains(apiResource.Name, "/") { continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version grAPIResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists for groupResource, apiResource := range grAPIResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} apiResourceList := gvAPIResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } if len(failedGroups) == 0 { return result, nil } return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // fetchServerResourcesForGroupVersions uses the discovery client 
to fetch the resources for the specified groups in parallel. func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) wg := &sync.WaitGroup{} resultLock := &sync.Mutex{} for _, apiGroup := range apiGroups.Groups { for _, version := range apiGroup.Versions { groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} wg.Add(1) go func() { defer wg.Done() defer utilruntime.HandleCrash() apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String()) // lock to record results resultLock.Lock() defer resultLock.Unlock() if err != nil { // TODO: maybe restrict this to NotFound errors failedGroups[groupVersion] = err } if apiResourceList != nil { // even in case of error, some fallback might have been returned groupVersionResources[groupVersion] = apiResourceList } }() } } wg.Wait() return groupVersionResources, failedGroups } // ServerPreferredResources returns the supported resources with the version preferred by the // server. func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { rs, err := ServerPreferredResources(d) return nil, rs, err }) return rs, err } // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. 
func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	// Delegate to the package-level helper, which filters preferred resources
	// down to namespaced ones.
	return ServerPreferredNamespacedResources(d)
}

// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources.
// The (possibly partial) filtered result is returned together with any error
// from ServerPreferredResources.
func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) {
	all, err := ServerPreferredResources(d)
	// Keep only namespaced resources; note the filtered list is returned even
	// when err is non-nil.
	return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
		return r.Namespaced
	}), all), err
}

// ServerVersion retrieves and parses the server's version (git version)
// from the /version endpoint.
func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
	body, err := d.restClient.Get().AbsPath("/version").Do(context.TODO()).Raw()
	if err != nil {
		return nil, err
	}
	var info version.Info
	err = json.Unmarshal(body, &info)
	if err != nil {
		return nil, fmt.Errorf("unable to parse the server version: %v", err)
	}
	return &info, nil
}

// OpenAPISchema fetches the open api v2 schema using a rest client and parses the proto.
func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
	// Ask for the protobuf encoding of the OpenAPI v2 document.
	data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw()
	if err != nil {
		return nil, err
	}
	document := &openapi_v2.Document{}
	err = proto.Unmarshal(data, document)
	if err != nil {
		return nil, err
	}
	return document, nil
}

// OpenAPIV3 returns an OpenAPI V3 client backed by this client's RESTClient.
func (d *DiscoveryClient) OpenAPIV3() openapi.Client {
	return openapi.NewClient(d.restClient)
}

// WithLegacy returns copy of current discovery client that will only
// receive the legacy discovery format.
func (d *DiscoveryClient) WithLegacy() DiscoveryInterface {
	// Shallow copy so the receiver itself is left unchanged.
	client := *d
	client.UseLegacyDiscovery = true
	return &client
}

// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns.
func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	var result []*metav1.APIResourceList
	var resultGroups []*metav1.APIGroup
	var err error
	for i := 0; i < maxRetries; i++ {
		resultGroups, result, err = f()
		if err == nil {
			return resultGroups, result, nil
		}
		// Only partial discovery failures are retried; any other error is
		// returned immediately with no results.
		if _, ok := err.(*ErrGroupDiscoveryFailed); !ok {
			return nil, nil, err
		}
	}
	// Retries exhausted: return the last (partial) results together with the error.
	return resultGroups, result, err
}

// setDiscoveryDefaults mutates config in place so it is suitable for the
// discovery endpoints: no API path or group version, default timeout and
// burst, a decode-only codec, and a default user agent.
func setDiscoveryDefaults(config *restclient.Config) error {
	config.APIPath = ""
	config.GroupVersion = nil
	if config.Timeout == 0 {
		config.Timeout = defaultTimeout
	}
	// if a burst limit is not already configured
	if config.Burst == 0 {
		// discovery is expected to be bursty, increase the default burst
		// to accommodate looking up resource info for many API groups.
		// matches burst set by ConfigFlags#ToDiscoveryClient().
		// see https://issue.k8s.io/86149
		config.Burst = defaultBurst
	}
	// Pair a no-op encoder with the universal decoder; discovery responses only
	// need decoding.
	codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
	config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
	if len(config.UserAgent) == 0 {
		config.UserAgent = restclient.DefaultKubernetesUserAgent()
	}
	return nil
}

// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
// can be used to discover supported resources in the API server.
// NewDiscoveryClientForConfig is equivalent to NewDiscoveryClientForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
	// Copy the config so defaulting does not mutate the caller's value.
	config := *c
	if err := setDiscoveryDefaults(&config); err != nil {
		return nil, err
	}
	httpClient, err := restclient.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewDiscoveryClientForConfigAndClient(&config, httpClient)
}

// NewDiscoveryClientForConfigAndClient creates a new DiscoveryClient for the given config.
// This client
// can be used to discover supported resources in the API server.
// Note the http client provided takes precedence over the configured transport values.
func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http.Client) (*DiscoveryClient, error) {
	// Copy the config so defaulting does not mutate the caller's value.
	config := *c
	if err := setDiscoveryDefaults(&config); err != nil {
		return nil, err
	}
	client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient)
	// NOTE(review): on error the returned client wraps a possibly-nil rest
	// client; callers are expected to check err before using it.
	return &DiscoveryClient{restClient: client, LegacyPrefix: "/api", UseLegacyDiscovery: false}, err
}

// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If
// there is an error, it panics.
func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
	client, err := NewDiscoveryClientForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient.
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient {
	return &DiscoveryClient{restClient: c, LegacyPrefix: "/api", UseLegacyDiscovery: false}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// It is safe to call on a nil receiver, in which case it returns nil.
func (d *DiscoveryClient) RESTClient() restclient.Interface {
	if d == nil {
		return nil
	}
	return d.restClient
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/discovery/doc.go
vendor/k8s.io/client-go/discovery/doc.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package discovery provides ways to discover server-supported // API groups, versions and resources. package discovery // import "k8s.io/client-go/discovery"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/discovery/aggregated_discovery.go
vendor/k8s.io/client-go/discovery/aggregated_discovery.go
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"fmt"

	apidiscovery "k8s.io/api/apidiscovery/v2"
	apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// StaleGroupVersionError encapsulates a failed GroupVersion marked "stale"
// in the returned AggregatedDiscovery format.
type StaleGroupVersionError struct {
	gv schema.GroupVersion
}

// Error implements the error interface.
func (s StaleGroupVersionError) Error() string {
	return fmt.Sprintf("stale GroupVersion discovery: %v", s.gv)
}

// SplitGroupsAndResources transforms "aggregated" discovery top-level structure into
// the previous "unaggregated" discovery groups and resources.
// It returns the group list, the resources keyed by GroupVersion, and the
// GroupVersions that failed (currently: those marked stale).
func SplitGroupsAndResources(aggregatedGroups apidiscovery.APIGroupDiscoveryList) (
	*metav1.APIGroupList,
	map[schema.GroupVersion]*metav1.APIResourceList,
	map[schema.GroupVersion]error) {
	// Aggregated group list will contain the entirety of discovery, including
	// groups, versions, and resources. GroupVersions marked "stale" are failed.
	groups := []*metav1.APIGroup{}
	failedGVs := map[schema.GroupVersion]error{}
	resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{}
	for _, aggGroup := range aggregatedGroups.Items {
		group, resources, failed := convertAPIGroup(aggGroup)
		groups = append(groups, group)
		for gv, resourceList := range resources {
			resourcesByGV[gv] = resourceList
		}
		for gv, err := range failed {
			failedGVs[gv] = err
		}
	}
	// Transform slice of groups to group list before returning.
	groupList := &metav1.APIGroupList{}
	groupList.Groups = make([]metav1.APIGroup, 0, len(groups))
	for _, group := range groups {
		groupList.Groups = append(groupList.Groups, *group)
	}
	return groupList, resourcesByGV, failedGVs
}

// convertAPIGroup transforms an "aggregated" APIGroupDiscovery to a "legacy" APIGroup,
// also returning the map of APIResourceList for resources within GroupVersions,
// and the map of stale GroupVersions as errors.
func convertAPIGroup(g apidiscovery.APIGroupDiscovery) (
	*metav1.APIGroup,
	map[schema.GroupVersion]*metav1.APIResourceList,
	map[schema.GroupVersion]error) {
	// Iterate through versions to convert to group and resources.
	group := &metav1.APIGroup{}
	gvResources := map[schema.GroupVersion]*metav1.APIResourceList{}
	failedGVs := map[schema.GroupVersion]error{}
	group.Name = g.ObjectMeta.Name
	for _, v := range g.Versions {
		gv := schema.GroupVersion{Group: g.Name, Version: v.Version}
		// Stale GroupVersions are reported as failures, not converted.
		if v.Freshness == apidiscovery.DiscoveryFreshnessStale {
			failedGVs[gv] = StaleGroupVersionError{gv: gv}
			continue
		}
		version := metav1.GroupVersionForDiscovery{}
		version.GroupVersion = gv.String()
		version.Version = v.Version
		group.Versions = append(group.Versions, version)
		// PreferredVersion is first non-stale Version
		if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) {
			group.PreferredVersion = version
		}
		resourceList := &metav1.APIResourceList{}
		resourceList.GroupVersion = gv.String()
		for _, r := range v.Resources {
			resource, err := convertAPIResource(r)
			if err == nil {
				resourceList.APIResources = append(resourceList.APIResources, resource)
			}
			// Subresources field in new format get transformed into full APIResources.
			// It is possible a partial result with an error was returned to be used
			// as the parent resource for the subresource.
			for _, subresource := range r.Subresources {
				sr, err := convertAPISubresource(resource, subresource)
				if err == nil {
					resourceList.APIResources = append(resourceList.APIResources, sr)
				}
			}
		}
		gvResources[gv] = resourceList
	}
	return group, gvResources, failedGVs
}

// emptyKind is the zero GroupVersionKind, used below to detect a missing ResponseKind.
var emptyKind = metav1.GroupVersionKind{}

// convertAPIResource transforms a APIResourceDiscovery to an APIResource. We are
// resilient to missing GVK, since this resource might be the parent resource
// for a subresource. If the parent is missing a GVK, it is not returned in
// discovery, and the subresource MUST have the GVK.
func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResource, error) {
	result := metav1.APIResource{
		Name:         in.Resource,
		SingularName: in.SingularResource,
		Namespaced:   in.Scope == apidiscovery.ScopeNamespace,
		Verbs:        in.Verbs,
		ShortNames:   in.ShortNames,
		Categories:   in.Categories,
	}
	var err error
	if in.ResponseKind != nil && (*in.ResponseKind) != emptyKind {
		result.Group = in.ResponseKind.Group
		result.Version = in.ResponseKind.Version
		result.Kind = in.ResponseKind.Kind
	} else {
		err = fmt.Errorf("discovery resource %s missing GVK", in.Resource)
	}
	// Can return partial result with error, which can be the parent for a
	// subresource. Do not add this result to the returned discovery resources.
	return result, err
}

// convertAPISubresource transforms a APISubresourceDiscovery to an APIResource.
// Unlike the parent conversion above, a missing GVK here is a hard error.
func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) {
	result := metav1.APIResource{}
	if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind {
		return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource)
	}
	// Subresources are surfaced as "<parent>/<subresource>" and inherit the
	// parent's singular name and scope.
	result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource)
	result.SingularName = parent.SingularName
	result.Namespaced = parent.Namespaced
	result.Group = in.ResponseKind.Group
	result.Version = in.ResponseKind.Version
	result.Kind = in.ResponseKind.Kind
	result.Verbs = in.Verbs
	return result, nil
}

// Please note the functions below will be removed in v1.33. They facilitate conversion
// between the deprecated type apidiscoveryv2beta1.APIGroupDiscoveryList.

// SplitGroupsAndResourcesV2Beta1 transforms "aggregated" discovery top-level structure into
// the previous "unaggregated" discovery groups and resources.
// Deprecated: Please use SplitGroupsAndResources
func SplitGroupsAndResourcesV2Beta1(aggregatedGroups apidiscoveryv2beta1.APIGroupDiscoveryList) (
	*metav1.APIGroupList,
	map[schema.GroupVersion]*metav1.APIResourceList,
	map[schema.GroupVersion]error) {
	// Aggregated group list will contain the entirety of discovery, including
	// groups, versions, and resources. GroupVersions marked "stale" are failed.
	groups := []*metav1.APIGroup{}
	failedGVs := map[schema.GroupVersion]error{}
	resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{}
	for _, aggGroup := range aggregatedGroups.Items {
		group, resources, failed := convertAPIGroupv2beta1(aggGroup)
		groups = append(groups, group)
		for gv, resourceList := range resources {
			resourcesByGV[gv] = resourceList
		}
		for gv, err := range failed {
			failedGVs[gv] = err
		}
	}
	// Transform slice of groups to group list before returning.
	groupList := &metav1.APIGroupList{}
	groupList.Groups = make([]metav1.APIGroup, 0, len(groups))
	for _, group := range groups {
		groupList.Groups = append(groupList.Groups, *group)
	}
	return groupList, resourcesByGV, failedGVs
}

// convertAPIGroupv2beta1 transforms an "aggregated" APIGroupDiscovery to a "legacy" APIGroup,
// also returning the map of APIResourceList for resources within GroupVersions,
// and the map of stale GroupVersions as errors.
func convertAPIGroupv2beta1(g apidiscoveryv2beta1.APIGroupDiscovery) (
	*metav1.APIGroup,
	map[schema.GroupVersion]*metav1.APIResourceList,
	map[schema.GroupVersion]error) {
	// Iterate through versions to convert to group and resources.
	group := &metav1.APIGroup{}
	gvResources := map[schema.GroupVersion]*metav1.APIResourceList{}
	failedGVs := map[schema.GroupVersion]error{}
	group.Name = g.ObjectMeta.Name
	for _, v := range g.Versions {
		gv := schema.GroupVersion{Group: g.Name, Version: v.Version}
		// Stale GroupVersions are reported as failures, not converted.
		if v.Freshness == apidiscoveryv2beta1.DiscoveryFreshnessStale {
			failedGVs[gv] = StaleGroupVersionError{gv: gv}
			continue
		}
		version := metav1.GroupVersionForDiscovery{}
		version.GroupVersion = gv.String()
		version.Version = v.Version
		group.Versions = append(group.Versions, version)
		// PreferredVersion is first non-stale Version
		if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) {
			group.PreferredVersion = version
		}
		resourceList := &metav1.APIResourceList{}
		resourceList.GroupVersion = gv.String()
		for _, r := range v.Resources {
			resource, err := convertAPIResourcev2beta1(r)
			if err == nil {
				resourceList.APIResources = append(resourceList.APIResources, resource)
			}
			// Subresources field in new format get transformed into full APIResources.
			// It is possible a partial result with an error was returned to be used
			// as the parent resource for the subresource.
			for _, subresource := range r.Subresources {
				sr, err := convertAPISubresourcev2beta1(resource, subresource)
				if err == nil {
					resourceList.APIResources = append(resourceList.APIResources, sr)
				}
			}
		}
		gvResources[gv] = resourceList
	}
	return group, gvResources, failedGVs
}

// convertAPIResourcev2beta1 transforms a APIResourceDiscovery to an APIResource. We are
// resilient to missing GVK, since this resource might be the parent resource
// for a subresource. If the parent is missing a GVK, it is not returned in
// discovery, and the subresource MUST have the GVK.
func convertAPIResourcev2beta1(in apidiscoveryv2beta1.APIResourceDiscovery) (metav1.APIResource, error) { result := metav1.APIResource{ Name: in.Resource, SingularName: in.SingularResource, Namespaced: in.Scope == apidiscoveryv2beta1.ScopeNamespace, Verbs: in.Verbs, ShortNames: in.ShortNames, Categories: in.Categories, } // Can return partial result with error, which can be the parent for a // subresource. Do not add this result to the returned discovery resources. if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { return result, fmt.Errorf("discovery resource %s missing GVK", in.Resource) } result.Group = in.ResponseKind.Group result.Version = in.ResponseKind.Version result.Kind = in.ResponseKind.Kind return result, nil } // convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource. func convertAPISubresourcev2beta1(parent metav1.APIResource, in apidiscoveryv2beta1.APISubresourceDiscovery) (metav1.APIResource, error) { result := metav1.APIResource{} if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) } result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) result.SingularName = parent.SingularName result.Namespaced = parent.Namespaced result.Group = in.ResponseKind.Group result.Version = in.ResponseKind.Version result.Kind = in.ResponseKind.Kind result.Verbs = in.Verbs return result, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/discovery/fake/discovery.go
vendor/k8s.io/client-go/discovery/fake/discovery.go
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
	"fmt"
	"net/http"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/openapi"
	kubeversion "k8s.io/client-go/pkg/version"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/testing"
)

// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action,
// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct.
type FakeDiscovery struct {
	*testing.Fake

	// FakedServerVersion, when non-nil, is returned by ServerVersion()
	// instead of the client-go build version.
	FakedServerVersion *version.Info
}

// ServerResourcesForGroupVersion returns the supported resources for a group
// and version.
func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	// Record the call so tests can observe it; the invoke result itself is
	// ignored unless it errors.
	action := testing.ActionImpl{
		Verb:     "get",
		Resource: schema.GroupVersionResource{Resource: "resource"},
	}
	if _, err := c.Invokes(action, nil); err != nil {
		return nil, err
	}
	// Answer from the statically configured Resources list.
	for _, resourceList := range c.Resources {
		if resourceList.GroupVersion == groupVersion {
			return resourceList, nil
		}
	}
	return nil, &errors.StatusError{
		ErrStatus: metav1.Status{
			Status:  metav1.StatusFailure,
			Code:    http.StatusNotFound,
			Reason:  metav1.StatusReasonNotFound,
			Message: fmt.Sprintf("the server could not find the requested resource, GroupVersion %q not found", groupVersion),
		}}
}

// ServerGroupsAndResources returns the supported groups and resources for all groups and versions.
func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	sgs, err := c.ServerGroups()
	if err != nil {
		return nil, nil, err
	}
	resultGroups := []*metav1.APIGroup{}
	for i := range sgs.Groups {
		resultGroups = append(resultGroups, &sgs.Groups[i])
	}

	action := testing.ActionImpl{
		Verb:     "get",
		Resource: schema.GroupVersionResource{Resource: "resource"},
	}
	// On invoke error, the groups/resources gathered so far are still returned
	// alongside the error.
	if _, err = c.Invokes(action, nil); err != nil {
		return resultGroups, c.Resources, err
	}
	return resultGroups, c.Resources, nil
}

// ServerPreferredResources returns the supported resources with the version
// preferred by the server. The fake always returns nil, nil.
func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}

// ServerPreferredNamespacedResources returns the supported namespaced resources
// with the version preferred by the server. The fake always returns nil, nil.
func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}

// ServerGroups returns the supported groups, with information like supported
// versions and the preferred version.
func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) {
	action := testing.ActionImpl{
		Verb:     "get",
		Resource: schema.GroupVersionResource{Resource: "group"},
	}
	if _, err := c.Invokes(action, nil); err != nil {
		return nil, err
	}

	// Derive the group list from the configured Resources; the first
	// GroupVersion seen for a group becomes its PreferredVersion.
	groups := map[string]*metav1.APIGroup{}

	for _, res := range c.Resources {
		gv, err := schema.ParseGroupVersion(res.GroupVersion)
		if err != nil {
			return nil, err
		}
		group := groups[gv.Group]
		if group == nil {
			group = &metav1.APIGroup{
				Name: gv.Group,
				PreferredVersion: metav1.GroupVersionForDiscovery{
					GroupVersion: res.GroupVersion,
					Version:      gv.Version,
				},
			}
			groups[gv.Group] = group
		}

		group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{
			GroupVersion: res.GroupVersion,
			Version:      gv.Version,
		})
	}

	list := &metav1.APIGroupList{}
	for _, apiGroup := range groups {
		list.Groups = append(list.Groups, *apiGroup)
	}

	return list, nil
}

// ServerVersion retrieves and parses the server's version.
// If FakedServerVersion is set it is returned; otherwise the client-go
// build version is used.
func (c *FakeDiscovery) ServerVersion() (*version.Info, error) {
	action := testing.ActionImpl{}
	action.Verb = "get"
	action.Resource = schema.GroupVersionResource{Resource: "version"}
	_, err := c.Invokes(action, nil)
	if err != nil {
		return nil, err
	}

	if c.FakedServerVersion != nil {
		return c.FakedServerVersion, nil
	}

	versionInfo := kubeversion.Get()
	return &versionInfo, nil
}

// OpenAPISchema retrieves and parses the swagger API schema the server supports.
// The fake returns an empty document.
func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) {
	return &openapi_v2.Document{}, nil
}

// OpenAPIV3 is not implemented by the fake and panics if called.
func (c *FakeDiscovery) OpenAPIV3() openapi.Client {
	panic("unimplemented")
}

// RESTClient returns a RESTClient that is used to communicate with API server
// by this client implementation. The fake has none and returns nil.
func (c *FakeDiscovery) RESTClient() restclient.Interface {
	return nil
}

// WithLegacy is not implemented by the fake and panics if called.
func (c *FakeDiscovery) WithLegacy() discovery.DiscoveryInterface {
	panic("unimplemented")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/version/base.go
vendor/k8s.io/client-go/pkg/version/base.go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package version

// Base version information.
//
// This is the fallback data used when version information from git is not
// provided via go ldflags. It provides an approximation of the Kubernetes
// version for ad-hoc builds (e.g. `go build`) that cannot get the version
// information from git.
//
// If you are looking at these fields in the git tree, they look
// strange. They are modified on the fly by the build process. The
// in-tree values are dummy values used for "git archive", which also
// works for GitHub tar downloads.
//
// When releasing a new Kubernetes version, this file is updated by
// build/mark_new_version.sh to reflect the new version, and then a
// git annotated tag (using format vX.Y where X == Major version and Y
// == Minor version) is created to point to the commit that updates
// pkg/version/base.go
var (
	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
	// instead. First step in deprecation, keep the fields but make
	// them irrelevant. (Next we'll take it out, which may muck with
	// scripts consuming the kubectl version output - but most of
	// these should be looking at gitVersion already anyways.)
	gitMajor string = "" // major version, always numeric
	gitMinor string = "" // minor version, numeric possibly followed by "+"

	// gitVersion is the semantic version, derived by build scripts (see
	// https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning
	// https://kubernetes.io/releases/version-skew-policy/
	// for a detailed discussion of this field)
	//
	// TODO: This field is still called "gitVersion" for legacy
	// reasons. For prerelease versions, the build metadata on the
	// semantic version is a git hash, but the version itself is no
	// longer the direct output of "git describe", but a slight
	// translation to be semver compliant.
	//
	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
	// companion .gitattributes file containing 'export-subst' in this same
	// directory. See also https://git-scm.com/docs/gitattributes
	gitVersion   string = "v0.0.0-master+$Format:%H$"
	gitCommit    string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
	gitTreeState string = ""            // state of git tree, either "clean" or "dirty"

	buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/version/version.go
vendor/k8s.io/client-go/pkg/version/version.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package version import ( "fmt" "runtime" apimachineryversion "k8s.io/apimachinery/pkg/version" ) // Get returns the overall codebase version. It's for detecting // what code a binary was built from. func Get() apimachineryversion.Info { // These variables typically come from -ldflags settings and in // their absence fallback to the settings in pkg/version/base.go return apimachineryversion.Info{ Major: gitMajor, Minor: gitMinor, GitVersion: gitVersion, GitCommit: gitCommit, GitTreeState: gitTreeState, BuildDate: buildDate, GoVersion: runtime.Version(), Compiler: runtime.Compiler, Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/version/doc.go
vendor/k8s.io/client-go/pkg/version/doc.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:openapi-gen=true // Package version supplies version information collected at build time to // kubernetes components. package version // import "k8s.io/client-go/pkg/version"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package clientauthentication

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
	if in.CertificateAuthorityData != nil {
		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.Config != nil {
		// Config is an interface value; copy it via runtime.Object's DeepCopyObject.
		out.Config = in.Config.DeepCopyObject()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
	if in == nil {
		return nil
	}
	out := new(Cluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Spec.DeepCopyInto(&out.Spec)
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(ExecCredentialStatus)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
func (in *ExecCredential) DeepCopy() *ExecCredential {
	if in == nil {
		return nil
	}
	out := new(ExecCredential)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExecCredential) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
	*out = *in
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(Cluster)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
	if in == nil {
		return nil
	}
	out := new(ExecCredentialSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
	*out = *in
	if in.ExpirationTimestamp != nil {
		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
		// metav1.Time's DeepCopy returns a new pointer, assigned directly here.
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
	if in == nil {
		return nil
	}
	out := new(ExecCredentialStatus)
	in.DeepCopyInto(out)
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(review): vendored upstream file — keep byte-identical to
// k8s.io/client-go; do not diverge locally.

package clientauthentication

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExecCredential is used by exec-based plugins to communicate credentials to
// HTTP transports.
type ExecCredential struct {
	metav1.TypeMeta

	// Spec holds information passed to the plugin by the transport. This contains
	// request and runtime specific information, such as if the session is interactive.
	Spec ExecCredentialSpec

	// Status is filled in by the plugin and holds the credentials that the transport
	// should use to contact the API.
	// +optional
	Status *ExecCredentialStatus
}

// ExecCredentialSpec holds request and runtime specific information provided by
// the transport.
type ExecCredentialSpec struct {
	// Interactive is true when the transport detects the command is being called from an
	// interactive prompt, i.e., when stdin has been passed to this exec plugin.
	// +optional
	Interactive bool

	// Cluster contains information to allow an exec plugin to communicate with the
	// kubernetes cluster being authenticated to. Note that Cluster is non-nil only
	// when provideClusterInfo is set to true in the exec provider config (i.e.,
	// ExecConfig.ProvideClusterInfo).
	// +optional
	Cluster *Cluster
}

// ExecCredentialStatus holds credentials for the transport to use.
type ExecCredentialStatus struct {
	// ExpirationTimestamp indicates a time when the provided credentials expire.
	// +optional
	ExpirationTimestamp *metav1.Time
	// Token is a bearer token used by the client for request authentication.
	// +optional
	Token string `datapolicy:"token"`
	// PEM-encoded client TLS certificate.
	// +optional
	ClientCertificateData string
	// PEM-encoded client TLS private key.
	// +optional
	ClientKeyData string `datapolicy:"secret-key"`
}

// Cluster contains information to allow an exec plugin to communicate
// with the kubernetes cluster being authenticated to.
//
// To ensure that this struct contains everything someone would need to communicate
// with a kubernetes cluster (just like they would via a kubeconfig), the fields
// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
type Cluster struct {
	// Server is the address of the kubernetes cluster (https://hostname:port).
	Server string
	// TLSServerName is passed to the server for SNI and is used in the client to
	// check server certificates against. If ServerName is empty, the hostname
	// used to contact the server is used.
	// +optional
	TLSServerName string
	// InsecureSkipTLSVerify skips the validity check for the server's certificate.
	// This will make your HTTPS connections insecure.
	// +optional
	InsecureSkipTLSVerify bool
	// CAData contains PEM-encoded certificate authority certificates.
	// If empty, system roots should be used.
	// +listType=atomic
	// +optional
	CertificateAuthorityData []byte
	// ProxyURL is the URL to the proxy to be used for all requests to this
	// cluster.
	// +optional
	ProxyURL string
	// DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
	// +optional
	DisableCompression bool
	// Config holds additional config data that is specific to the exec
	// plugin with regards to the cluster being authenticated to.
	//
	// This data is sourced from the clientcmd Cluster object's
	// extensions[client.authentication.k8s.io/exec] field:
	//
	// clusters:
	// - name: my-cluster
	//   cluster:
	//     ...
	//     extensions:
	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
	//       extension:
	//         audience: 06e3fbd18de8  # arbitrary config
	//
	// In some environments, the user config may be exactly the same across many clusters
	// (i.e. call this exec plugin) minus some details that are specific to each cluster
	// such as the audience. This field allows the per cluster config to be directly
	// specified with the cluster info. Using this field to store secret data is not
	// recommended as one of the prime benefits of exec plugins is that no secrets need
	// to be stored directly in the kubeconfig.
	// +optional
	Config runtime.Object
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(review): vendored upstream file — keep byte-identical to
// k8s.io/client-go; do not diverge locally.

package clientauthentication

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name use in this package
const GroupName = "client.authentication.k8s.io"

// SchemeGroupVersion is group version used to register these objects
// (the internal, unversioned representation — runtime.APIVersionInternal).
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects the functions that register this package's
	// types with a runtime.Scheme.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme applies all stored registration functions to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

// addKnownTypes registers the internal ExecCredential type with the scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ExecCredential{},
	)
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package clientauthentication holds the internal (hub) types for the
// client.authentication.k8s.io API group, used by exec credential plugins.
//
// +k8s:deepcopy-gen=package
// +groupName=client.authentication.k8s.io

package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

// NOTE(review): vendored, generated file — do not hand-edit; regenerate with
// deepcopy-gen upstream and re-vendor instead.

package v1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
	if in.CertificateAuthorityData != nil {
		// []byte field: allocate a fresh backing array so the copy does not
		// alias the receiver's CA bundle.
		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	// Config is a runtime.RawExtension value (not a pointer here), so it is
	// always deep-copied in place.
	in.Config.DeepCopyInto(&out.Config)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
	if in == nil {
		return nil
	}
	out := new(Cluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Spec.DeepCopyInto(&out.Spec)
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(ExecCredentialStatus)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
func (in *ExecCredential) DeepCopy() *ExecCredential {
	if in == nil {
		return nil
	}
	out := new(ExecCredential)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExecCredential) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
	*out = *in
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(Cluster)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
	if in == nil {
		return nil
	}
	out := new(ExecCredentialSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
	*out = *in
	if in.ExpirationTimestamp != nil {
		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
	if in == nil {
		return nil
	}
	out := new(ExecCredentialStatus)
	in.DeepCopyInto(out)
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(review): vendored upstream file — keep byte-identical to
// k8s.io/client-go; do not diverge locally.

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExecCredential is used by exec-based plugins to communicate credentials to
// HTTP transports.
type ExecCredential struct {
	metav1.TypeMeta `json:",inline"`

	// Spec holds information passed to the plugin by the transport.
	Spec ExecCredentialSpec `json:"spec,omitempty"`

	// Status is filled in by the plugin and holds the credentials that the transport
	// should use to contact the API.
	// +optional
	Status *ExecCredentialStatus `json:"status,omitempty"`
}

// ExecCredentialSpec holds request and runtime specific information provided by
// the transport.
type ExecCredentialSpec struct {
	// Cluster contains information to allow an exec plugin to communicate with the
	// kubernetes cluster being authenticated to. Note that Cluster is non-nil only
	// when provideClusterInfo is set to true in the exec provider config (i.e.,
	// ExecConfig.ProvideClusterInfo).
	// +optional
	Cluster *Cluster `json:"cluster,omitempty"`

	// Interactive declares whether stdin has been passed to this exec plugin.
	Interactive bool `json:"interactive"`
}

// ExecCredentialStatus holds credentials for the transport to use.
//
// Token and ClientKeyData are sensitive fields. This data should only be
// transmitted in-memory between client and exec plugin process. Exec plugin
// itself should at least be protected via file permissions.
type ExecCredentialStatus struct {
	// ExpirationTimestamp indicates a time when the provided credentials expire.
	// +optional
	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
	// Token is a bearer token used by the client for request authentication.
	Token string `json:"token,omitempty" datapolicy:"token"`
	// PEM-encoded client TLS certificates (including intermediates, if any).
	ClientCertificateData string `json:"clientCertificateData,omitempty"`
	// PEM-encoded private key for the above certificate.
	ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"`
}

// Cluster contains information to allow an exec plugin to communicate
// with the kubernetes cluster being authenticated to.
//
// To ensure that this struct contains everything someone would need to communicate
// with a kubernetes cluster (just like they would via a kubeconfig), the fields
// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
type Cluster struct {
	// Server is the address of the kubernetes cluster (https://hostname:port).
	Server string `json:"server"`
	// TLSServerName is passed to the server for SNI and is used in the client to
	// check server certificates against. If ServerName is empty, the hostname
	// used to contact the server is used.
	// +optional
	TLSServerName string `json:"tls-server-name,omitempty"`
	// InsecureSkipTLSVerify skips the validity check for the server's certificate.
	// This will make your HTTPS connections insecure.
	// +optional
	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
	// CAData contains PEM-encoded certificate authority certificates.
	// If empty, system roots should be used.
	// +listType=atomic
	// +optional
	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
	// ProxyURL is the URL to the proxy to be used for all requests to this
	// cluster.
	// +optional
	ProxyURL string `json:"proxy-url,omitempty"`
	// DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
	// +optional
	DisableCompression bool `json:"disable-compression,omitempty"`
	// Config holds additional config data that is specific to the exec
	// plugin with regards to the cluster being authenticated to.
	//
	// This data is sourced from the clientcmd Cluster object's
	// extensions[client.authentication.k8s.io/exec] field:
	//
	// clusters:
	// - name: my-cluster
	//   cluster:
	//     ...
	//     extensions:
	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
	//       extension:
	//         audience: 06e3fbd18de8  # arbitrary config
	//
	// In some environments, the user config may be exactly the same across many clusters
	// (i.e. call this exec plugin) minus some details that are specific to each cluster
	// such as the audience. This field allows the per cluster config to be directly
	// specified with the cluster info. Using this field to store secret data is not
	// recommended as one of the prime benefits of exec plugins is that no secrets need
	// to be stored directly in the kubeconfig.
	// +optional
	Config runtime.RawExtension `json:"config,omitempty"`
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by conversion-gen. DO NOT EDIT.

// NOTE(review): vendored, generated file — do not hand-edit; regenerate with
// conversion-gen upstream and re-vendor instead. The unsafe.Pointer casts
// below are emitted by the generator for fields/structs it determined to be
// layout-identical between the v1 and internal types.

package v1

import (
	unsafe "unsafe"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
)

func init() {
	localSchemeBuilder.Register(RegisterConversions)
}

// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_clientauthentication_Cluster_To_v1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
	}); err != nil {
		return err
	}
	return nil
}

func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
	out.Server = in.Server
	out.TLSServerName = in.TLSServerName
	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
	// Zero-copy reinterpretation of the []byte slice header (generated).
	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
	out.ProxyURL = in.ProxyURL
	out.DisableCompression = in.DisableCompression
	// Config differs in type (RawExtension vs runtime.Object), so a real
	// conversion function is required here rather than an unsafe cast.
	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function.
func Convert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
	return autoConvert_v1_Cluster_To_clientauthentication_Cluster(in, out, s)
}

func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
	out.Server = in.Server
	out.TLSServerName = in.TLSServerName
	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
	out.ProxyURL = in.ProxyURL
	out.DisableCompression = in.DisableCompression
	if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil {
		return err
	}
	return nil
}

// Convert_clientauthentication_Cluster_To_v1_Cluster is an autogenerated conversion function.
func Convert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
	return autoConvert_clientauthentication_Cluster_To_v1_Cluster(in, out, s)
}

func autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
	if err := Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
	return nil
}

// Convert_v1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
func Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
	return autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
}

func autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
	if err := Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
	return nil
}

// Convert_clientauthentication_ExecCredential_To_v1_ExecCredential is an autogenerated conversion function.
func Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
	return autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in, out, s)
}

func autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(clientauthentication.Cluster)
		if err := Convert_v1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Cluster = nil
	}
	out.Interactive = in.Interactive
	return nil
}

// Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
func Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
	return autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
}

func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
	out.Interactive = in.Interactive
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(Cluster)
		if err := Convert_clientauthentication_Cluster_To_v1_Cluster(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Cluster = nil
	}
	return nil
}

// Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec is an autogenerated conversion function.
func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
	return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s)
}

func autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
	out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
	out.Token = in.Token
	out.ClientCertificateData = in.ClientCertificateData
	out.ClientKeyData = in.ClientKeyData
	return nil
}

// Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
func Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
	return autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
}

func autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
	out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
	out.Token = in.Token
	out.ClientCertificateData = in.ClientCertificateData
	out.ClientKeyData = in.ClientKeyData
	return nil
}

// Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus is an autogenerated conversion function.
func Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
	return autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in, out, s)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(review): vendored upstream file — keep byte-identical to
// k8s.io/client-go; do not diverge locally.

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name use in this package
const GroupName = "client.authentication.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder starts empty here; generated files register their own
	// functions via localSchemeBuilder (see the comment in init below).
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all stored registration functions to a scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}

// addKnownTypes registers the v1 ExecCredential type and the standard
// meta/v1 group-version kinds with the scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ExecCredential{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by defaulter-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by deepcopy-gen. DO NOT EDIT. package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in if in.CertificateAuthorityData != nil { in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData *out = make([]byte, len(*in)) copy(*out, *in) } in.Config.DeepCopyInto(&out.Config) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. func (in *Cluster) DeepCopy() *Cluster { if in == nil { return nil } out := new(Cluster) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in out.TypeMeta = in.TypeMeta in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status *out = new(ExecCredentialStatus) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. 
func (in *ExecCredential) DeepCopy() *ExecCredential { if in == nil { return nil } out := new(ExecCredential) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ExecCredential) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = *in if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster *out = new(Cluster) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { if in == nil { return nil } out := new(ExecCredentialSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { *out = *in if in.ExpirationTimestamp != nil { in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp *out = (*in).DeepCopy() } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { if in == nil { return nil } out := new(ExecCredentialStatus) in.DeepCopyInto(out) return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` // Spec holds information passed to the plugin by the transport. Spec ExecCredentialSpec `json:"spec,omitempty"` // Status is filled in by the plugin and holds the credentials that the transport // should use to contact the API. // +optional Status *ExecCredentialStatus `json:"status,omitempty"` } // ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Cluster contains information to allow an exec plugin to communicate with the // kubernetes cluster being authenticated to. Note that Cluster is non-nil only // when provideClusterInfo is set to true in the exec provider config (i.e., // ExecConfig.ProvideClusterInfo). // +optional Cluster *Cluster `json:"cluster,omitempty"` // Interactive declares whether stdin has been passed to this exec plugin. Interactive bool `json:"interactive"` } // ExecCredentialStatus holds credentials for the transport to use. // // Token and ClientKeyData are sensitive fields. This data should only be // transmitted in-memory between client and exec plugin process. 
Exec plugin // itself should at least be protected via file permissions. type ExecCredentialStatus struct { // ExpirationTimestamp indicates a time when the provided credentials expire. // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` } // Cluster contains information to allow an exec plugin to communicate // with the kubernetes cluster being authenticated to. // // To ensure that this struct contains everything someone would need to communicate // with a kubernetes cluster (just like they would via a kubeconfig), the fields // should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception // of CertificateAuthority, since CA data will always be passed to the plugin as bytes. type Cluster struct { // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is passed to the server for SNI and is used in the client to // check server certificates against. If ServerName is empty, the hostname // used to contact the server is used. // +optional TLSServerName string `json:"tls-server-name,omitempty"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. // This will make your HTTPS connections insecure. // +optional InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` // CAData contains PEM-encoded certificate authority certificates. // If empty, system roots should be used. 
// +listType=atomic // +optional CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` // ProxyURL is the URL to the proxy to be used for all requests to this // cluster. // +optional ProxyURL string `json:"proxy-url,omitempty"` // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. // +optional DisableCompression bool `json:"disable-compression,omitempty"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. // // This data is sourced from the clientcmd Cluster object's // extensions[client.authentication.k8s.io/exec] field: // // clusters: // - name: my-cluster // cluster: // ... // extensions: // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config // extension: // audience: 06e3fbd18de8 # arbitrary config // // In some environments, the user config may be exactly the same across many clusters // (i.e. call this exec plugin) minus some details that are specific to each cluster // such as the audience. This field allows the per cluster config to be directly // specified with the cluster info. Using this field to store secret data is not // recommended as one of the prime benefits of exec plugins is that no secrets need // to be stored directly in the kubeconfig. // +optional Config runtime.RawExtension `json:"config,omitempty"` }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by conversion-gen. DO NOT EDIT. package v1beta1 import ( unsafe "unsafe" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" ) func init() { localSchemeBuilder.Register(RegisterConversions) } // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_clientauthentication_Cluster_To_v1beta1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), 
b.(*ExecCredentialSpec), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) }); err != nil { return err } return nil } func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { out.Server = in.Server out.TLSServerName = in.TLSServerName out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { return err } return nil } // Convert_v1beta1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function. 
func Convert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { return autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in, out, s) } func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { out.Server = in.Server out.TLSServerName = in.TLSServerName out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { return err } return nil } // Convert_clientauthentication_Cluster_To_v1beta1_Cluster is an autogenerated conversion function. func Convert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { return autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in, out, s) } func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { return err } out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) return nil } // Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. 
func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) } func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { return err } out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) return nil } // Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function. func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s) } func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster *out = new(clientauthentication.Cluster) if err := Convert_v1beta1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil { return err } } else { out.Cluster = nil } out.Interactive = in.Interactive return nil } // Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. 
func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) } func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { out.Interactive = in.Interactive if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster *out = new(Cluster) if err := Convert_clientauthentication_Cluster_To_v1beta1_Cluster(*in, *out, s); err != nil { return err } } else { out.Cluster = nil } return nil } // Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec is an autogenerated conversion function. func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) } func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token out.ClientCertificateData = in.ClientCertificateData out.ClientKeyData = in.ClientKeyData return nil } // Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. 
func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) } func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token out.ClientCertificateData = in.ClientCertificateData out.ClientKeyData = in.ClientKeyData return nil } // Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function. func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "client.authentication.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. localSchemeBuilder.Register(addKnownTypes) } func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ExecCredential{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by defaulter-gen. DO NOT EDIT. package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // RegisterDefaults adds defaulters functions to the given scheme. // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package install installs the experimental API group, making it available as // an option to all of the API encoding/decoding machinery. package install import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/v1" "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" ) // Install registers the API group and adds types to a scheme func Install(scheme *runtime.Scheme) { utilruntime.Must(clientauthentication.AddToScheme(scheme)) utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(v1beta1.AddToScheme(scheme)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package workqueue import ( "math" "sync" "time" "golang.org/x/time/rate" ) // Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. type RateLimiter TypedRateLimiter[any] type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it Forget(item T) // NumRequeues returns back how many failures the item has had NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential // // Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { return DefaultTypedControllerRateLimiter[any]() } // DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { return NewTypedMaxOfRateLimiter( NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } // Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. type BucketRateLimiter = TypedBucketRateLimiter[any] // TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } func (r *TypedBucketRateLimiter[T]) Forget(item T) { } // Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] // TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit // dealing with max failures and expiration are up to the caller type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex failures map[T]int baseDelay time.Duration maxDelay time.Duration } var _ RateLimiter = &ItemExponentialFailureRateLimiter{} // Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) } func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { return &TypedItemExponentialFailureRateLimiter[T]{ failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } // Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. 
func DefaultItemBasedRateLimiter() RateLimiter { return DefaultTypedItemBasedRateLimiter[any]() } func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) } func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() exp := r.failures[item] r.failures[item] = r.failures[item] + 1 // The backoff is capped such that 'calculated' value never overflows. backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) if backoff > math.MaxInt64 { return r.maxDelay } calculated := time.Duration(backoff) if calculated > r.maxDelay { return r.maxDelay } return calculated } func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() delete(r.failures, item) } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that // Deprecated: Use TypedItemFastSlowRateLimiter instead. type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] // TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex failures map[T]int maxFastAttempts int fastDelay time.Duration slowDelay time.Duration } var _ RateLimiter = &ItemFastSlowRateLimiter{} // Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) } func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { return &TypedItemFastSlowRateLimiter[T]{ failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() r.failures[item] = r.failures[item] + 1 if r.failures[item] <= r.maxFastAttempts { return r.fastDelay } return r.slowDelay } func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() delete(r.failures, item) } // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. // // Deprecated: Use TypedMaxOfRateLimiter instead. type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] // TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. type TypedMaxOfRateLimiter[T comparable] struct { limiters []TypedRateLimiter[T] } func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) if curr > ret { ret = curr } } return ret } // Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. 
func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { return NewTypedMaxOfRateLimiter(limiters...) } func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { return &TypedMaxOfRateLimiter[T]{limiters: limiters} } func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) if curr > ret { ret = curr } } return ret } func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long // Deprecated: Use TypedWithMaxWaitRateLimiter instead. type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] // TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long type TypedWithMaxWaitRateLimiter[T comparable] struct { limiter TypedRateLimiter[T] maxDelay time.Duration } // Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) } func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay } return delay } func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/metrics.go
vendor/k8s.io/client-go/util/workqueue/metrics.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package workqueue import ( "sync" "time" "k8s.io/utils/clock" ) // This file provides abstractions for setting the provider (e.g., prometheus) // of metrics. type queueMetrics[T comparable] interface { add(item T) get(item T) done(item T) updateUnfinishedWork() } // GaugeMetric represents a single numerical value that can arbitrarily go up // and down. type GaugeMetric interface { Inc() Dec() } // SettableGaugeMetric represents a single numerical value that can arbitrarily go up // and down. (Separate from GaugeMetric to preserve backwards compatibility.) type SettableGaugeMetric interface { Set(float64) } // CounterMetric represents a single numerical value that only ever // goes up. type CounterMetric interface { Inc() } // SummaryMetric captures individual observations. type SummaryMetric interface { Observe(float64) } // HistogramMetric counts individual observations. type HistogramMetric interface { Observe(float64) } type noopMetric struct{} func (noopMetric) Inc() {} func (noopMetric) Dec() {} func (noopMetric) Set(float64) {} func (noopMetric) Observe(float64) {} // defaultQueueMetrics expects the caller to lock before setting any metrics. 
type defaultQueueMetrics[T comparable] struct { clock clock.Clock // current depth of a workqueue depth GaugeMetric // total number of adds handled by a workqueue adds CounterMetric // how long an item stays in a workqueue latency HistogramMetric // how long processing an item from a workqueue takes workDuration HistogramMetric addTimes map[T]time.Time processingStartTimes map[T]time.Time // how long have current threads been working? unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics[T]) add(item T) { if m == nil { return } m.adds.Inc() m.depth.Inc() if _, exists := m.addTimes[item]; !exists { m.addTimes[item] = m.clock.Now() } } func (m *defaultQueueMetrics[T]) get(item T) { if m == nil { return } m.depth.Dec() m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { m.latency.Observe(m.sinceInSeconds(startTime)) delete(m.addTimes, item) } } func (m *defaultQueueMetrics[T]) done(item T) { if m == nil { return } if startTime, exists := m.processingStartTimes[item]; exists { m.workDuration.Observe(m.sinceInSeconds(startTime)) delete(m.processingStartTimes, item) } } func (m *defaultQueueMetrics[T]) updateUnfinishedWork() { // Note that a summary metric would be better for this, but prometheus // doesn't seem to have non-hacky ways to reset the summary metrics. var total float64 var oldest float64 for _, t := range m.processingStartTimes { age := m.sinceInSeconds(t) total += age if age > oldest { oldest = age } } m.unfinishedWorkSeconds.Set(total) m.longestRunningProcessor.Set(oldest) } type noMetrics[T any] struct{} func (noMetrics[T]) add(item T) {} func (noMetrics[T]) get(item T) {} func (noMetrics[T]) done(item T) {} func (noMetrics[T]) updateUnfinishedWork() {} // Gets the time since the specified start in seconds. 
func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 { return m.clock.Since(start).Seconds() } type retryMetrics interface { retry() } type defaultRetryMetrics struct { retries CounterMetric } func (m *defaultRetryMetrics) retry() { if m == nil { return } m.retries.Inc() } // MetricsProvider generates various metrics used by the queue. type MetricsProvider interface { NewDepthMetric(name string) GaugeMetric NewAddsMetric(name string) CounterMetric NewLatencyMetric(name string) HistogramMetric NewWorkDurationMetric(name string) HistogramMetric NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric } type noopMetricsProvider struct{} func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { return noopMetric{} } func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { return noopMetric{} } func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric { return noopMetric{} } func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric { return noopMetric{} } func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { return noopMetric{} } func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric { return noopMetric{} } func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } var globalMetricsProvider MetricsProvider = noopMetricsProvider{} var setGlobalMetricsProviderOnce sync.Once func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] { if len(name) == 0 || mp == (noopMetricsProvider{}) { return noMetrics[T]{} } return &defaultQueueMetrics[T]{ clock: clock, depth: mp.NewDepthMetric(name), adds: mp.NewAddsMetric(name), latency: mp.NewLatencyMetric(name), workDuration: mp.NewWorkDurationMetric(name), 
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), addTimes: map[T]time.Time{}, processingStartTimes: map[T]time.Time{}, } } func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } if provider == nil { provider = globalMetricsProvider } return &defaultRetryMetrics{ retries: provider.NewRetriesMetric(name), } } // SetProvider sets the metrics provider for all subsequently created work // queues. Only the first call has an effect. func SetProvider(metricsProvider MetricsProvider) { setGlobalMetricsProviderOnce.Do(func() { globalMetricsProvider = metricsProvider }) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package workqueue import ( "container/heap" "sync" "time" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/utils/clock" ) // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. // // Deprecated: use TypedDelayingInterface instead. type DelayingInterface TypedDelayingInterface[any] // TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. type TypedDelayingInterface[T comparable] interface { TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed AddAfter(item T, duration time.Duration) } // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. // // Deprecated: use TypedDelayingQueueConfig instead. type DelayingQueueConfig = TypedDelayingQueueConfig[any] // TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. type TypedDelayingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string // MetricsProvider optionally allows specifying a metrics provider to use for the queue // instead of the global provider. 
MetricsProvider MetricsProvider // Clock optionally allows injecting a real or fake clock for testing purposes. Clock clock.WithTicker // Queue optionally allows injecting custom queue Interface instead of the default one. Queue TypedInterface[T] } // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. // // Deprecated: use NewTypedDelayingQueue instead. func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } // NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability. // NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewTypedDelayingQueueWithConfig instead and specify a name. func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] { return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) } // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. // // Deprecated: use NewTypedDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { return NewTypedDelayingQueueWithConfig[any](config) } // TypedNewDelayingQueue exists for backwards compatibility only. // // Deprecated: use NewTypedDelayingQueueWithConfig instead. func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { return NewTypedDelayingQueue[T]() } // NewTypedDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. 
func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.Queue == nil { config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one // Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{ Name: name, Queue: q, }) } // NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. // Deprecated: Use NewDelayingQueueWithConfig instead. func NewNamedDelayingQueue(name string) DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue // with ability to inject real or fake clock for testing purposes. // Deprecated: Use NewDelayingQueueWithConfig instead. 
func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{ Name: name, Clock: clock, }) } func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { ret := &delayingType[T]{ TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor[T], 1000), metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() return ret } // delayingType wraps an Interface and provides delayed re-enquing type delayingType[T comparable] struct { TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock // stopCh lets us signal a shutdown to the waiting loop stopCh chan struct{} // stopOnce guarantees we only signal shutdown a single time stopOnce sync.Once // heartbeat ensures we wait no more than maxWait before firing heartbeat clock.Ticker // waitingForAddCh is a buffered channel that feeds waitingForAdd waitingForAddCh chan *waitFor[T] // metrics counts the number of retries metrics retryMetrics } // waitFor holds the data to add and the time it should be added type waitFor[T any] struct { data T readyAt time.Time // index in the priority queue (heap) index int } // waitForPriorityQueue implements a priority queue for waitFor items. // // waitForPriorityQueue implements heap.Interface. The item occurring next in // time (i.e., the item with the smallest readyAt) is at the root (index 0). // Peek returns this minimum item at index 0. Pop returns the minimum item after // it has been removed from the queue and placed at index Len()-1 by // container/heap. Push adds an item at index Len(), and container/heap // percolates it into the correct location. 
type waitForPriorityQueue[T any] []*waitFor[T] func (pq waitForPriorityQueue[T]) Len() int { return len(pq) } func (pq waitForPriorityQueue[T]) Less(i, j int) bool { return pq[i].readyAt.Before(pq[j].readyAt) } func (pq waitForPriorityQueue[T]) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] pq[i].index = i pq[j].index = j } // Push adds an item to the queue. Push should not be called directly; instead, // use `heap.Push`. func (pq *waitForPriorityQueue[T]) Push(x interface{}) { n := len(*pq) item := x.(*waitFor[T]) item.index = n *pq = append(*pq, item) } // Pop removes an item from the queue. Pop should not be called directly; // instead, use `heap.Pop`. func (pq *waitForPriorityQueue[T]) Pop() interface{} { n := len(*pq) item := (*pq)[n-1] item.index = -1 *pq = (*pq)[0:(n - 1)] return item } // Peek returns the item at the beginning of the queue, without removing the // item or otherwise mutating the queue. It is safe to call directly. func (pq waitForPriorityQueue[T]) Peek() interface{} { return pq[0] } // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return } q.metrics.retry() // immediately add things with no delay if duration <= 0 { q.Add(item) return } select { case <-q.stopCh: // unblock if ShutDown() is called case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}: } } // maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. 
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an // expired item sitting for more than 10 seconds. const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list never := make(<-chan time.Time) // Make a timer that expires when the item at the head of the waiting queue is ready var nextReadyAtTimer clock.Timer waitingForQueue := &waitForPriorityQueue[T]{} heap.Init(waitingForQueue) waitingEntryByData := map[T]*waitFor[T]{} for { if q.TypedInterface.ShuttingDown() { return } now := q.clock.Now() // Add ready entries for waitingForQueue.Len() > 0 { entry := waitingForQueue.Peek().(*waitFor[T]) if entry.readyAt.After(now) { break } entry = heap.Pop(waitingForQueue).(*waitFor[T]) q.Add(entry.data) delete(waitingEntryByData, entry.data) } // Set up a wait for the first item's readyAt (if one exists) nextReadyAt := never if waitingForQueue.Len() > 0 { if nextReadyAtTimer != nil { nextReadyAtTimer.Stop() } entry := waitingForQueue.Peek().(*waitFor[T]) nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) nextReadyAt = nextReadyAtTimer.C() } select { case <-q.stopCh: return case <-q.heartbeat.C(): // continue the loop, which will add ready items case <-nextReadyAt: // continue the loop, which will add ready items case waitEntry := <-q.waitingForAddCh: if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { q.Add(waitEntry.data) } drained := false for !drained { select { case waitEntry := <-q.waitingForAddCh: if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { q.Add(waitEntry.data) } default: drained = true } } } } } // insert adds the entry to the priority queue, or updates the readyAt if it 
already exists in the queue func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) { // if the entry already exists, update the time only if it would cause the item to be queued sooner existing, exists := knownEntries[entry.data] if exists { if existing.readyAt.After(entry.readyAt) { existing.readyAt = entry.readyAt heap.Fix(q, existing.index) } return } heap.Push(q, entry) knownEntries[entry.data] = entry }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package workqueue import "k8s.io/utils/clock" // RateLimitingInterface is an interface that rate limits items being added to the queue. // // Deprecated: Use TypedRateLimitingInterface instead. type RateLimitingInterface TypedRateLimitingInterface[any] // TypedRateLimitingInterface is an interface that rate limits items being added to the queue. type TypedRateLimitingInterface[T comparable] interface { TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. Forget(item T) // NumRequeues returns back how many times the item was requeued NumRequeues(item T) int } // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. // // Deprecated: Use TypedRateLimitingQueueConfig instead. type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] // TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. type TypedRateLimitingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. 
Name string // MetricsProvider optionally allows specifying a metrics provider to use for the queue // instead of the global provider. MetricsProvider MetricsProvider // Clock optionally allows injecting a real or fake clock for testing purposes. Clock clock.WithTicker // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewRateLimitingQueueWithConfig instead and specify a name. // // Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) } // NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewTypedRateLimitingQueueWithConfig instead and specify a name. func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) } // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. // // Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. 
func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) } // NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.DelayingQueue == nil { config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } return &rateLimitingType[T]{ TypedDelayingInterface: config.DelayingQueue, rateLimiter: rateLimiter, } } // NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. // Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ Name: name, }) } // NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability // with the option to inject a custom delaying queue instead of the default one. // Deprecated: Use NewRateLimitingQueueWithConfig instead. 
func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ DelayingQueue: di, }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing type rateLimitingType[T comparable] struct { TypedDelayingInterface[T] rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok func (q *rateLimitingType[T]) AddRateLimited(item T) { q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
vendor/k8s.io/client-go/util/workqueue/parallelizer.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package workqueue import ( "context" "sync" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) type DoWorkPieceFunc func(piece int) type options struct { chunkSize int } type Options func(*options) // WithChunkSize allows to set chunks of work items to the workers, rather than // processing one by one. // It is recommended to use this option if the number of pieces significantly // higher than the number of workers and the work done for each item is small. func WithChunkSize(c int) func(*options) { return func(o *options) { o.chunkSize = c } } // ParallelizeUntil is a framework that allows for parallelizing N // independent pieces of work until done or the context is canceled. 
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
	if pieces == 0 {
		return
	}
	o := options{}
	for _, opt := range opts {
		opt(&o)
	}
	chunkSize := o.chunkSize
	if chunkSize < 1 {
		chunkSize = 1
	}

	chunks := ceilDiv(pieces, chunkSize)
	// Pre-load every chunk index and close the channel so workers can simply
	// drain it via range and exit when it is empty.
	toProcess := make(chan int, chunks)
	for i := 0; i < chunks; i++ {
		toProcess <- i
	}
	close(toProcess)

	// A nil ctx leaves stop nil; receiving from a nil channel blocks forever,
	// so the select below always falls through to the work in that case.
	var stop <-chan struct{}
	if ctx != nil {
		stop = ctx.Done()
	}
	// Never spawn more workers than there are chunks of work.
	if chunks < workers {
		workers = chunks
	}
	wg := sync.WaitGroup{}
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer utilruntime.HandleCrash()
			defer wg.Done()
			for chunk := range toProcess {
				start := chunk * chunkSize
				end := start + chunkSize
				// The last chunk may be short.
				if end > pieces {
					end = pieces
				}
				for p := start; p < end; p++ {
					// Check for cancellation before each individual piece.
					select {
					case <-stop:
						return
					default:
						doWorkPiece(p)
					}
				}
			}
		}()
	}
	wg.Wait()
}

// ceilDiv returns a/b rounded up to the nearest integer.
func ceilDiv(a, b int) int {
	return (a + b - 1) / b
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/doc.go
vendor/k8s.io/client-go/util/workqueue/doc.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package workqueue provides a simple queue that supports the following // features: // - Fair: items processed in the order in which they are added. // - Stingy: a single item will not be processed multiple times concurrently, // and if an item is added multiple times before it can be processed, it // will only be processed once. // - Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // - Shutdown notifications. package workqueue // import "k8s.io/client-go/util/workqueue"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/workqueue/queue.go
vendor/k8s.io/client-go/util/workqueue/queue.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sync"
	"time"

	"k8s.io/utils/clock"
)

// Deprecated: Interface is deprecated, use TypedInterface instead.
type Interface TypedInterface[any]

// TypedInterface is the basic work-queue contract: producers Add items,
// workers Get them, and each worker must call Done when finished processing.
type TypedInterface[T comparable] interface {
	Add(item T)
	Len() int
	Get() (item T, shutdown bool)
	Done(item T)
	ShutDown()
	ShutDownWithDrain()
	ShuttingDown() bool
}

// Queue is the underlying storage for items. The functions below are always
// called from the same goroutine.
type Queue[T comparable] interface {
	// Touch can be hooked when an existing item is added again. This may be
	// useful if the implementation allows priority change for the given item.
	Touch(item T)
	// Push adds a new item.
	Push(item T)
	// Len tells the total number of items.
	Len() int
	// Pop retrieves an item.
	Pop() (item T)
}

// DefaultQueue is a slice based FIFO queue.
func DefaultQueue[T comparable]() Queue[T] {
	return new(queue[T])
}

// queue is a slice which implements Queue.
type queue[T comparable] []T

// Touch is a no-op: the slice-based queue has no per-item priority to reset.
func (q *queue[T]) Touch(item T) {}

func (q *queue[T]) Push(item T) {
	*q = append(*q, item)
}

func (q *queue[T]) Len() int {
	return len(*q)
}

func (q *queue[T]) Pop() (item T) {
	item = (*q)[0]

	// The underlying array still exists and reference this object, so the object will not be garbage collected.
	(*q)[0] = *new(T)
	*q = (*q)[1:]

	return item
}

// QueueConfig specifies optional configurations to customize an Interface.
// Deprecated: use TypedQueueConfig instead.
type QueueConfig = TypedQueueConfig[any]

// TypedQueueConfig specifies optional configurations to customize a
// TypedInterface; the zero value is usable.
type TypedQueueConfig[T comparable] struct {
	// Name for the queue. If unnamed, the metrics will not be registered.
	Name string

	// MetricsProvider optionally allows specifying a metrics provider to use for the queue
	// instead of the global provider.
	MetricsProvider MetricsProvider

	// Clock ability to inject real or fake clock for testing purposes.
	Clock clock.WithTicker

	// Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue.
	Queue Queue[T]
}

// New constructs a new work queue (see the package comment).
//
// Deprecated: use NewTyped instead.
func New() *Type {
	return NewWithConfig(QueueConfig{
		Name: "",
	})
}

// NewTyped constructs a new work queue (see the package comment).
func NewTyped[T comparable]() *Typed[T] {
	return NewTypedWithConfig(TypedQueueConfig[T]{
		Name: "",
	})
}

// NewWithConfig constructs a new workqueue with ability to
// customize different properties.
//
// Deprecated: use NewTypedWithConfig instead.
func NewWithConfig(config QueueConfig) *Type {
	return NewTypedWithConfig(config)
}

// NewTypedWithConfig constructs a new workqueue with ability to
// customize different properties.
func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] {
	return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
}

// NewNamed creates a new named queue.
// Deprecated: Use NewWithConfig instead.
func NewNamed(name string) *Type {
	return NewWithConfig(QueueConfig{
		Name: name,
	})
}

// newQueueWithConfig constructs a new named workqueue
// with the ability to customize different properties for testing purposes
func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
	metricsProvider := globalMetricsProvider
	if config.MetricsProvider != nil {
		metricsProvider = config.MetricsProvider
	}

	// Fill in defaults for anything the caller left unset.
	if config.Clock == nil {
		config.Clock = clock.RealClock{}
	}

	if config.Queue == nil {
		config.Queue = DefaultQueue[T]()
	}

	return newQueue(
		config.Clock,
		config.Queue,
		newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
		updatePeriod,
	)
}

func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
	t := &Typed[T]{
		clock:                      c,
		queue:                      queue,
		dirty:                      set[T]{},
		processing:                 set[T]{},
		cond:                       sync.NewCond(&sync.Mutex{}),
		metrics:                    metrics,
		unfinishedWorkUpdatePeriod: updatePeriod,
	}

	// Don't start the goroutine for a type of noMetrics so we don't consume
	// resources unnecessarily
	if _, ok := metrics.(noMetrics[T]); !ok {
		go t.updateUnfinishedWorkLoop()
	}

	return t
}

const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond

// Type is a work queue (see the package comment).
// Deprecated: Use Typed instead.
type Type = Typed[any]

type Typed[t comparable] struct {
	// queue defines the order in which we will work on items. Every
	// element of queue should be in the dirty set and not in the
	// processing set.
	queue Queue[t]

	// dirty defines all of the items that need to be processed.
	dirty set[t]

	// Things that are currently being processed are in the processing set.
	// These things may be simultaneously in the dirty set. When we finish
	// processing something and remove it from this set, we'll check if
	// it's in the dirty set, and if so, add it to the queue.
	processing set[t]

	// cond guards all mutable state below and wakes blocked Get/drain waiters.
	cond *sync.Cond

	shuttingDown bool
	drain        bool

	metrics queueMetrics[t]

	unfinishedWorkUpdatePeriod time.Duration
	clock                      clock.WithTicker
}

type empty struct{}
type set[t comparable] map[t]empty

func (s set[t]) has(item t) bool {
	_, exists := s[item]
	return exists
}

func (s set[t]) insert(item t) {
	s[item] = empty{}
}

func (s set[t]) delete(item t) {
	delete(s, item)
}

func (s set[t]) len() int {
	return len(s)
}

// Add marks item as needing processing.
func (q *Typed[T]) Add(item T) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	if q.shuttingDown {
		return
	}
	if q.dirty.has(item) {
		// the same item is added again before it is processed, call the Touch
		// function if the queue cares about it (for e.g, reset its priority)
		if !q.processing.has(item) {
			q.queue.Touch(item)
		}
		return
	}

	q.metrics.add(item)

	q.dirty.insert(item)
	// Items currently being processed are deferred: Done will re-queue them.
	if q.processing.has(item) {
		return
	}

	q.queue.Push(item)
	q.cond.Signal()
}

// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value, that can't be synchronized properly.
func (q *Typed[T]) Len() int {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	return q.queue.Len()
}

// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Typed[T]) Get() (item T, shutdown bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	for q.queue.Len() == 0 && !q.shuttingDown {
		q.cond.Wait()
	}
	if q.queue.Len() == 0 {
		// We must be shutting down.
		return *new(T), true
	}

	item = q.queue.Pop()

	q.metrics.get(item)

	q.processing.insert(item)
	q.dirty.delete(item)

	return item, false
}

// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Typed[T]) Done(item T) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	q.metrics.done(item)

	q.processing.delete(item)
	if q.dirty.has(item) {
		// The item was re-Added while in flight; hand it back to the queue.
		q.queue.Push(item)
		q.cond.Signal()
	} else if q.processing.len() == 0 {
		// Nothing in flight: wake a potential ShutDownWithDrain waiter.
		q.cond.Signal()
	}
}

// ShutDown will cause q to ignore all new items added to it and
// immediately instruct the worker goroutines to exit.
func (q *Typed[T]) ShutDown() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	q.drain = false
	q.shuttingDown = true
	q.cond.Broadcast()
}

// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
// as the worker goroutines have "drained", i.e: finished processing and called
// Done on all existing items in the queue; they will be instructed to exit and
// ShutDownWithDrain will return. Hence: a strict requirement for using this is;
// your workers must ensure that Done is called on all items in the queue once
// the shut down has been initiated, if that is not the case: this will block
// indefinitely. It is, however, safe to call ShutDown after having called
// ShutDownWithDrain, as to force the queue shut down to terminate immediately
// without waiting for the drainage.
func (q *Typed[T]) ShutDownWithDrain() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	q.drain = true
	q.shuttingDown = true
	q.cond.Broadcast()

	// Wait until all in-flight items are Done, or a later ShutDown clears drain.
	for q.processing.len() != 0 && q.drain {
		q.cond.Wait()
	}
}

// ShuttingDown reports whether a shutdown (drained or not) has been initiated.
func (q *Typed[T]) ShuttingDown() bool {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	return q.shuttingDown
}

// updateUnfinishedWorkLoop periodically pushes unfinished-work stats to the
// metrics sink until the queue starts shutting down.
func (q *Typed[T]) updateUnfinishedWorkLoop() {
	t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
	defer t.Stop()
	for range t.C() {
		if !func() bool {
			q.cond.L.Lock()
			defer q.cond.L.Unlock()
			if !q.shuttingDown {
				q.metrics.updateUnfinishedWork()
				return true
			}
			return false
		}() {
			return
		}
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
vendor/k8s.io/client-go/util/flowcontrol/throttle.go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"context"
	"errors"
	"sync"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/utils/clock"
)

// PassiveRateLimiter is the non-blocking subset of a rate limiter.
type PassiveRateLimiter interface {
	// TryAccept returns true if a token is taken immediately. Otherwise,
	// it returns false.
	TryAccept() bool
	// Stop stops the rate limiter, subsequent calls to CanAccept will return false
	Stop()
	// QPS returns QPS of this rate limiter
	QPS() float32
}

// RateLimiter extends PassiveRateLimiter with blocking acquisition.
type RateLimiter interface {
	PassiveRateLimiter
	// Accept returns once a token becomes available.
	Accept()
	// Wait returns nil if a token is taken before the Context is done.
	Wait(ctx context.Context) error
}

// tokenBucketPassiveRateLimiter implements PassiveRateLimiter on top of the
// golang.org/x/time/rate token bucket.
type tokenBucketPassiveRateLimiter struct {
	limiter *rate.Limiter
	qps     float32
	clock   clock.PassiveClock
}

// tokenBucketRateLimiter adds the blocking Accept/Wait methods, which require
// a clock that can Sleep.
type tokenBucketRateLimiter struct {
	tokenBucketPassiveRateLimiter
	clock Clock
}

// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
	limiter := rate.NewLimiter(rate.Limit(qps), burst)
	return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
}

// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
// a PassiveRateLimiter which does not have Accept() and Wait() methods.
func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
	limiter := rate.NewLimiter(rate.Limit(qps), burst)
	return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
}

// An injectable, mockable clock interface.
type Clock interface {
	clock.PassiveClock
	Sleep(time.Duration)
}

// Compile-time check that the real clock satisfies Clock.
var _ Clock = (*clock.RealClock)(nil)

// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
// but allows an injectable clock, for testing.
func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
	limiter := rate.NewLimiter(rate.Limit(qps), burst)
	return newTokenBucketRateLimiterWithClock(limiter, c, qps)
}

// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
// and uses a PassiveClock.
func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
	limiter := rate.NewLimiter(rate.Limit(qps), burst)
	return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
}

func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
	return &tokenBucketRateLimiter{
		tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
		clock:                         c,
	}
}

func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
	return &tokenBucketPassiveRateLimiter{
		limiter: limiter,
		qps:     qps,
		clock:   c,
	}
}

// Stop is a no-op for the token-bucket limiter.
func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
}

func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
	return tbprl.qps
}

func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
	return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
}

// Accept will block until a token becomes available
func (tbrl *tokenBucketRateLimiter) Accept() {
	now := tbrl.clock.Now()
	// Reserve a token and sleep until its ready time.
	tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
}

func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
	return tbrl.limiter.Wait(ctx)
}

// fakeAlwaysRateLimiter accepts every request immediately; for tests.
type fakeAlwaysRateLimiter struct{}

func NewFakeAlwaysRateLimiter() RateLimiter {
	return &fakeAlwaysRateLimiter{}
}

func (t *fakeAlwaysRateLimiter) TryAccept() bool {
	return true
}

func (t *fakeAlwaysRateLimiter) Stop() {}

func (t *fakeAlwaysRateLimiter) Accept() {}

func (t *fakeAlwaysRateLimiter) QPS() float32 {
	return 1
}

func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
	return nil
}

// fakeNeverRateLimiter never accepts; Accept blocks until Stop is called.
type fakeNeverRateLimiter struct {
	wg sync.WaitGroup
}

func NewFakeNeverRateLimiter() RateLimiter {
	rl := fakeNeverRateLimiter{}
	rl.wg.Add(1)
	return &rl
}

func (t *fakeNeverRateLimiter) TryAccept() bool {
	return false
}

func (t *fakeNeverRateLimiter) Stop() {
	t.wg.Done()
}

func (t *fakeNeverRateLimiter) Accept() {
	t.wg.Wait()
}

func (t *fakeNeverRateLimiter) QPS() float32 {
	return 1
}

// NOTE(review): the error text below is grammatically off ("can not be
// accept") but is kept verbatim — this is vendored upstream code and callers
// may match on the string.
func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
	return errors.New("can not be accept")
}

var (
	_ RateLimiter = (*tokenBucketRateLimiter)(nil)
	_ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
	_ RateLimiter = (*fakeNeverRateLimiter)(nil)
)

var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
vendor/k8s.io/client-go/util/flowcontrol/backoff.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"math/rand"
	"sync"
	"time"

	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
)

// backoffEntry records the current backoff delay for one key and the time it
// was last advanced.
type backoffEntry struct {
	backoff    time.Duration
	lastUpdate time.Time
}

// Backoff tracks per-key exponential backoff windows, guarded by its embedded
// RWMutex.
type Backoff struct {
	sync.RWMutex
	Clock clock.Clock
	// HasExpiredFunc controls the logic that determines whether the backoff
	// counter should be reset, and when to GC old backoff entries. If nil, the
	// default hasExpired function will restart the backoff factor to the
	// beginning after observing time has passed at least equal to 2*maxDuration
	HasExpiredFunc  func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
	defaultDuration time.Duration
	maxDuration     time.Duration
	perItemBackoff  map[string]*backoffEntry
	rand            *rand.Rand

	// maxJitterFactor adds jitter to the exponentially backed off delay.
	// if maxJitterFactor is zero, no jitter is added to the delay in
	// order to maintain current behavior.
	maxJitterFactor float64
}

func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
	return newBackoff(tc, initial, max, 0.0)
}

func NewBackOff(initial, max time.Duration) *Backoff {
	return NewBackOffWithJitter(initial, max, 0.0)
}

func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
	return newBackoff(tc, initial, max, maxJitterFactor)
}

func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff {
	clock := clock.RealClock{}
	return newBackoff(clock, initial, max, maxJitterFactor)
}

func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff {
	var random *rand.Rand
	// Only seed a random source when jitter is actually requested.
	if maxJitterFactor > 0 {
		random = rand.New(rand.NewSource(clock.Now().UnixNano()))
	}
	return &Backoff{
		perItemBackoff:  map[string]*backoffEntry{},
		Clock:           clock,
		defaultDuration: initial,
		maxDuration:     max,
		maxJitterFactor: maxJitterFactor,
		rand:            random,
	}
}

// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
	p.RLock()
	defer p.RUnlock()
	var delay time.Duration
	entry, ok := p.perItemBackoff[id]
	if ok {
		delay = entry.backoff
	}
	return delay
}

// move backoff to the next mark, capping at maxDuration
func (p *Backoff) Next(id string, eventTime time.Time) {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		// First event for this key, or the previous window has expired:
		// restart at the initial duration.
		entry = p.initEntryUnsafe(id)
		entry.backoff += p.jitter(entry.backoff)
	} else {
		delay := entry.backoff * 2       // exponential
		delay += p.jitter(entry.backoff) // add some jitter to the delay
		entry.backoff = min(delay, p.maxDuration)
	}
	entry.lastUpdate = p.Clock.Now()
}

// Reset forces clearing of all backoff data for a given key.
func (p *Backoff) Reset(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Returns True if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
	p.RLock()
	defer p.RUnlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	// An expired entry no longer constitutes an active backoff window.
	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return p.Clock.Since(eventTime) < entry.backoff
}

// Returns True if time since lastupdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
	p.RLock()
	defer p.RUnlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return eventTime.Sub(entry.lastUpdate) < entry.backoff
}

// Garbage collect records that have aged past their expiration, which defaults
// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
// this periodically.
func (p *Backoff) GC() {
	p.Lock()
	defer p.Unlock()
	now := p.Clock.Now()
	for id, entry := range p.perItemBackoff {
		if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
			delete(p.perItemBackoff, id)
		}
	}
}

// DeleteEntry removes the backoff record for id, if one exists.
func (p *Backoff) DeleteEntry(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Take a lock on *Backoff, before calling initEntryUnsafe
func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
	entry := &backoffEntry{backoff: p.defaultDuration}
	p.perItemBackoff[id] = entry
	return entry
}

// jitter returns a random duration in [0, maxJitterFactor*delay); zero when
// jitter is disabled (p.rand == nil).
func (p *Backoff) jitter(delay time.Duration) time.Duration {
	if p.rand == nil {
		return 0
	}

	return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
}

// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
	if p.HasExpiredFunc != nil {
		return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
	}
	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/apply/apply.go
vendor/k8s.io/client-go/util/apply/apply.go
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apply

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/client-go/features"
	"k8s.io/client-go/rest"
)

// NewRequest builds a new server-side apply request. The provided apply configuration object will
// be marshalled to the request's body using the default encoding, and the Content-Type header will
// be set to application/apply-patch with the appropriate structured syntax name suffix (today,
// either +yaml or +cbor, see
// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml).
func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) {
	// Default to YAML apply-patch; switch to CBOR only when both feature
	// gates opt in.
	pt := types.ApplyYAMLPatchType
	marshal := json.Marshal

	if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) {
		pt = types.ApplyCBORPatchType
		marshal = cbor.Marshal
	}

	body, err := marshal(applyConfiguration)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal apply configuration: %w", err)
	}
	return client.Patch(pt).Body(body), nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/watchlist/watch_list.go
vendor/k8s.io/client-go/util/watchlist/watch_list.go
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watchlist

import (
	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
	metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientfeatures "k8s.io/client-go/features"
	"k8s.io/utils/ptr"
)

// scheme is used to convert between external and internal ListOptions for
// validation.
var scheme = runtime.NewScheme()

func init() {
	utilruntime.Must(metainternalversion.AddToScheme(scheme))
}

// PrepareWatchListOptionsFromListOptions creates a new ListOptions
// that can be used for a watch-list request from the given listOptions.
//
// This function also determines if the given listOptions can be used to form a watch-list request,
// which would result in streaming semantically equivalent data from the server.
func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) {
	// Watch-list is opt-in via the client feature gate.
	if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
		return metav1.ListOptions{}, false, nil
	}

	internalListOptions := &metainternalversion.ListOptions{}
	if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil {
		return metav1.ListOptions{}, false, err
	}
	if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 {
		return metav1.ListOptions{}, false, nil
	}

	watchListOptions := listOptions
	// this is our legacy case, the cache ignores LIMIT for
	// ResourceVersion == 0 and RVM=unset|NotOlderThan
	if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" {
		return metav1.ListOptions{}, false, nil
	}
	watchListOptions.Limit = 0

	// to ensure that we can create a watch-list request that returns
	// semantically equivalent data for the given listOptions,
	// we need to validate that the RVM for the list is supported by watch-list requests.
	if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
		return metav1.ListOptions{}, false, nil
	}
	watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan

	watchListOptions.Watch = true
	watchListOptions.AllowWatchBookmarks = true
	watchListOptions.SendInitialEvents = ptr.To(true)

	// Re-validate the rewritten options before declaring them usable.
	internalWatchListOptions := &metainternalversion.ListOptions{}
	if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil {
		return metav1.ListOptions{}, false, err
	}
	if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 {
		return metav1.ListOptions{}, false, nil
	}

	return watchListOptions, true, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/cert/csr.go
vendor/k8s.io/client-go/util/cert/csr.go
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"net"
)

// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
	// Build a minimal request template and delegate to MakeCSRFromTemplate.
	template := &x509.CertificateRequest{
		Subject:     *subject,
		DNSNames:    dnsSANs,
		IPAddresses: ipSANs,
	}

	return MakeCSRFromTemplate(privateKey, template)
}

// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
// key and certificate request as a template. All key types that are
// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
// and *ecdsa.PrivateKey.)
func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
	// Work on a copy so the caller's template is not mutated.
	t := *template
	t.SignatureAlgorithm = sigType(privateKey)

	csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
	if err != nil {
		return nil, err
	}

	csrPemBlock := &pem.Block{
		Type:  CertificateRequestBlockType,
		Bytes: csrDER,
	}

	return pem.EncodeToMemory(csrPemBlock), nil
}

// sigType picks a signature algorithm for the given key. For RSA keys the
// hash strength scales with the modulus size; for all other key types the
// algorithm is left unspecified so crypto/x509 chooses a default.
func sigType(privateKey interface{}) x509.SignatureAlgorithm {
	// Customize the signature for RSA keys, depending on the key size
	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
		keySize := privateKey.N.BitLen()
		switch {
		case keySize >= 4096:
			return x509.SHA512WithRSA
		case keySize >= 3072:
			return x509.SHA384WithRSA
		default:
			return x509.SHA256WithRSA
		}
	}
	return x509.UnknownSignatureAlgorithm
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/cert/io.go
vendor/k8s.io/client-go/util/cert/io.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cert import ( "crypto/x509" "fmt" "os" "path/filepath" ) // CanReadCertAndKey returns true if the certificate and key files already exists, // otherwise returns false. If lost one of cert and key, returns error. func CanReadCertAndKey(certPath, keyPath string) (bool, error) { certReadable := canReadFile(certPath) keyReadable := canReadFile(keyPath) if certReadable == false && keyReadable == false { return false, nil } if certReadable == false { return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) } if keyReadable == false { return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) } return true, nil } // If the file represented by path exists and // readable, returns true otherwise returns false. func canReadFile(path string) bool { f, err := os.Open(path) if err != nil { return false } defer f.Close() return true } // WriteCert writes the pem-encoded certificate data to certPath. // The certificate file will be created with file mode 0644. // If the certificate file already exists, it will be overwritten. // The parent directory of the certPath will be created as needed with file mode 0755. 
func WriteCert(certPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { return err } return os.WriteFile(certPath, data, os.FileMode(0644)) } // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { pemBlock, err := os.ReadFile(filename) if err != nil { return nil, err } pool, err := NewPoolFromBytes(pemBlock) if err != nil { return nil, fmt.Errorf("error creating pool from %s: %s", filename, err) } return pool, nil } // NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) { certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, err } pool := x509.NewCertPool() for _, cert := range certs { pool.AddCert(cert) } return pool, nil } // CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func CertsFromFile(file string) ([]*x509.Certificate, error) { pemBlock, err := os.ReadFile(file) if err != nil { return nil, err } certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, fmt.Errorf("error reading %s: %s", file, err) } return certs, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/cert/server_inspection.go
vendor/k8s.io/client-go/util/cert/server_inspection.go
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/url"
	"strings"
)

// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
// state of particular servers. apiHost is "host:port".
//
// It works by dialing the server with client-cert auth enabled and capturing the
// AcceptableCAs list the server sends during the TLS handshake.
func GetClientCANames(apiHost string) ([]string, error) {
	// when we run this the second time, we know which one we are expecting
	acceptableCAs := []string{}
	tlsConfig := &tls.Config{
		InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
		// The callback fires during the handshake; it records the DER-encoded
		// distinguished names the server advertised, then returns an empty
		// certificate so the handshake can proceed.
		GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
			acceptableCAs = []string{}
			for _, curr := range hello.AcceptableCAs {
				acceptableCAs = append(acceptableCAs, string(curr))
			}
			return &tls.Certificate{}, nil
		},
	}

	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
	if err != nil {
		return nil, err
	}
	if err := conn.Close(); err != nil {
		return nil, err
	}

	// acceptableCAs was populated by the handshake callback above.
	return acceptableCAs, nil
}

// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs.
func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
	apiserverURL, err := url.Parse(kubeConfigURL)
	if err != nil {
		return nil, err
	}
	return GetClientCANames(apiserverURL.Host)
}

// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port".
func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
	tlsConfig := &tls.Config{
		InsecureSkipVerify: true, // this is insecure so that we always get connected
	}
	// if a name is specified for SNI, set it.
	if len(serverName) > 0 {
		tlsConfig.ServerName = serverName
	}

	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
	if err != nil {
		return nil, nil, err
	}
	if err = conn.Close(); err != nil {
		return nil, nil, fmt.Errorf("failed to close connection : %v", err)
	}

	// ConnectionState is read after Close; the handshake results (including
	// peer certificates) remain available on the closed connection.
	peerCerts := conn.ConnectionState().PeerCertificates
	peerCertBytes := [][]byte{}
	for _, a := range peerCerts {
		actualCert, err := EncodeCertificates(a)
		if err != nil {
			return nil, nil, err
		}
		peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
	}

	// err is nil at this point; returned as-is for symmetry with the other results.
	return peerCerts, peerCertBytes, err
}

// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs.
func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
	apiserverURL, err := url.Parse(kubeConfigURL)
	if err != nil {
		return nil, nil, err
	}
	return GetServingCertificates(apiserverURL.Host, serverName)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/cert/cert.go
vendor/k8s.io/client-go/util/cert/cert.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cert import ( "bytes" "crypto" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "math" "math/big" "net" "os" "path/filepath" "strings" "time" "k8s.io/client-go/util/keyutil" netutils "k8s.io/utils/net" ) const duration365d = time.Hour * 24 * 365 // Config contains the basic fields required for creating a certificate type Config struct { CommonName string Organization []string AltNames AltNames Usages []x509.ExtKeyUsage NotBefore time.Time } // AltNames contains the domain names and IP addresses that will be added // to the API Server's x509 certificate SubAltNames field. The values will // be passed directly to the x509.Certificate object. type AltNames struct { DNSNames []string IPs []net.IP } // NewSelfSignedCACert creates a CA certificate func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) if err != nil { return nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) notBefore := now.UTC() if !cfg.NotBefore.IsZero() { notBefore = cfg.NotBefore.UTC() } tmpl := x509.Certificate{ SerialNumber: serial, Subject: pkix.Name{ CommonName: cfg.CommonName, Organization: cfg.Organization, }, DNSNames: []string{cfg.CommonName}, NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { return nil, err } return x509.ParseCertificate(certDERBytes) } // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate. func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) { return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "") } // GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names) // for the certificate. // // If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is: // <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt // <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key // Certs/keys not existing in that directory are created. 
func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) { validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew maxAge := time.Hour * 24 * 365 // one year self-signed certs baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-")) certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt") keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key") if len(fixtureDirectory) > 0 { cert, err := os.ReadFile(certFixturePath) if err == nil { key, err := os.ReadFile(keyFixturePath) if err == nil { return cert, key, nil } return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err) } maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures } caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048) if err != nil { return nil, nil, err } // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) if err != nil { return nil, nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) caTemplate := x509.Certificate{ SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), }, NotBefore: validFrom, NotAfter: validFrom.Add(maxAge), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) if err != nil { return nil, nil, err } caCertificate, err := x509.ParseCertificate(caDERBytes) if err != nil { return nil, nil, err } priv, err := rsa.GenerateKey(cryptorand.Reader, 2048) if err != nil { return nil, nil, err } // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) if err != nil { return nil, nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) template := x509.Certificate{ SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), }, NotBefore: validFrom, NotAfter: validFrom.Add(maxAge), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, } if ip := netutils.ParseIPSloppy(host); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { template.DNSNames = append(template.DNSNames, host) } template.IPAddresses = append(template.IPAddresses, alternateIPs...) template.DNSNames = append(template.DNSNames, alternateDNS...) 
derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey) if err != nil { return nil, nil, err } // Generate cert, followed by ca certBuffer := bytes.Buffer{} if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { return nil, nil, err } if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil { return nil, nil, err } // Generate key keyBuffer := bytes.Buffer{} if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { return nil, nil, err } if len(fixtureDirectory) > 0 { if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } return certBuffer.Bytes(), keyBuffer.Bytes(), nil } func ipsToStrings(ips []net.IP) []string { ss := make([]string, 0, len(ips)) for _, ip := range ips { ss = append(ss, ip.String()) } return ss }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/cert/pem.go
vendor/k8s.io/client-go/util/cert/pem.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cert import ( "bytes" "crypto/x509" "encoding/pem" "errors" ) const ( // CertificateBlockType is a possible value for pem.Block.Type. CertificateBlockType = "CERTIFICATE" // CertificateRequestBlockType is a possible value for pem.Block.Type. CertificateRequestBlockType = "CERTIFICATE REQUEST" ) // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { ok := false certs := []*x509.Certificate{} for len(pemCerts) > 0 { var block *pem.Block block, pemCerts = pem.Decode(pemCerts) if block == nil { break } // Only use PEM "CERTIFICATE" blocks without extra headers if block.Type != CertificateBlockType || len(block.Headers) != 0 { continue } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { return certs, err } certs = append(certs, cert) ok = true } if !ok { return certs, errors.New("data does not contain any valid RSA or ECDSA certificates") } return certs, nil } // EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs. 
func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { b := bytes.Buffer{} for _, cert := range certs { if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil { return []byte{}, err } } return b.Bytes(), nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/keyutil/key.go
vendor/k8s.io/client-go/util/keyutil/key.go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package keyutil contains utilities for managing public/private key pairs.
package keyutil

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
)

const (
	// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
	ECPrivateKeyBlockType = "EC PRIVATE KEY"
	// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
	RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
	// PrivateKeyBlockType is a possible value for pem.Block.Type.
	PrivateKeyBlockType = "PRIVATE KEY"
	// PublicKeyBlockType is a possible value for pem.Block.Type.
	PublicKeyBlockType = "PUBLIC KEY"
)

// MakeEllipticPrivateKeyPEM creates an ECDSA private key (P-256) and returns it
// PEM-encoded as an "EC PRIVATE KEY" block.
func MakeEllipticPrivateKeyPEM() ([]byte, error) {
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
	if err != nil {
		return nil, err
	}

	derBytes, err := x509.MarshalECPrivateKey(privateKey)
	if err != nil {
		return nil, err
	}

	privateKeyPemBlock := &pem.Block{
		Type:  ECPrivateKeyBlockType,
		Bytes: derBytes,
	}
	return pem.EncodeToMemory(privateKeyPemBlock), nil
}

// WriteKey writes the pem-encoded key data to keyPath.
// The key file will be created with file mode 0600.
// If the key file already exists, it will be overwritten.
// The parent directory of the keyPath will be created as needed with file mode 0755.
func WriteKey(keyPath string, data []byte) error {
	if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
		return err
	}
	return os.WriteFile(keyPath, data, os.FileMode(0600))
}

// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
// can't find one, it will generate a new key and store it there.
//
// NOTE(review): if the file exists but fails verifyKeyData (empty/corrupt),
// err is nil so !os.IsNotExist(nil) is true and an error is returned rather
// than the key being regenerated — i.e. a corrupt existing file is an error.
func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
	loadedData, err := os.ReadFile(keyPath)
	// Call verifyKeyData to ensure the file wasn't empty/corrupt.
	if err == nil && verifyKeyData(loadedData) {
		return loadedData, false, err
	}
	if !os.IsNotExist(err) {
		return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
	}

	generatedData, err := MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, false, fmt.Errorf("error generating key: %v", err)
	}
	if err := WriteKey(keyPath, generatedData); err != nil {
		return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
	}
	return generatedData, true, nil
}

// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
// a PEM encoded block or returns an error.
func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
	switch t := privateKey.(type) {
	case *ecdsa.PrivateKey:
		derBytes, err := x509.MarshalECPrivateKey(t)
		if err != nil {
			return nil, err
		}
		block := &pem.Block{
			Type:  ECPrivateKeyBlockType,
			Bytes: derBytes,
		}
		return pem.EncodeToMemory(block), nil
	case *rsa.PrivateKey:
		block := &pem.Block{
			Type:  RSAPrivateKeyBlockType,
			Bytes: x509.MarshalPKCS1PrivateKey(t),
		}
		return pem.EncodeToMemory(block), nil
	default:
		return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
	}
}

// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
// Returns an error if the file could not be read or if the private key could not be parsed.
func PrivateKeyFromFile(file string) (interface{}, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	key, err := ParsePrivateKeyPEM(data)
	if err != nil {
		return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
	}
	return key, nil
}

// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
// Reads public keys from both public and private key files.
func PublicKeysFromFile(file string) ([]interface{}, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	keys, err := ParsePublicKeysPEM(data)
	if err != nil {
		return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
	}
	return keys, nil
}

// verifyKeyData returns true if the provided data appears to be a valid private key.
func verifyKeyData(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	_, err := ParsePrivateKeyPEM(data)
	return err == nil
}

// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY".
// The first block that parses successfully wins; unrecognized blocks are skipped.
func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
	var privateKeyPemBlock *pem.Block
	for {
		privateKeyPemBlock, keyData = pem.Decode(keyData)
		if privateKeyPemBlock == nil {
			break
		}

		switch privateKeyPemBlock.Type {
		case ECPrivateKeyBlockType:
			// ECDSA Private Key in ASN.1 format
			if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
				return key, nil
			}
		case RSAPrivateKeyBlockType:
			// RSA Private Key in PKCS#1 format
			if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
				return key, nil
			}
		case PrivateKeyBlockType:
			// RSA or ECDSA Private Key in unencrypted PKCS#8 format
			if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
				return key, nil
			}
		}

		// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
		// originally, only the first PEM block was parsed and expected to be a key block
	}

	// we read all the PEM blocks and didn't recognize one
	return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
}

// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
// Reads public keys from both public and private key files (a private key block
// contributes its embedded public key).
func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
	var block *pem.Block
	keys := []interface{}{}
	for {
		// read the next block
		block, keyData = pem.Decode(keyData)
		if block == nil {
			break
		}

		// test block against parsing functions
		if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
			keys = append(keys, &privateKey.PublicKey)
			continue
		}
		if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
			keys = append(keys, publicKey)
			continue
		}
		if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
			keys = append(keys, &privateKey.PublicKey)
			continue
		}
		if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
			keys = append(keys, publicKey)
			continue
		}

		// tolerate non-key PEM blocks for backwards compatibility
		// originally, only the first PEM block was parsed and expected to be a key block
	}

	if len(keys) == 0 {
		return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
	}
	return keys, nil
}

// parseRSAPublicKey parses a single RSA public key from the provided data.
// Falls back to extracting the public key from a DER certificate.
func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
	var err error

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
		if cert, err := x509.ParseCertificate(data); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}

	// Test if parsed key is an RSA Public Key
	var pubKey *rsa.PublicKey
	var ok bool
	if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
		return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
	}

	return pubKey, nil
}

// parseRSAPrivateKey parses a single RSA private key from the provided data.
// Tries PKCS#1 first, then PKCS#8.
func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
	var err error

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
			return nil, err
		}
	}

	// Test if parsed key is an RSA Private Key
	var privKey *rsa.PrivateKey
	var ok bool
	if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
		return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
	}

	return privKey, nil
}

// parseECPublicKey parses a single ECDSA public key from the provided data.
// Falls back to extracting the public key from a DER certificate.
func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
	var err error

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
		if cert, err := x509.ParseCertificate(data); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}

	// Test if parsed key is an ECDSA Public Key
	var pubKey *ecdsa.PublicKey
	var ok bool
	if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
		return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
	}

	return pubKey, nil
}

// parseECPrivateKey parses a single ECDSA private key (SEC 1, ASN.1 DER) from the provided data.
func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
	var err error

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
		return nil, err
	}

	// Test if parsed key is an ECDSA Private Key
	var privKey *ecdsa.PrivateKey
	var ok bool
	if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
		return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
	}

	return privKey, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/homedir/homedir.go
vendor/k8s.io/client-go/util/homedir/homedir.go
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package homedir

import (
	"os"
	"path/filepath"
	"runtime"
)

// HomeDir returns the home directory for the current user.
// On Windows:
// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
// On all other platforms, $HOME is returned as-is.
func HomeDir() string {
	if runtime.GOOS == "windows" {
		home := os.Getenv("HOME")
		// %HOMEDRIVE%%HOMEPATH% only counts when both halves are set.
		homeDriveHomePath := ""
		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
			homeDriveHomePath = homeDrive + homePath
		}
		userProfile := os.Getenv("USERPROFILE")

		// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
		// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
		for _, p := range []string{home, homeDriveHomePath, userProfile} {
			if len(p) == 0 {
				continue
			}
			if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
				continue
			}
			return p
		}

		firstSetPath := ""
		firstExistingPath := ""

		// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
		// (note the candidate order differs from the loop above).
		for _, p := range []string{home, userProfile, homeDriveHomePath} {
			if len(p) == 0 {
				continue
			}
			if len(firstSetPath) == 0 {
				// remember the first path that is set
				firstSetPath = p
			}
			info, err := os.Stat(p)
			if err != nil {
				continue
			}
			if len(firstExistingPath) == 0 {
				// remember the first path that exists
				firstExistingPath = p
			}
			// 1<<7 is the owner-write permission bit (0200).
			if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
				// return first path that is writeable
				return p
			}
		}

		// If none are writeable, return first location that exists
		if len(firstExistingPath) > 0 {
			return firstExistingPath
		}
		// If none exist, return first location that is set
		if len(firstSetPath) > 0 {
			return firstSetPath
		}
		// We've got nothing
		return ""
	}
	return os.Getenv("HOME")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/exec/exec.go
vendor/k8s.io/client-go/util/exec/exec.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package exec // ExitError is an interface that presents an API similar to os.ProcessState, which is // what ExitError from os/exec is. This is designed to make testing a bit easier and // probably loses some of the cross-platform properties of the underlying library. type ExitError interface { String() string Error() string Exited() bool ExitStatus() int } // CodeExitError is an implementation of ExitError consisting of an error object // and an exit code (the upper bits of os.exec.ExitStatus). type CodeExitError struct { Err error Code int } var _ ExitError = CodeExitError{} func (e CodeExitError) Error() string { return e.Err.Error() } func (e CodeExitError) String() string { return e.Err.Error() } func (e CodeExitError) Exited() bool { return true } func (e CodeExitError) ExitStatus() int { return e.Code }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package consistencydetector

import (
	"context"
	"os"
	"strconv"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// dataConsistencyDetectionForListFromCacheEnabled caches the env-var toggle,
// read once at process start by init below.
var dataConsistencyDetectionForListFromCacheEnabled = false

func init() {
	// Errors from ParseBool are deliberately ignored: any unparsable or unset
	// value leaves the detector disabled.
	dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR"))
}

// IsDataConsistencyDetectionForListEnabled returns true when
// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
func IsDataConsistencyDetectionForListEnabled() bool {
	return dataConsistencyDetectionForListFromCacheEnabled
}

// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when
// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup
// for requests that have a high chance of being served from the watch-cache.
//
// The consistency check is meant to be enforced only in the CI, not in production.
// The check ensures that data retrieved by a list api call from the watch-cache
// is exactly the same as data received by the list api call from etcd.
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
//
// Note that this function doesn't examine the ListOptions to determine
// if the original request has hit the cache because it would be challenging
// to maintain consistency with the server-side implementation.
// For simplicity, we assume that the first request retrieved data from
// the cache (even though this might not be true for some requests)
// and issue the second call to get data from etcd for comparison.
func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
	if !IsDataConsistencyDetectionForListEnabled() {
		return
	}
	checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
}

// checkListFromCacheDataConsistencyIfRequestedInternal extracts the resource
// version and raw items from receivedList and delegates the actual comparison
// to CheckDataConsistency (defined in a sibling file of this package).
func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
	receivedListMeta, err := meta.ListAccessor(receivedList)
	if err != nil {
		panic(err)
	}
	rawListItems, err := meta.ExtractListWithAlloc(receivedList)
	if err != nil {
		panic(err) // this should never happen
	}
	lastSyncedResourceVersion := receivedListMeta.GetResourceVersion()
	CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems })
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package consistencydetector import ( "context" "os" "strconv" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) var dataConsistencyDetectionForWatchListEnabled = false func init() { dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) } // IsDataConsistencyDetectionForWatchListEnabled returns true when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. func IsDataConsistencyDetectionForWatchListEnabled() bool { return dataConsistencyDetectionForWatchListEnabled } // CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. // // The consistency check is meant to be enforced only in the CI, not in production. // The check ensures that data retrieved by the watch-list api call // is exactly the same as data received by the standard list api call against etcd. // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. 
func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { if !IsDataConsistencyDetectionForWatchListEnabled() { return } checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package consistencydetector import ( "context" "fmt" "sort" "time" "github.com/google/go-cmp/cmp" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" ) type RetrieveItemsFunc[U any] func() []U type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) // CheckDataConsistency exists solely for testing purposes. // we cannot use checkWatchListDataConsistencyIfRequested because // it is guarded by an environmental variable. // we cannot manipulate the environmental variable because // it will affect other tests in this package. func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. 
Skipping the data consistency check.", identity) return } klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity) retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn()) listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems)) var list runtime.Object err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) { list, err = listFn(ctx, listOptions) if err != nil { // the consistency check will only be enabled in the CI // and LIST calls in general will be retired by the client-go library // if we fail simply log and retry klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) return false, nil } return true, nil }) if err != nil { klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err) return } rawListItems, err := meta.ExtractListWithAlloc(list) if err != nil { panic(err) // this should never happen } listItems := toMetaObjectSliceOrDie(rawListItems) sort.Sort(byUID(listItems)) sort.Sort(byUID(retrievedItems)) if !cmp.Equal(listItems, retrievedItems) { klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems)) msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity) panic(msg) } } // canFormAdditionalListCall ensures that we can form a valid LIST requests // for checking data consistency. 
func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool { // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact // we need to make sure that the continuation hasn't been set // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38 if len(listOptions.Continue) > 0 { return false } // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact // we need to make sure that the RV is valid because the validation code forbids RV == "0" // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44 if lastSyncedResourceVersion == "0" { return false } return true } // prepareListCallOptions changes the input list options so that // the list call goes directly to etcd func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions { // this is our legacy case: // // the watch cache skips the Limit if the ResourceVersion was set to "0" // thus, to compare with data retrieved directly from etcd // we need to skip the limit to for the list call as well. // // note that when the number of retrieved items is less than the request limit, // it means either the watch cache is disabled, or there is not enough data. // in both cases, we can use the limit because we will be able to compare // the data with the items retrieved from etcd. if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit { listOptions.Limit = 0 } // set the RV and RVM so that we get the snapshot of data // directly from etcd. 
listOptions.ResourceVersion = lastSyncedResourceVersion listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact return listOptions } type byUID []metav1.Object func (a byUID) Len() int { return len(a) } func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { result := make([]metav1.Object, len(s)) for i, v := range s { m, err := meta.Accessor(v) if err != nil { panic(err) } result[i] = m } return result }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/util/connrotation/connrotation.go
vendor/k8s.io/client-go/util/connrotation/connrotation.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package connrotation implements a connection dialer that tracks and can close // all created connections. // // This is used for credential rotation of long-lived connections, when there's // no way to re-authenticate on a live connection. package connrotation import ( "context" "net" "sync" ) // DialFunc is a shorthand for signature of net.DialContext. type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) // Dialer opens connections through Dial and tracks them. type Dialer struct { dial DialFunc *ConnectionTracker } // NewDialer creates a new Dialer instance. // Equivalent to NewDialerWithTracker(dial, nil). func NewDialer(dial DialFunc) *Dialer { return NewDialerWithTracker(dial, nil) } // NewDialerWithTracker creates a new Dialer instance. // // If dial is not nil, it will be used to create new underlying connections. // Otherwise net.DialContext is used. // If tracker is not nil, it will be used to track new underlying connections. // Otherwise NewConnectionTracker() is used. 
func NewDialerWithTracker(dial DialFunc, tracker *ConnectionTracker) *Dialer { if tracker == nil { tracker = NewConnectionTracker() } return &Dialer{ dial: dial, ConnectionTracker: tracker, } } // ConnectionTracker keeps track of opened connections type ConnectionTracker struct { mu sync.Mutex conns map[*closableConn]struct{} } // NewConnectionTracker returns a connection tracker for use with NewDialerWithTracker func NewConnectionTracker() *ConnectionTracker { return &ConnectionTracker{ conns: make(map[*closableConn]struct{}), } } // CloseAll forcibly closes all tracked connections. // // Note: new connections may get created before CloseAll returns. func (c *ConnectionTracker) CloseAll() { c.mu.Lock() conns := c.conns c.conns = make(map[*closableConn]struct{}) c.mu.Unlock() for conn := range conns { conn.Close() } } // Track adds the connection to the list of tracked connections, // and returns a wrapped copy of the connection that stops tracking the connection // when it is closed. func (c *ConnectionTracker) Track(conn net.Conn) net.Conn { closable := &closableConn{Conn: conn} // When the connection is closed, remove it from the map. This will // be no-op if the connection isn't in the map, e.g. if CloseAll() // is called. closable.onClose = func() { c.mu.Lock() delete(c.conns, closable) c.mu.Unlock() } // Start tracking the connection c.mu.Lock() c.conns[closable] = struct{}{} c.mu.Unlock() return closable } // Dial creates a new tracked connection. func (d *Dialer) Dial(network, address string) (net.Conn, error) { return d.DialContext(context.Background(), network, address) } // DialContext creates a new tracked connection. 
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { conn, err := d.dial(ctx, network, address) if err != nil { return nil, err } return d.ConnectionTracker.Track(conn), nil } type closableConn struct { onClose func() net.Conn } func (c *closableConn) Close() error { go c.onClose() return c.Conn.Close() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_stub.go
vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_stub.go
/* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gcp import ( "errors" "k8s.io/client-go/rest" "k8s.io/klog/v2" ) func init() { if err := rest.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { klog.Fatalf("Failed to register gcp auth plugin: %v", err) } } func newGCPAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) { return nil, errors.New(`The gcp auth plugin has been removed. Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead. See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details`) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package exec import ( "bytes" "crypto/tls" "crypto/x509" "errors" "fmt" "io" "net" "net/http" "os" "os/exec" "reflect" "strings" "sync" "time" "golang.org/x/term" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/dump" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/install" clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1" clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" "k8s.io/klog/v2" "k8s.io/utils/clock" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" const installHintVerboseHelp = ` It looks like you are trying to use a client-go credential plugin that is not installed. To learn more about this feature, consult the documentation available at: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins` var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { install.Install(scheme) } var ( // Since transports can be constantly re-initialized by programs like kubectl, // keep a cache of initialized authenticators keyed by a hash of their config. 
globalCache = newCache() // The list of API versions we accept. apiVersions = map[string]schema.GroupVersion{ clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion, clientauthenticationv1.SchemeGroupVersion.String(): clientauthenticationv1.SchemeGroupVersion, } ) func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { key := struct { conf *api.ExecConfig cluster *clientauthentication.Cluster }{ conf: conf, cluster: cluster, } return dump.Pretty(key) } type cache struct { mu sync.Mutex m map[string]*Authenticator } func (c *cache) get(s string) (*Authenticator, bool) { c.mu.Lock() defer c.mu.Unlock() a, ok := c.m[s] return a, ok } // put inserts an authenticator into the cache. If an authenticator is already // associated with the key, the first one is returned instead. func (c *cache) put(s string, a *Authenticator) *Authenticator { c.mu.Lock() defer c.mu.Unlock() existing, ok := c.m[s] if ok { return existing } c.m[s] = a return a } // sometimes rate limits how often a function f() is called. Specifically, Do() // will run the provided function f() up to threshold times every interval // duration. type sometimes struct { threshold int interval time.Duration clock clock.Clock mu sync.Mutex count int // times we have called f() in this window window time.Time // beginning of current window of length interval } func (s *sometimes) Do(f func()) { s.mu.Lock() defer s.mu.Unlock() now := s.clock.Now() if s.window.IsZero() { s.window = now } // If we are no longer in our saved time window, then we get to reset our run // count back to 0 and start increasing towards the threshold again. if inWindow := now.Sub(s.window) < s.interval; !inWindow { s.window = now s.count = 0 } // If we have not run the function more than threshold times in this current // time window, we get to run it now! 
if underThreshold := s.count < s.threshold; underThreshold { s.count++ f() } } // GetAuthenticator returns an exec-based plugin for providing client credentials. func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { return newAuthenticator(globalCache, term.IsTerminal, config, cluster) } func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { key := cacheKey(config, cluster) if a, ok := c.get(key); ok { return a, nil } gv, ok := apiVersions[config.APIVersion] if !ok { return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion) } connTracker := connrotation.NewConnectionTracker() defaultDialer := connrotation.NewDialerWithTracker( (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext, connTracker, ) a := &Authenticator{ cmd: config.Command, args: config.Args, group: gv, cluster: cluster, provideClusterInfo: config.ProvideClusterInfo, installHint: config.InstallHint, sometimes: &sometimes{ threshold: 10, interval: time.Hour, clock: clock.RealClock{}, }, stdin: os.Stdin, stderr: os.Stderr, interactiveFunc: func() (bool, error) { return isInteractive(isTerminalFunc, config) }, now: time.Now, environ: os.Environ, connTracker: connTracker, } for _, env := range config.Env { a.env = append(a.env, env.Name+"="+env.Value) } // these functions are made comparable and stored in the cache so that repeated clientset // construction with the same rest.Config results in a single TLS cache and Authenticator a.getCert = &transport.GetCertHolder{GetCert: a.cert} a.dial = &transport.DialHolder{Dial: defaultDialer.DialContext} return c.put(key, a), nil } func isInteractive(isTerminalFunc func(int) bool, config *api.ExecConfig) (bool, error) { var shouldBeInteractive bool switch config.InteractiveMode { case api.NeverExecInteractiveMode: shouldBeInteractive = false case 
api.IfAvailableExecInteractiveMode: shouldBeInteractive = !config.StdinUnavailable && isTerminalFunc(int(os.Stdin.Fd())) case api.AlwaysExecInteractiveMode: if !isTerminalFunc(int(os.Stdin.Fd())) { return false, errors.New("standard input is not a terminal") } if config.StdinUnavailable { suffix := "" if len(config.StdinUnavailableMessage) > 0 { // only print extra ": <message>" if the user actually specified a message suffix = fmt.Sprintf(": %s", config.StdinUnavailableMessage) } return false, fmt.Errorf("standard input is unavailable%s", suffix) } shouldBeInteractive = true default: return false, fmt.Errorf("unknown interactiveMode: %q", config.InteractiveMode) } return shouldBeInteractive, nil } // Authenticator is a client credential provider that rotates credentials by executing a plugin. // The plugin input and output are defined by the API group client.authentication.k8s.io. type Authenticator struct { // Set by the config cmd string args []string group schema.GroupVersion env []string cluster *clientauthentication.Cluster provideClusterInfo bool // Used to avoid log spew by rate limiting install hint printing. We didn't do // this by interval based rate limiting alone since that way may have prevented // the install hint from showing up for kubectl users. sometimes *sometimes installHint string // Stubbable for testing stdin io.Reader stderr io.Writer interactiveFunc func() (bool, error) now func() time.Time environ func() []string // connTracker tracks all connections opened that we need to close when rotating a client certificate connTracker *connrotation.ConnectionTracker // Cached results. // // The mutex also guards calling the plugin. Since the plugin could be // interactive we want to make sure it's only called once. 
mu sync.Mutex cachedCreds *credentials exp time.Time // getCert makes Authenticator.cert comparable to support TLS config caching getCert *transport.GetCertHolder // dial is used for clients which do not specify a custom dialer // it is comparable to support TLS config caching dial *transport.DialHolder } type credentials struct { token string `datapolicy:"token"` cert *tls.Certificate `datapolicy:"secret-key"` } // UpdateTransportConfig updates the transport.Config to use credentials // returned by the plugin. func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { // If a bearer token is present in the request - avoid the GetCert callback when // setting up the transport, as that triggers the exec action if the server is // also configured to allow client certificates for authentication. For requests // like "kubectl get --token (token) pods" we should assume the intention is to // use the provided token for authentication. The same can be said for when the // user specifies basic auth or cert auth. 
if c.HasTokenAuth() || c.HasBasicAuth() || c.HasCertAuth() { return nil } c.Wrap(func(rt http.RoundTripper) http.RoundTripper { return &roundTripper{a, rt} }) if c.HasCertCallback() { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") } c.TLS.GetCertHolder = a.getCert // comparable for TLS config caching if c.DialHolder != nil { if c.DialHolder.Dial == nil { return errors.New("invalid transport.Config.DialHolder: wrapped Dial function is nil") } // if c has a custom dialer, we have to wrap it // TLS config caching is not supported for this config d := connrotation.NewDialerWithTracker(c.DialHolder.Dial, a.connTracker) c.DialHolder = &transport.DialHolder{Dial: d.DialContext} } else { c.DialHolder = a.dial // comparable for TLS config caching } return nil } var _ utilnet.RoundTripperWrapper = &roundTripper{} type roundTripper struct { a *Authenticator base http.RoundTripper } func (r *roundTripper) WrappedRoundTripper() http.RoundTripper { return r.base } func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // If a user has already set credentials, use that. This makes commands like // "kubectl get --token (token) pods" work. 
if req.Header.Get("Authorization") != "" { return r.base.RoundTrip(req) } creds, err := r.a.getCreds() if err != nil { return nil, fmt.Errorf("getting credentials: %v", err) } if creds.token != "" { req.Header.Set("Authorization", "Bearer "+creds.token) } res, err := r.base.RoundTrip(req) if err != nil { return nil, err } if res.StatusCode == http.StatusUnauthorized { if err := r.a.maybeRefreshCreds(creds); err != nil { klog.Errorf("refreshing credentials: %v", err) } } return res, nil } func (a *Authenticator) credsExpired() bool { if a.exp.IsZero() { return false } return a.now().After(a.exp) } func (a *Authenticator) cert() (*tls.Certificate, error) { creds, err := a.getCreds() if err != nil { return nil, err } return creds.cert, nil } func (a *Authenticator) getCreds() (*credentials, error) { a.mu.Lock() defer a.mu.Unlock() if a.cachedCreds != nil && !a.credsExpired() { return a.cachedCreds, nil } if err := a.refreshCredsLocked(); err != nil { return nil, err } return a.cachedCreds, nil } // maybeRefreshCreds executes the plugin to force a rotation of the // credentials, unless they were rotated already. func (a *Authenticator) maybeRefreshCreds(creds *credentials) error { a.mu.Lock() defer a.mu.Unlock() // Since we're not making a new pointer to a.cachedCreds in getCreds, no // need to do deep comparison. if creds != a.cachedCreds { // Credentials already rotated. return nil } return a.refreshCredsLocked() } // refreshCredsLocked executes the plugin and reads the credentials from // stdout. It must be called while holding the Authenticator's mutex. func (a *Authenticator) refreshCredsLocked() error { interactive, err := a.interactiveFunc() if err != nil { return fmt.Errorf("exec plugin cannot support interactive mode: %w", err) } cred := &clientauthentication.ExecCredential{ Spec: clientauthentication.ExecCredentialSpec{ Interactive: interactive, }, } if a.provideClusterInfo { cred.Spec.Cluster = a.cluster } env := append(a.environ(), a.env...) 
data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) if err != nil { return fmt.Errorf("encode ExecCredentials: %v", err) } env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) stdout := &bytes.Buffer{} cmd := exec.Command(a.cmd, a.args...) cmd.Env = env cmd.Stderr = a.stderr cmd.Stdout = stdout if interactive { cmd.Stdin = a.stdin } err = cmd.Run() incrementCallsMetric(err) if err != nil { return a.wrapCmdRunErrorLocked(err) } _, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred) if err != nil { return fmt.Errorf("decoding stdout: %v", err) } if gvk.Group != a.group.Group || gvk.Version != a.group.Version { return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s", a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}) } if cred.Status == nil { return fmt.Errorf("exec plugin didn't return a status field") } if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" { return fmt.Errorf("exec plugin didn't return a token or cert/key pair") } if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") { return fmt.Errorf("exec plugin returned only certificate or key, not both") } if cred.Status.ExpirationTimestamp != nil { a.exp = cred.Status.ExpirationTimestamp.Time } else { a.exp = time.Time{} } newCreds := &credentials{ token: cred.Status.Token, } if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" { cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData)) if err != nil { return fmt.Errorf("failed parsing client key/certificate: %v", err) } // Leaf is initialized to be nil: // https://golang.org/pkg/crypto/tls/#X509KeyPair // Leaf certificate is the first certificate: // https://golang.org/pkg/crypto/tls/#Certificate // Populating leaf is useful for quickly accessing the underlying x509 // certificate values. 
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { return fmt.Errorf("failed parsing client leaf certificate: %v", err) } newCreds.cert = &cert } oldCreds := a.cachedCreds a.cachedCreds = newCreds // Only close all connections when TLS cert rotates. Token rotation doesn't // need the extra noise. if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { // Can be nil if the exec auth plugin only returned token auth. if oldCreds.cert != nil && oldCreds.cert.Leaf != nil { metrics.ClientCertRotationAge.Observe(time.Since(oldCreds.cert.Leaf.NotBefore)) } a.connTracker.CloseAll() } expiry := time.Time{} if a.cachedCreds.cert != nil && a.cachedCreds.cert.Leaf != nil { expiry = a.cachedCreds.cert.Leaf.NotAfter } expirationMetrics.set(a, expiry) return nil } // wrapCmdRunErrorLocked pulls out the code to construct a helpful error message // for when the exec plugin's binary fails to Run(). // // It must be called while holding the Authenticator's mutex. func (a *Authenticator) wrapCmdRunErrorLocked(err error) error { switch err.(type) { case *exec.Error: // Binary does not exist (see exec.Error). builder := strings.Builder{} fmt.Fprintf(&builder, "exec: executable %s not found", a.cmd) a.sometimes.Do(func() { fmt.Fprint(&builder, installHintVerboseHelp) if a.installHint != "" { fmt.Fprintf(&builder, "\n\n%s", a.installHint) } }) return errors.New(builder.String()) case *exec.ExitError: // Binary execution failed (see exec.Cmd.Run()). e := err.(*exec.ExitError) return fmt.Errorf( "exec: executable %s failed with exit code %d", a.cmd, e.ProcessState.ExitCode(), ) default: return fmt.Errorf("exec: %v", err) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package exec import ( "errors" "io/fs" "os/exec" "reflect" "sync" "time" "k8s.io/klog/v2" "k8s.io/client-go/tools/metrics" ) // The following constants shadow the special values used in the prometheus metrics implementation. const ( // noError indicates that the plugin process was successfully started and exited with an exit // code of 0. noError = "no_error" // pluginExecutionError indicates that the plugin process was successfully started and then // it returned a non-zero exit code. pluginExecutionError = "plugin_execution_error" // pluginNotFoundError indicates that we could not find the exec plugin. pluginNotFoundError = "plugin_not_found_error" // clientInternalError indicates that we attempted to start the plugin process, but failed // for some reason. clientInternalError = "client_internal_error" // successExitCode represents an exec plugin invocation that was successful. successExitCode = 0 // failureExitCode represents an exec plugin invocation that was not successful. This code is // used in some failure modes (e.g., plugin not found, client internal error) so that someone // can more easily monitor all unsuccessful invocations. 
failureExitCode = 1 ) type certificateExpirationTracker struct { mu sync.RWMutex m map[*Authenticator]time.Time metricSet func(*time.Time) } var expirationMetrics = &certificateExpirationTracker{ m: map[*Authenticator]time.Time{}, metricSet: func(e *time.Time) { metrics.ClientCertExpiry.Set(e) }, } // set stores the given expiration time and updates the updates the certificate // expiry metric to the earliest expiration time. func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) { c.mu.Lock() defer c.mu.Unlock() c.m[a] = t earliest := time.Time{} for _, t := range c.m { if t.IsZero() { continue } if earliest.IsZero() || earliest.After(t) { earliest = t } } if earliest.IsZero() { c.metricSet(nil) } else { c.metricSet(&earliest) } } // incrementCallsMetric increments a global metrics counter for the number of calls to an exec // plugin, partitioned by exit code. The provided err should be the return value from // exec.Cmd.Run(). func incrementCallsMetric(err error) { execExitError := &exec.ExitError{} execError := &exec.Error{} pathError := &fs.PathError{} switch { case err == nil: // Binary execution succeeded. metrics.ExecPluginCalls.Increment(successExitCode, noError) case errors.As(err, &execExitError): // Binary execution failed (see "os/exec".Cmd.Run()). metrics.ExecPluginCalls.Increment(execExitError.ExitCode(), pluginExecutionError) case errors.As(err, &execError), errors.As(err, &pathError): // Binary does not exist (see exec.Error, fs.PathError). metrics.ExecPluginCalls.Increment(failureExitCode, pluginNotFoundError) default: // We don't know about this error type. klog.V(2).InfoS("unexpected exec plugin return error type", "type", reflect.TypeOf(err).String(), "err", err) metrics.ExecPluginCalls.Increment(failureExitCode, clientInternalError) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/merge.go
vendor/k8s.io/client-go/tools/clientcmd/merge.go
/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "fmt" "reflect" "strings" ) // recursively merges src into dst: // - non-pointer struct fields with any exported fields are recursively merged // - non-pointer struct fields with only unexported fields prefer src if the field is non-zero // - maps are shallow merged with src keys taking priority over dst // - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops func merge[T any](dst, src *T) error { if dst == nil { return fmt.Errorf("cannot merge into nil pointer") } if src == nil { return nil } return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem()) } func mergeValues(fieldNames []string, dst, src reflect.Value) error { dstType := dst.Type() // no-op if we can't read the src if !src.IsValid() { return nil } // sanity check types match if srcType := src.Type(); dstType != srcType { return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, ".")) } switch dstType.Kind() { case reflect.Struct: if hasExportedField(dstType) { // recursively merge for i, n := 0, dstType.NumField(); i < n; i++ { if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil { return err } } } else if dst.CanSet() { // If all fields are unexported, overwrite with src. 
// Using src.IsZero() would make more sense but that's not what mergo did. dst.Set(src) } case reflect.Map: if dst.CanSet() && !src.IsZero() { // initialize dst if needed if dst.IsZero() { dst.Set(reflect.MakeMap(dstType)) } // shallow-merge overwriting dst keys with src keys for _, mapKey := range src.MapKeys() { dst.SetMapIndex(mapKey, src.MapIndex(mapKey)) } } case reflect.Slice: if dst.CanSet() && src.Len() > 0 { // overwrite dst with non-empty src slice dst.Set(src) } case reflect.Pointer: if dst.CanSet() && !src.IsZero() { // overwrite dst with non-zero values for other types if dstType.Elem().Kind() == reflect.Struct { // use struct pointer as-is dst.Set(src) } else { // shallow-copy non-struct pointer (interfaces, primitives, etc) dst.Set(reflect.New(dstType.Elem())) dst.Elem().Set(src.Elem()) } } default: if dst.CanSet() && !src.IsZero() { // overwrite dst with non-zero values for other types dst.Set(src) } } return nil } // hasExportedField returns true if the given type has any exported fields, // or if it has any anonymous/embedded struct fields with exported fields func hasExportedField(dstType reflect.Type) bool { for i, n := 0, dstType.NumField(); i < n; i++ { field := dstType.Field(i) if field.Anonymous && field.Type.Kind() == reflect.Struct { if hasExportedField(dstType.Field(i).Type) { return true } } else if len(field.PkgPath) == 0 { return true } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/flag.go
vendor/k8s.io/client-go/tools/clientcmd/flag.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd // transformingStringValue implements pflag.Value to store string values, // allowing transforming them while being set type transformingStringValue struct { target *string transformer func(string) (string, error) } func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue { *target = val return &transformingStringValue{ target: target, transformer: transformer, } } func (t *transformingStringValue) Set(val string) error { val, err := t.transformer(val) if err != nil { return err } *t.target = val return nil } func (t *transformingStringValue) Type() string { return "string" } func (t *transformingStringValue) String() string { return string(*t.target) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/loader.go
vendor/k8s.io/client-go/tools/clientcmd/loader.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "fmt" "os" "path/filepath" "reflect" goruntime "runtime" "strings" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" "k8s.io/client-go/util/homedir" ) const ( RecommendedConfigPathFlag = "kubeconfig" RecommendedConfigPathEnvVar = "KUBECONFIG" RecommendedHomeDir = ".kube" RecommendedFileName = "config" RecommendedSchemaName = "schema" ) var ( RecommendedConfigDir = filepath.Join(homedir.HomeDir(), RecommendedHomeDir) RecommendedHomeFile = filepath.Join(RecommendedConfigDir, RecommendedFileName) RecommendedSchemaFile = filepath.Join(RecommendedConfigDir, RecommendedSchemaName) ) // currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. // Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make // sure existing config files are migrated to their new locations properly. 
func currentMigrationRules() map[string]string { var oldRecommendedHomeFileName string if goruntime.GOOS == "windows" { oldRecommendedHomeFileName = RecommendedFileName } else { oldRecommendedHomeFileName = ".kubeconfig" } return map[string]string{ RecommendedHomeFile: filepath.Join(os.Getenv("HOME"), RecommendedHomeDir, oldRecommendedHomeFileName), } } type ClientConfigLoader interface { ConfigAccess // IsDefaultConfig returns true if the returned config matches the defaults. IsDefaultConfig(*restclient.Config) bool // Load returns the latest config Load() (*clientcmdapi.Config, error) } type KubeconfigGetter func() (*clientcmdapi.Config, error) type ClientConfigGetter struct { kubeconfigGetter KubeconfigGetter } // ClientConfigGetter implements the ClientConfigLoader interface. var _ ClientConfigLoader = &ClientConfigGetter{} func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) { return g.kubeconfigGetter() } func (g *ClientConfigGetter) GetLoadingPrecedence() []string { return nil } func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) { return g.kubeconfigGetter() } func (g *ClientConfigGetter) GetDefaultFilename() string { return "" } func (g *ClientConfigGetter) IsExplicitFile() bool { return false } func (g *ClientConfigGetter) GetExplicitFile() string { return "" } func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool { return false } // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: // EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath // ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if this file is not present type ClientConfigLoadingRules struct { ExplicitPath string Precedence []string // MigrationRules is a map of destination files to source files. 
If a destination file is not present, then the source file is checked. // If the source file is present, then it is copied to the destination file BEFORE any further loading happens. MigrationRules map[string]string // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so // that a default object that doesn't set this will usually get the behavior it wants. DoNotResolvePaths bool // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. // This should match the overrides passed in to ClientConfig loader. DefaultClientConfig ClientConfig // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. // In case of missing files, it warns the user about the missing files. WarnIfAllMissing bool // Warner is the warning log callback to use in case of missing files. Warner WarningHandler } // WarningHandler allows to set the logging function to use type WarningHandler func(error) func (handler WarningHandler) Warn(err error) { if handler == nil { klog.V(1).Info(err) } else { handler(err) } } type MissingConfigError struct { Missing []string } func (c MissingConfigError) Error() string { return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", ")) } // ClientConfigLoadingRules implements the ClientConfigLoader interface. var _ ClientConfigLoader = &ClientConfigLoadingRules{} // NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to // use this constructor func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { chain := []string{} warnIfAllMissing := false envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) if len(envVarFiles) != 0 { fileList := filepath.SplitList(envVarFiles) // prevent the same path load multiple times chain = append(chain, deduplicate(fileList)...) 
warnIfAllMissing = true } else { chain = append(chain, RecommendedHomeFile) } return &ClientConfigLoadingRules{ Precedence: chain, MigrationRules: currentMigrationRules(), WarnIfAllMissing: warnIfAllMissing, } } // Load starts by running the MigrationRules and then // takes the loading rules and returns a Config object based on following rules. // // if the ExplicitPath, return the unmerged explicit file // Otherwise, return a merged config based on the Precedence slice // // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. // Read errors or files with non-deserializable content produce errors. // The first file to set a particular map key wins and map key's value is never changed. // BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. // This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. // It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even // non-conflicting entries from the second file's "red-user" are discarded. // Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder // and only absolute file paths are returned. func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { if err := rules.Migrate(); err != nil { return nil, err } errlist := []error{} missingList := []string{} kubeConfigFiles := []string{} // Make sure a file we were explicitly told to use exists if len(rules.ExplicitPath) > 0 { if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) { return nil, err } kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath) } else { kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) 
} kubeconfigs := []*clientcmdapi.Config{} // read and cache the config files so that we only look at them once for _, filename := range kubeConfigFiles { if len(filename) == 0 { // no work to do continue } config, err := LoadFromFile(filename) if os.IsNotExist(err) { // skip missing files // Add to the missing list to produce a warning missingList = append(missingList, filename) continue } if err != nil { errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err)) continue } kubeconfigs = append(kubeconfigs, config) } if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { rules.Warner.Warn(MissingConfigError{Missing: missingList}) } // first merge all of our maps mapConfig := clientcmdapi.NewConfig() for _, kubeconfig := range kubeconfigs { if err := merge(mapConfig, kubeconfig); err != nil { return nil, err } } // merge all of the struct values in the reverse order so that priority is given correctly // errors are not added to the list the second time nonMapConfig := clientcmdapi.NewConfig() for i := len(kubeconfigs) - 1; i >= 0; i-- { kubeconfig := kubeconfigs[i] if err := merge(nonMapConfig, kubeconfig); err != nil { return nil, err } } // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and // get the values we expect. config := clientcmdapi.NewConfig() if err := merge(config, mapConfig); err != nil { return nil, err } if err := merge(config, nonMapConfig); err != nil { return nil, err } if rules.ResolvePaths() { if err := ResolveLocalPaths(config); err != nil { errlist = append(errlist, err) } } return config, utilerrors.NewAggregate(errlist) } // Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked. // If the source file is present, then it is copied to the destination file BEFORE any further loading happens. 
func (rules *ClientConfigLoadingRules) Migrate() error { if rules.MigrationRules == nil { return nil } for destination, source := range rules.MigrationRules { if _, err := os.Stat(destination); err == nil { // if the destination already exists, do nothing continue } else if os.IsPermission(err) { // if we can't access the file, skip it continue } else if !os.IsNotExist(err) { // if we had an error other than non-existence, fail return err } if sourceInfo, err := os.Stat(source); err != nil { if os.IsNotExist(err) || os.IsPermission(err) { // if the source file doesn't exist or we can't access it, there's no work to do. continue } // if we had an error other than non-existence, fail return err } else if sourceInfo.IsDir() { return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) } data, err := os.ReadFile(source) if err != nil { return err } // destination is created with mode 0666 before umask err = os.WriteFile(destination, data, 0666) if err != nil { return err } } return nil } // GetLoadingPrecedence implements ConfigAccess func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { if len(rules.ExplicitPath) > 0 { return []string{rules.ExplicitPath} } return rules.Precedence } // GetStartingConfig implements ConfigAccess func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) rawConfig, err := clientConfig.RawConfig() if os.IsNotExist(err) { return clientcmdapi.NewConfig(), nil } if err != nil { return nil, err } return &rawConfig, nil } // GetDefaultFilename implements ConfigAccess func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { // Explicit file if we have one. if rules.IsExplicitFile() { return rules.GetExplicitFile() } // Otherwise, first existing file from precedence. 
for _, filename := range rules.GetLoadingPrecedence() { if _, err := os.Stat(filename); err == nil { return filename } } // If none exists, use the first from precedence. if len(rules.Precedence) > 0 { return rules.Precedence[0] } return "" } // IsExplicitFile implements ConfigAccess func (rules *ClientConfigLoadingRules) IsExplicitFile() bool { return len(rules.ExplicitPath) > 0 } // GetExplicitFile implements ConfigAccess func (rules *ClientConfigLoadingRules) GetExplicitFile() string { return rules.ExplicitPath } // IsDefaultConfig returns true if the provided configuration matches the default func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool { if rules.DefaultClientConfig == nil { return false } defaultConfig, err := rules.DefaultClientConfig.ClientConfig() if err != nil { return false } return reflect.DeepEqual(config, defaultConfig) } // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } config, err := Load(kubeconfigBytes) if err != nil { return nil, err } klog.V(6).Infoln("Config loaded from file: ", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { obj.LocationOfOrigin = filename config.AuthInfos[key] = obj } for key, obj := range config.Clusters { obj.LocationOfOrigin = filename config.Clusters[key] = obj } for key, obj := range config.Contexts { obj.LocationOfOrigin = filename config.Contexts[key] = obj } if config.AuthInfos == nil { config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} } if config.Clusters == nil { config.Clusters = map[string]*clientcmdapi.Cluster{} } if config.Contexts == nil { config.Contexts = map[string]*clientcmdapi.Context{} } return config, nil } // Load takes a byte slice and deserializes the contents into Config object. 
// Encapsulates deserialization without assuming the source is a file. func Load(data []byte) (*clientcmdapi.Config, error) { config := clientcmdapi.NewConfig() // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) if len(data) == 0 { return config, nil } decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) if err != nil { return nil, err } return decoded.(*clientcmdapi.Config), nil } // WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with the mode 0600. If it is present // it stomps the contents func WriteToFile(config clientcmdapi.Config, filename string) error { content, err := Write(config) if err != nil { return err } dir := filepath.Dir(filename) if _, err := os.Stat(dir); os.IsNotExist(err) { if err = os.MkdirAll(dir, 0755); err != nil { return err } } if err := os.WriteFile(filename, content, 0600); err != nil { return err } return nil } func lockFile(filename string) error { // TODO: find a way to do this with actual file locks. Will // probably need separate solution for windows and Linux. // Make sure the dir exists before we try to create a lock file. dir := filepath.Dir(filename) if _, err := os.Stat(dir); os.IsNotExist(err) { if err = os.MkdirAll(dir, 0755); err != nil { return err } } f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) if err != nil { return err } f.Close() return nil } func unlockFile(filename string) error { return os.Remove(lockName(filename)) } func lockName(filename string) string { return filename + ".lock" } // Write serializes the config to yaml. // Encapsulates serialization without assuming the destination is a file. 
func Write(config clientcmdapi.Config) ([]byte, error) { return runtime.Encode(clientcmdlatest.Codec, &config) } func (rules ClientConfigLoadingRules) ResolvePaths() bool { return !rules.DoNotResolvePaths } // ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin // this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without // modification of its contents. func ResolveLocalPaths(config *clientcmdapi.Config) error { for _, cluster := range config.Clusters { if len(cluster.LocationOfOrigin) == 0 { continue } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) } if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { return err } } for _, authInfo := range config.AuthInfos { if len(authInfo.LocationOfOrigin) == 0 { continue } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) } if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { return err } } return nil } // RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already // absolute, but any existing path will be resolved relative to LocationOfOrigin func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { if len(cluster.LocationOfOrigin) == 0 { return fmt.Errorf("no location of origin for %s", cluster.Server) } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) } if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { return err } if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { return err } return nil } // RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already // absolute, but any existing path will be resolved relative to LocationOfOrigin func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { if len(authInfo.LocationOfOrigin) == 0 { return fmt.Errorf("no location of origin for %v", authInfo) } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) } if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { return err } if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { return err } return nil } func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) } func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { return ResolvePaths(GetConfigFileReferences(config), base) } func GetConfigFileReferences(config *clientcmdapi.Config) []*string { refs := []*string{} for _, cluster := range config.Clusters { refs = append(refs, GetClusterFileReferences(cluster)...) 
} for _, authInfo := range config.AuthInfos { refs = append(refs, GetAuthInfoFileReferences(authInfo)...) } return refs } func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { return []*string{&cluster.CertificateAuthority} } func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile} // Only resolve exec command if it isn't PATH based. if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) { s = append(s, &authInfo.Exec.Command) } return s } // ResolvePaths updates the given refs to be absolute paths, relative to the given base directory func ResolvePaths(refs []*string, base string) error { for _, ref := range refs { // Don't resolve empty paths if len(*ref) > 0 { // Don't resolve absolute paths if !filepath.IsAbs(*ref) { *ref = filepath.Join(base, *ref) } } } return nil } // RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. // Any path requiring a backstep is left as-is as long it is absolute. 
Any non-absolute path that can't be relativized produces an error func RelativizePathWithNoBacksteps(refs []*string, base string) error { for _, ref := range refs { // Don't relativize empty paths if len(*ref) > 0 { rel, err := MakeRelative(*ref, base) if err != nil { return err } // if we have a backstep, don't mess with the path if strings.HasPrefix(rel, "../") { if filepath.IsAbs(*ref) { continue } return fmt.Errorf("%v requires backsteps and is not absolute", *ref) } *ref = rel } } return nil } func MakeRelative(path, base string) (string, error) { if len(path) > 0 { rel, err := filepath.Rel(base, path) if err != nil { return path, err } return rel, nil } return path, nil } // deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged func deduplicate(s []string) []string { encountered := map[string]bool{} ret := make([]string, 0) for i := range s { if encountered[s[i]] { continue } encountered[s[i]] = true ret = append(ret, s[i]) } return ret }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "encoding/json" "fmt" "io" "os" "golang.org/x/term" clientauth "k8s.io/client-go/tools/auth" ) // AuthLoaders are used to build clientauth.Info objects. type AuthLoader interface { // LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info LoadAuth(path string) (*clientauth.Info, error) } // default implementation of an AuthLoader type defaultAuthLoader struct{} // LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { return clientauth.LoadFromFile(path) } type PromptingAuthLoader struct { reader io.Reader } // LoadAuth parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { // Prompt for user/pass and write a file if none exists. 
if _, err := os.Stat(path); os.IsNotExist(err) { authPtr, err := a.Prompt() if err != nil { return nil, err } auth := *authPtr data, err := json.Marshal(auth) if err != nil { return &auth, err } err = os.WriteFile(path, data, 0600) return &auth, err } authPtr, err := clientauth.LoadFromFile(path) if err != nil { return nil, err } return authPtr, nil } // Prompt pulls the user and password from a reader func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) { var err error auth := &clientauth.Info{} auth.User, err = promptForString("Username", a.reader, true) if err != nil { return nil, err } auth.Password, err = promptForString("Password", nil, false) if err != nil { return nil, err } return auth, nil } func promptForString(field string, r io.Reader, show bool) (result string, err error) { fmt.Printf("Please enter %s: ", field) if show { _, err = fmt.Fscan(r, &result) } else { var data []byte if term.IsTerminal(int(os.Stdin.Fd())) { data, err = term.ReadPassword(int(os.Stdin.Fd())) result = string(data) } else { return "", fmt.Errorf("error reading input for %s", field) } } return result, err } // NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader { return &PromptingAuthLoader{reader} } // NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file func NewDefaultAuthLoader() AuthLoader { return &defaultAuthLoader{} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
vendor/k8s.io/client-go/tools/clientcmd/client_config.go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clientcmd

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"strings"
	"unicode"

	restclient "k8s.io/client-go/rest"
	clientauth "k8s.io/client-go/tools/auth"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/klog/v2"
)

const (
	// clusterExtensionKey is reserved in the cluster extensions list for exec plugin config.
	clusterExtensionKey = "client.authentication.k8s.io/exec"
)

var (
	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
	// DEPRECATED will be replaced
	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
	// DefaultClientConfig represents the legacy behavior of this package for defaulting
	// DEPRECATED will be replaced
	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
		ClusterDefaults: ClusterDefaults,
	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
)

// getDefaultServer returns a default setting for DefaultClientConfig:
// the KUBERNETES_MASTER environment variable when set, otherwise localhost.
// DEPRECATED
func getDefaultServer() string {
	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
		return server
	}
	return "http://localhost:8080"
}

// ClientConfig is used to make it easy to get an api server client
type ClientConfig interface {
	// RawConfig returns the merged result of all overrides
	RawConfig() (clientcmdapi.Config, error)
	// ClientConfig returns a complete client config
	ClientConfig() (*restclient.Config, error)
	// Namespace returns the namespace resulting from the merged
	// result of all overrides and a boolean indicating if it was
	// overridden
	Namespace() (string, bool, error)
	// ConfigAccess returns the rules for loading/persisting the config.
	ConfigAccess() ConfigAccess
}

// OverridingClientConfig is used to enable overriding the raw KubeConfig
type OverridingClientConfig interface {
	ClientConfig
	// MergedRawConfig return the RawConfig merged with all overrides.
	MergedRawConfig() (clientcmdapi.Config, error)
}

// PersistAuthProviderConfigForUser returns a persister for the auth-provider
// config of the named user.
type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister

// promptedCredentials caches basic-auth credentials entered interactively so
// the user is only prompted once per DirectClientConfig.
type promptedCredentials struct {
	username string
	password string `datapolicy:"password"`
}

// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
type DirectClientConfig struct {
	config         clientcmdapi.Config
	contextName    string
	overrides      *ConfigOverrides
	fallbackReader io.Reader
	configAccess   ConfigAccess
	// promptedCredentials store the credentials input by the user
	promptedCredentials promptedCredentials
}

// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) OverridingClientConfig {
	return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
}

// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) OverridingClientConfig {
	return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}}
}

// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) OverridingClientConfig {
	return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}}
}

// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig
func NewClientConfigFromBytes(configBytes []byte) (OverridingClientConfig, error) {
	config, err := Load(configBytes)
	if err != nil {
		return nil, err
	}

	return &DirectClientConfig{*config, "", &ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil
}

// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes.
// For programmatic access, this is what you want 80% of the time
func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) {
	clientConfig, err := NewClientConfigFromBytes(configBytes)
	if err != nil {
		return nil, err
	}
	return clientConfig.ClientConfig()
}

// RawConfig implements ClientConfig; it returns the backing kubeconfig without
// applying any overrides.
func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
	return config.config, nil
}

// MergedRawConfig returns the raw kube config merged with the overrides
func (config *DirectClientConfig) MergedRawConfig() (clientcmdapi.Config, error) {
	if err := config.ConfirmUsable(); err != nil {
		return clientcmdapi.Config{}, err
	}
	merged := config.config.DeepCopy()

	// set the AuthInfo merged with overrides in the merged config
	mergedAuthInfo, err := config.getAuthInfo()
	if err != nil {
		return clientcmdapi.Config{}, err
	}
	mergedAuthInfoName, _ := config.getAuthInfoName()
	merged.AuthInfos[mergedAuthInfoName] = &mergedAuthInfo

	// set the Context merged with overrides in the merged config
	mergedContext, err := config.getContext()
	if err != nil {
		return clientcmdapi.Config{}, err
	}
	mergedContextName, _ := config.getContextName()
	merged.Contexts[mergedContextName] = &mergedContext
	merged.CurrentContext = mergedContextName

	// set the Cluster merged with overrides in the merged config
	configClusterInfo, err := config.getCluster()
	if err != nil {
		return clientcmdapi.Config{}, err
	}
	configClusterName, _ := config.getClusterName()
	merged.Clusters[configClusterName] = &configClusterInfo
	return *merged, nil
}

// ClientConfig implements ClientConfig
func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
	// check that getAuthInfo, getContext, and getCluster do not return an error.
	// Do this before checking if the current config is usable in the event that an
	// AuthInfo, Context, or Cluster config with user-defined names are not found.
	// This provides a user with the immediate cause for error if one is found
	configAuthInfo, err := config.getAuthInfo()
	if err != nil {
		return nil, err
	}

	_, err = config.getContext()
	if err != nil {
		return nil, err
	}

	configClusterInfo, err := config.getCluster()
	if err != nil {
		return nil, err
	}

	if err := config.ConfirmUsable(); err != nil {
		return nil, err
	}

	clientConfig := &restclient.Config{}
	clientConfig.Host = configClusterInfo.Server
	if configClusterInfo.ProxyURL != "" {
		u, err := parseProxyURL(configClusterInfo.ProxyURL)
		if err != nil {
			return nil, err
		}
		clientConfig.Proxy = http.ProxyURL(u)
	}

	clientConfig.DisableCompression = configClusterInfo.DisableCompression

	if config.overrides != nil && len(config.overrides.Timeout) > 0 {
		timeout, err := ParseTimeout(config.overrides.Timeout)
		if err != nil {
			return nil, err
		}
		clientConfig.Timeout = timeout
	}

	// strip any trailing path (query/fragment too) off the host so the client
	// talks to the server root, keeping opaque and bare-root URLs untouched
	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
		u.RawQuery = ""
		u.Fragment = ""
		clientConfig.Host = u.String()
	}
	if len(configAuthInfo.Impersonate) > 0 {
		clientConfig.Impersonate = restclient.ImpersonationConfig{
			UserName: configAuthInfo.Impersonate,
			UID:      configAuthInfo.ImpersonateUID,
			Groups:   configAuthInfo.ImpersonateGroups,
			Extra:    configAuthInfo.ImpersonateUserExtra,
		}
	}

	// only try to read the auth information if we are secure
	if restclient.IsConfigTransportTLS(*clientConfig) {
		var err error
		var persister restclient.AuthProviderConfigPersister
		if config.configAccess != nil {
			authInfoName, _ := config.getAuthInfoName()
			persister = PersisterForUser(config.configAccess, authInfoName)
		}
		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo)
		if err != nil {
			return nil, err
		}
		if err := merge(clientConfig, userAuthPartialConfig); err != nil {
			return nil, err
		}

		serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo)
		if err := merge(clientConfig, serverAuthPartialConfig); err != nil {
			return nil, err
		}
	}

	return clientConfig, nil
}

// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately.

// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo
// (the final result of command line flags and merged .kubeconfig files).
func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config {
	configClientConfig := &restclient.Config{}
	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
	configClientConfig.ServerName = configClusterInfo.TLSServerName

	return configClientConfig
}

// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo
// (the final result of command line flags and merged .kubeconfig files);
// if the information available there is insufficient, it prompts (if possible) for additional information.
func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
	mergedConfig := &restclient.Config{}

	// blindly overwrite existing values based on precedence
	if len(configAuthInfo.Token) > 0 {
		// an inline token wins over a token file; the file path is still recorded
		mergedConfig.BearerToken = configAuthInfo.Token
		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
	} else if len(configAuthInfo.TokenFile) > 0 {
		tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile)
		if err != nil {
			return nil, err
		}
		mergedConfig.BearerToken = string(tokenBytes)
		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
	}
	if len(configAuthInfo.Impersonate) > 0 {
		mergedConfig.Impersonate = restclient.ImpersonationConfig{
			UserName: configAuthInfo.Impersonate,
			UID:      configAuthInfo.ImpersonateUID,
			Groups:   configAuthInfo.ImpersonateGroups,
			Extra:    configAuthInfo.ImpersonateUserExtra,
		}
	}
	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
		mergedConfig.CertFile = configAuthInfo.ClientCertificate
		mergedConfig.CertData = configAuthInfo.ClientCertificateData
		mergedConfig.KeyFile = configAuthInfo.ClientKey
		mergedConfig.KeyData = configAuthInfo.ClientKeyData
	}
	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
		mergedConfig.Username = configAuthInfo.Username
		mergedConfig.Password = configAuthInfo.Password
	}
	if configAuthInfo.AuthProvider != nil {
		mergedConfig.AuthProvider = configAuthInfo.AuthProvider
		mergedConfig.AuthConfigPersister = persistAuthConfig
	}
	if configAuthInfo.Exec != nil {
		mergedConfig.ExecProvider = configAuthInfo.Exec
		// sanitize the install hint so a malicious kubeconfig cannot emit
		// terminal escape sequences when the hint is printed
		mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint)
		mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey]
	}

	// if there still isn't enough information to authenticate the user, try prompting
	if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
		if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 {
			// reuse credentials captured by an earlier prompt instead of asking again
			mergedConfig.Username = config.promptedCredentials.username
			mergedConfig.Password = config.promptedCredentials.password
			return mergedConfig, nil
		}
		prompter := NewPromptingAuthLoader(fallbackReader)
		promptedAuthInfo, err := prompter.Prompt()
		if err != nil {
			return nil, err
		}
		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
		previouslyMergedConfig := mergedConfig
		mergedConfig = &restclient.Config{}
		if err := merge(mergedConfig, promptedConfig); err != nil {
			return nil, err
		}
		if err := merge(mergedConfig, previouslyMergedConfig); err != nil {
			return nil, err
		}
		config.promptedCredentials.username = mergedConfig.Username
		config.promptedCredentials.password = mergedConfig.Password
	}

	return mergedConfig, nil
}

// makeUserIdentificationConfig returns a client.Config capable of being merged for only user identification information
func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
	config := &restclient.Config{}
	config.Username = info.User
	config.Password = info.Password
	config.CertFile = info.CertFile
	config.KeyFile = info.KeyFile
	config.BearerToken = info.BearerToken
	return config
}

// canIdentifyUser reports whether the config already carries some user
// credential: basic auth, a client certificate, a bearer token, an auth
// provider, or an exec credential plugin.
func canIdentifyUser(config restclient.Config) bool {
	return len(config.Username) > 0 ||
		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
		len(config.BearerToken) > 0 ||
		config.AuthProvider != nil ||
		config.ExecProvider != nil
}

// cleanANSIEscapeCodes takes an arbitrary string and ensures that there are no
// ANSI escape sequences that could put the terminal in a weird state (e.g.,
// "\e[1m" bolds text)
func cleanANSIEscapeCodes(s string) string {
	// spaceControlCharacters includes tab, new line, vertical tab, new page, and
	// carriage return. These are in the unicode.Cc category, but that category also
	// contains ESC (U+001B) which we don't want.
	spaceControlCharacters := unicode.RangeTable{
		R16: []unicode.Range16{
			{Lo: 0x0009, Hi: 0x000D, Stride: 1},
		},
	}

	// Why not make this deny-only (instead of allow-only)? Because unicode.C
	// contains newline and tab characters that we want.
	allowedRanges := []*unicode.RangeTable{
		unicode.L, unicode.M, unicode.N, unicode.P, unicode.S, unicode.Z,
		&spaceControlCharacters,
	}

	builder := strings.Builder{}
	for _, roon := range s {
		if unicode.IsOneOf(allowedRanges, roon) {
			builder.WriteRune(roon) // returns nil error, per go doc
		} else {
			// disallowed rune is replaced by its escaped code point, e.g. "U+001B"
			fmt.Fprintf(&builder, "%U", roon)
		}
	}

	return builder.String()
}

// Namespace implements ClientConfig
func (config *DirectClientConfig) Namespace() (string, bool, error) {
	if config.overrides != nil && config.overrides.Context.Namespace != "" {
		// In the event we have an empty config but we do have a namespace override, we should return
		// the namespace override instead of having config.ConfirmUsable() return an error. This allows
		// things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
		// --namespace flag honored instead of being ignored.
		return config.overrides.Context.Namespace, true, nil
	}

	if err := config.ConfirmUsable(); err != nil {
		return "", false, err
	}

	configContext, err := config.getContext()
	if err != nil {
		return "", false, err
	}

	if len(configContext.Namespace) == 0 {
		return "default", false, nil
	}

	return configContext.Namespace, false, nil
}

// ConfigAccess implements ClientConfig
func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
	return config.configAccess
}

// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config *DirectClientConfig) ConfirmUsable() error {
	validationErrors := make([]error, 0)

	var contextName string
	if len(config.contextName) != 0 {
		contextName = config.contextName
	} else {
		contextName = config.config.CurrentContext
	}

	if len(contextName) > 0 {
		_, exists := config.config.Contexts[contextName]
		if !exists {
			validationErrors = append(validationErrors, &errContextNotFound{contextName})
		}
	}

	authInfoName, _ := config.getAuthInfoName()
	authInfo, _ := config.getAuthInfo()
	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
	clusterName, _ := config.getClusterName()
	cluster, _ := config.getCluster()
	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
	// when direct client config is specified, and our only error is that no server is defined, we should
	// return a standard "no config" error
	if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster {
		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
	}
	return newErrConfigurationInvalid(validationErrors)
}

// getContextName returns the default, or user-set context name, and a boolean that indicates
// whether the default context name has been overwritten by a user-set flag, or left as its default value
func (config *DirectClientConfig) getContextName() (string, bool) {
	if config.overrides != nil && len(config.overrides.CurrentContext) != 0 {
		return config.overrides.CurrentContext, true
	}
	if len(config.contextName) != 0 {
		return config.contextName, false
	}

	return config.config.CurrentContext, false
}

// getAuthInfoName returns a string containing the current authinfo name for the current context,
// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or
// left as its default value
func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
	if config.overrides != nil && len(config.overrides.Context.AuthInfo) != 0 {
		return config.overrides.Context.AuthInfo, true
	}
	context, _ := config.getContext()
	return context.AuthInfo, false
}

// getClusterName returns a string containing the default, or user-set cluster name, and a boolean
// indicating whether the default clusterName has been overwritten by a user-set flag, or left as
// its default value
func (config *DirectClientConfig) getClusterName() (string, bool) {
	if config.overrides != nil && len(config.overrides.Context.Cluster) != 0 {
		return config.overrides.Context.Cluster, true
	}
	context, _ := config.getContext()
	return context.Cluster, false
}

// getContext returns the clientcmdapi.Context, or an error if a required context is not found.
func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
	contexts := config.config.Contexts
	contextName, required := config.getContextName()

	mergedContext := clientcmdapi.NewContext()
	if configContext, exists := contexts[contextName]; exists {
		if err := merge(mergedContext, configContext); err != nil {
			return clientcmdapi.Context{}, err
		}
	} else if required {
		return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
	}
	if config.overrides != nil {
		if err := merge(mergedContext, &config.overrides.Context); err != nil {
			return clientcmdapi.Context{}, err
		}
	}

	return *mergedContext, nil
}

// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found.
func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
	authInfos := config.config.AuthInfos
	authInfoName, required := config.getAuthInfoName()

	mergedAuthInfo := clientcmdapi.NewAuthInfo()
	if configAuthInfo, exists := authInfos[authInfoName]; exists {
		if err := merge(mergedAuthInfo, configAuthInfo); err != nil {
			return clientcmdapi.AuthInfo{}, err
		}
	} else if required {
		return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
	}
	if config.overrides != nil {
		if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
			return clientcmdapi.AuthInfo{}, err
		}
	}

	return *mergedAuthInfo, nil
}

// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found.
func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
	clusterInfos := config.config.Clusters
	clusterInfoName, required := config.getClusterName()

	mergedClusterInfo := clientcmdapi.NewCluster()
	if config.overrides != nil {
		if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	}
	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
		if err := merge(mergedClusterInfo, configClusterInfo); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	} else if required {
		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
	}
	if config.overrides != nil {
		if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	}

	// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
	//   otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set".
	// * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence.
	if config.overrides != nil {
		caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
		caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
		if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 {
			mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify
			mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority
			mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData
		}

		// if the --tls-server-name has been set in overrides, use that value.
		// if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too. This gives the property
		// that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is
		// usually correct.
		if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" {
			mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName
		}
	}

	return *mergedClusterInfo, nil
}

// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
// Can take options overrides for flags explicitly provided to the command inside the cluster container.
type inClusterClientConfig struct {
	overrides *ConfigOverrides
	// inClusterConfigProvider is a seam for tests; when nil,
	// restclient.InClusterConfig is used.
	inClusterConfigProvider func() (*restclient.Config, error)
}

var _ ClientConfig = &inClusterClientConfig{}

// RawConfig implements ClientConfig; in-cluster configuration has no
// kubeconfig file to return, so this always errors.
func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
}

// ClientConfig implements ClientConfig by loading the in-cluster configuration
// and layering any explicitly-provided overrides on top of it.
func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
	inClusterConfigProvider := config.inClusterConfigProvider
	if inClusterConfigProvider == nil {
		inClusterConfigProvider = restclient.InClusterConfig
	}

	icc, err := inClusterConfigProvider()
	if err != nil {
		return nil, err
	}

	// in-cluster configs only takes a host, token, or CA file
	// if any of them were individually provided, overwrite anything else
	if config.overrides != nil {
		if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
			icc.Host = server
		}
		if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
			icc.BearerToken = config.overrides.AuthInfo.Token
			icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
		}
		if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
			icc.TLSClientConfig.CAFile = certificateAuthorityFile
		}
	}

	return icc, nil
}

// Namespace implements ClientConfig
func (config *inClusterClientConfig) Namespace() (string, bool, error) {
	// This way assumes you've set the POD_NAMESPACE environment variable using the downward API.
	// This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up
	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
		return ns, false, nil
	}

	// Fall back to the namespace associated with the service account token, if available
	if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
			return ns, false, nil
		}
	}

	return "default", false, nil
}

// ConfigAccess implements ClientConfig
func (config *inClusterClientConfig) ConfigAccess() ConfigAccess {
	return NewDefaultClientConfigLoadingRules()
}

// Possible returns true if loading an inside-kubernetes-cluster is possible.
func (config *inClusterClientConfig) Possible() bool {
	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
		err == nil && !fi.IsDir()
}

// BuildConfigFromFlags is a helper function that builds configs from a master
// url or a kubeconfig filepath. These are passed in as command line flags for cluster
// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath
// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
	if kubeconfigPath == "" && masterUrl == "" {
		klog.Warning("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
		kubeconfig, err := restclient.InClusterConfig()
		if err == nil {
			return kubeconfig, nil
		}
		klog.Warning("error creating inClusterConfig, falling back to default config: ", err)
	}
	return NewNonInteractiveDeferredLoadingClientConfig(
		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
}

// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master
// url and a kubeconfigGetter.
func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) {
	// TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here.
	cc := NewNonInteractiveDeferredLoadingClientConfig(
		&ClientConfigGetter{kubeconfigGetter: kubeconfigGetter},
		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}})
	return cc.ClientConfig()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/validation.go
vendor/k8s.io/client-go/tools/clientcmd/validation.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "errors" "fmt" "os" "reflect" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) var ( ErrNoContext = errors.New("no context chosen") ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable") // message is for consistency with old behavior ErrEmptyCluster = errors.New("cluster has no server defined") ) // NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error func NewEmptyConfigError(message string) error { return &errEmptyConfig{message} } type errEmptyConfig struct { message string } func (e *errEmptyConfig) Error() string { return e.message } type errContextNotFound struct { ContextName string } func (e *errContextNotFound) Error() string { return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) } // IsContextNotFound returns a boolean indicating whether the error is known to // report that a context was not found func IsContextNotFound(err error) bool { if err == nil { return false } if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { return true } return strings.Contains(err.Error(), "context was not found for specified context") } // IsEmptyConfig returns true if the provided error indicates the 
provided configuration // is empty. func IsEmptyConfig(err error) bool { switch t := err.(type) { case errConfigurationInvalid: if len(t) != 1 { return false } _, ok := t[0].(*errEmptyConfig) return ok } _, ok := err.(*errEmptyConfig) return ok } // errConfigurationInvalid is a set of errors indicating the configuration is invalid. type errConfigurationInvalid []error // errConfigurationInvalid implements error and Aggregate var _ error = errConfigurationInvalid{} var _ utilerrors.Aggregate = errConfigurationInvalid{} func newErrConfigurationInvalid(errs []error) error { switch len(errs) { case 0: return nil default: return errConfigurationInvalid(errs) } } // Error implements the error interface func (e errConfigurationInvalid) Error() string { return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) } // Errors implements the utilerrors.Aggregate interface func (e errConfigurationInvalid) Errors() []error { return e } // Is implements the utilerrors.Aggregate interface func (e errConfigurationInvalid) Is(target error) bool { return e.visit(func(err error) bool { return errors.Is(err, target) }) } func (e errConfigurationInvalid) visit(f func(err error) bool) bool { for _, err := range e { switch err := err.(type) { case errConfigurationInvalid: if match := err.visit(f); match { return match } case utilerrors.Aggregate: for _, nestedErr := range err.Errors() { if match := f(nestedErr); match { return match } } default: if match := f(err); match { return match } } } return false } // IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. func IsConfigurationInvalid(err error) bool { switch err.(type) { case *errContextNotFound, errConfigurationInvalid: return true } return IsContextNotFound(err) } // Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
func Validate(config clientcmdapi.Config) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } if len(config.CurrentContext) != 0 { if _, exists := config.Contexts[config.CurrentContext]; !exists { validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) } } for contextName, context := range config.Contexts { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) } for authInfoName, authInfo := range config.AuthInfos { validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) } for clusterName, clusterInfo := range config.Clusters { validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) } return newErrConfigurationInvalid(validationErrors) } // ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { validationErrors := make([]error, 0) if clientcmdapi.IsConfigEmpty(&config) { return newErrConfigurationInvalid([]error{ErrEmptyConfig}) } var contextName string if len(passedContextName) != 0 { contextName = passedContextName } else { contextName = config.CurrentContext } if len(contextName) == 0 { return ErrNoContext } context, exists := config.Contexts[contextName] if !exists { validationErrors = append(validationErrors, &errContextNotFound{contextName}) } if exists { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) // Default to empty users and clusters and let the validation function report an error. 
authInfo := config.AuthInfos[context.AuthInfo] if authInfo == nil { authInfo = &clientcmdapi.AuthInfo{} } validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...) cluster := config.Clusters[context.Cluster] if cluster == nil { cluster = &clientcmdapi.Cluster{} } validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...) } return newErrConfigurationInvalid(validationErrors) } // validateClusterInfo looks for conflicts and errors in the cluster info func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { validationErrors := make([]error, 0) emptyCluster := clientcmdapi.NewCluster() if reflect.DeepEqual(*emptyCluster, clusterInfo) { return []error{ErrEmptyCluster} } if len(clusterInfo.Server) == 0 { if len(clusterName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) } else { validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) } } if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { if _, err := parseProxyURL(proxyURL); err != nil { validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err)) } } // Make sure CA data and CA file aren't both specified if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } else { defer clientCertCA.Close() } } return validationErrors } // validateAuthInfo looks for conflicts and errors in the auth info func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { validationErrors := make([]error, 0) usingAuthPath := false methods := make([]string, 0, 3) if len(authInfo.Token) != 0 { methods = append(methods, "token") } if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { methods = append(methods, "basicAuth") } if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { // Make sure cert data and file aren't both specified if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) } // Make sure key data and file aren't both specified if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) } // Make sure a key is specified if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) } if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } else { defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } else { defer clientKeyFile.Close() } } } if authInfo.Exec != nil { if authInfo.AuthProvider != nil { validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) } if len(authInfo.Exec.Command) == 0 { validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) } if len(authInfo.Exec.APIVersion) == 0 { validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) } for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) } } 
switch authInfo.Exec.InteractiveMode { case "": validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName)) case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode: // These are valid default: validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode)) } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case if (len(methods) > 1) && (!usingAuthPath) { validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } // ImpersonateUID, ImpersonateGroups or ImpersonateUserExtra should be requested with a user if (len(authInfo.ImpersonateUID) > 0 || len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { validationErrors = append(validationErrors, fmt.Errorf("requesting uid, groups or user-extra for %v without impersonating a user", authInfoName)) } return validationErrors } // validateContext looks for errors in the context. 
It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { validationErrors := make([]error, 0) if len(contextName) == 0 { validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) } if len(context.AuthInfo) == 0 { validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) } if len(context.Cluster) == 0 { validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) } else if _, exists := config.Clusters[context.Cluster]; !exists { validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) } if len(context.Namespace) != 0 { if len(validation.IsDNS1123Label(context.Namespace)) != 0 { validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) } } return validationErrors }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/config.go
vendor/k8s.io/client-go/tools/clientcmd/config.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "errors" "os" "path/filepath" "reflect" "sort" "k8s.io/klog/v2" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files type ConfigAccess interface { // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config GetLoadingPrecedence() []string // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules GetStartingConfig() (*clientcmdapi.Config, error) // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. GetDefaultFilename() string // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more IsExplicitFile() bool // GetExplicitFile returns the particular file this command is operating against. 
This implementation only ever has one, but implementations that handle local, global, and flags may have more GetExplicitFile() string } type PathOptions struct { // GlobalFile is the full path to the file to load as the global (final) option GlobalFile string // EnvVar is the env var name that points to the list of kubeconfig files to load EnvVar string // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file ExplicitFileFlag string // GlobalFileSubpath is an optional value used for displaying help GlobalFileSubpath string LoadingRules *ClientConfigLoadingRules } var ( // UseModifyConfigLock ensures that access to kubeconfig file using ModifyConfig method // is being guarded by a lock file. // This variable is intentionaly made public so other consumers of this library // can modify its default behavior, but be caution when disabling it since // this will make your code not threadsafe. UseModifyConfigLock = true ) func (o *PathOptions) GetEnvVarFiles() []string { if len(o.EnvVar) == 0 { return []string{} } envVarValue := os.Getenv(o.EnvVar) if len(envVarValue) == 0 { return []string{} } fileList := filepath.SplitList(envVarValue) // prevent the same path load multiple times return deduplicate(fileList) } func (o *PathOptions) GetLoadingPrecedence() []string { if o.IsExplicitFile() { return []string{o.GetExplicitFile()} } if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { return envVarFiles } return []string{o.GlobalFile} } func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { // don't mutate the original loadingRules := *o.LoadingRules loadingRules.Precedence = o.GetLoadingPrecedence() clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) rawConfig, err := clientConfig.RawConfig() if os.IsNotExist(err) { return clientcmdapi.NewConfig(), nil } if err != nil { return nil, err } return &rawConfig, nil } func (o *PathOptions) GetDefaultFilename() string { if 
o.IsExplicitFile() { return o.GetExplicitFile() } if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { if len(envVarFiles) == 1 { return envVarFiles[0] } // if any of the envvar files already exists, return it for _, envVarFile := range envVarFiles { if _, err := os.Stat(envVarFile); err == nil { return envVarFile } } // otherwise, return the last one in the list return envVarFiles[len(envVarFiles)-1] } return o.GlobalFile } func (o *PathOptions) IsExplicitFile() bool { return len(o.LoadingRules.ExplicitPath) > 0 } func (o *PathOptions) GetExplicitFile() string { return o.LoadingRules.ExplicitPath } func NewDefaultPathOptions() *PathOptions { ret := &PathOptions{ GlobalFile: RecommendedHomeFile, EnvVar: RecommendedConfigPathEnvVar, ExplicitFileFlag: RecommendedConfigPathFlag, GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName), LoadingRules: NewDefaultClientConfigLoadingRules(), } ret.LoadingRules.DoNotResolvePaths = true return ret } // ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or // uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. // Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values // (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, // that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any // modified element. 
func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { if UseModifyConfigLock { possibleSources := configAccess.GetLoadingPrecedence() // sort the possible kubeconfig files so we always "lock" in the same order // to avoid deadlock (note: this can fail w/ symlinks, but... come on). sort.Strings(possibleSources) for _, filename := range possibleSources { if err := lockFile(filename); err != nil { return err } defer unlockFile(filename) } } startingConfig, err := configAccess.GetStartingConfig() if err != nil { return err } // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. // Special case the test for current context and preferences since those always write to the default file. if reflect.DeepEqual(*startingConfig, newConfig) { // nothing to do return nil } if startingConfig.CurrentContext != newConfig.CurrentContext { if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { return err } } if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { if err := writePreferences(configAccess, newConfig.Preferences); err != nil { return err } } // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions for key, cluster := range newConfig.Clusters { startingCluster, exists := startingConfig.Clusters[key] if !reflect.DeepEqual(cluster, startingCluster) || !exists { destinationFile := cluster.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } configToWrite, err := getConfigFromFile(destinationFile) if err != nil { return err } t := *cluster configToWrite.Clusters[key] = &t configToWrite.Clusters[key].LocationOfOrigin = destinationFile if relativizePaths { if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { return err } } if err := WriteToFile(*configToWrite, destinationFile); err != nil { return err } } } // seenConfigs stores a map of config source filenames to computed config objects seenConfigs := map[string]*clientcmdapi.Config{} for key, context := range newConfig.Contexts { startingContext, exists := startingConfig.Contexts[key] if !reflect.DeepEqual(context, startingContext) || !exists { destinationFile := context.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } // we only obtain a fresh config object from its source file // if we have not seen it already - this prevents us from // reading and writing to the same number of files repeatedly // when multiple / all contexts share the same destination file. 
configToWrite, seen := seenConfigs[destinationFile] if !seen { var err error configToWrite, err = getConfigFromFile(destinationFile) if err != nil { return err } seenConfigs[destinationFile] = configToWrite } configToWrite.Contexts[key] = context } } // actually persist config object changes for destinationFile, configToWrite := range seenConfigs { if err := WriteToFile(*configToWrite, destinationFile); err != nil { return err } } for key, authInfo := range newConfig.AuthInfos { startingAuthInfo, exists := startingConfig.AuthInfos[key] if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { destinationFile := authInfo.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } configToWrite, err := getConfigFromFile(destinationFile) if err != nil { return err } t := *authInfo configToWrite.AuthInfos[key] = &t configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile if relativizePaths { if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { return err } } if err := WriteToFile(*configToWrite, destinationFile); err != nil { return err } } } for key, cluster := range startingConfig.Clusters { if _, exists := newConfig.Clusters[key]; !exists { destinationFile := cluster.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } configToWrite, err := getConfigFromFile(destinationFile) if err != nil { return err } delete(configToWrite.Clusters, key) if err := WriteToFile(*configToWrite, destinationFile); err != nil { return err } } } for key, context := range startingConfig.Contexts { if _, exists := newConfig.Contexts[key]; !exists { destinationFile := context.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } configToWrite, err := getConfigFromFile(destinationFile) if err != nil { return err } delete(configToWrite.Contexts, key) if err := WriteToFile(*configToWrite, destinationFile); err != nil 
{ return err } } } for key, authInfo := range startingConfig.AuthInfos { if _, exists := newConfig.AuthInfos[key]; !exists { destinationFile := authInfo.LocationOfOrigin if len(destinationFile) == 0 { destinationFile = configAccess.GetDefaultFilename() } configToWrite, err := getConfigFromFile(destinationFile) if err != nil { return err } delete(configToWrite.AuthInfos, key) if err := WriteToFile(*configToWrite, destinationFile); err != nil { return err } } } return nil } func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { return &persister{configAccess, user} } type persister struct { configAccess ConfigAccess user string } func (p *persister) Persist(config map[string]string) error { newConfig, err := p.configAccess.GetStartingConfig() if err != nil { return err } authInfo, ok := newConfig.AuthInfos[p.user] if ok && authInfo.AuthProvider != nil { authInfo.AuthProvider.Config = config return ModifyConfig(p.configAccess, *newConfig, false) } return nil } // writeCurrentContext takes three possible paths. // If newCurrentContext is the same as the startingConfig's current context, then we exit. // If newCurrentContext has a value, then that value is written into the default destination file. 
// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { if startingConfig, err := configAccess.GetStartingConfig(); err != nil { return err } else if startingConfig.CurrentContext == newCurrentContext { return nil } if configAccess.IsExplicitFile() { file := configAccess.GetExplicitFile() currConfig, err := getConfigFromFile(file) if err != nil { return err } currConfig.CurrentContext = newCurrentContext if err := WriteToFile(*currConfig, file); err != nil { return err } return nil } if len(newCurrentContext) > 0 { destinationFile := configAccess.GetDefaultFilename() config, err := getConfigFromFile(destinationFile) if err != nil { return err } config.CurrentContext = newCurrentContext if err := WriteToFile(*config, destinationFile); err != nil { return err } return nil } // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it for _, file := range configAccess.GetLoadingPrecedence() { if _, err := os.Stat(file); err == nil { currConfig, err := getConfigFromFile(file) if err != nil { return err } if len(currConfig.CurrentContext) > 0 { currConfig.CurrentContext = newCurrentContext if err := WriteToFile(*currConfig, file); err != nil { return err } return nil } } } return errors.New("no config found to write context") } func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { if startingConfig, err := configAccess.GetStartingConfig(); err != nil { return err } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { return nil } if configAccess.IsExplicitFile() { file := configAccess.GetExplicitFile() currConfig, err := getConfigFromFile(file) if err != nil { return err } currConfig.Preferences = newPrefs if err := WriteToFile(*currConfig, file); err != nil { return err } return nil } for _, 
file := range configAccess.GetLoadingPrecedence() { currConfig, err := getConfigFromFile(file) if err != nil { return err } if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { currConfig.Preferences = newPrefs if err := WriteToFile(*currConfig, file); err != nil { return err } return nil } } return errors.New("no config found to write preferences") } // getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { config, err := LoadFromFile(filename) if err != nil && !os.IsNotExist(err) { return nil, err } if config == nil { config = clientcmdapi.NewConfig() } return config, nil } // GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { config, err := getConfigFromFile(filename) if err != nil { klog.FatalDepth(1, err) } return config }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "io" "sync" "k8s.io/klog/v2" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. // It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that // the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before // the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid // passing extraneous information down a call stack type DeferredLoadingClientConfig struct { loader ClientConfigLoader overrides *ConfigOverrides fallbackReader io.Reader clientConfig ClientConfig loadingLock sync.Mutex // provided for testing icc InClusterConfig } // InClusterConfig abstracts details of whether the client is running in a cluster for testing. 
type InClusterConfig interface { ClientConfig Possible() bool } // NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} } // NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { config.loadingLock.Lock() defer config.loadingLock.Unlock() if config.clientConfig != nil { return config.clientConfig, nil } mergedConfig, err := config.loader.Load() if err != nil { return nil, err } var currentContext string if config.overrides != nil { currentContext = config.overrides.CurrentContext } if config.fallbackReader != nil { config.clientConfig = NewInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.fallbackReader, config.loader) } else { config.clientConfig = NewNonInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.loader) } return config.clientConfig, nil } func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { mergedConfig, err := config.createClientConfig() if err != nil { return clientcmdapi.Config{}, err } return mergedConfig.RawConfig() } // ClientConfig implements ClientConfig func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { mergedClientConfig, err := config.createClientConfig() if err != nil { return nil, 
err } // load the configuration and return on non-empty errors and if the // content differs from the default config mergedConfig, err := mergedClientConfig.ClientConfig() switch { case err != nil: if !IsEmptyConfig(err) { // return on any error except empty config return nil, err } case mergedConfig != nil: // the configuration is valid, but if this is equal to the defaults we should try // in-cluster configuration if !config.loader.IsDefaultConfig(mergedConfig) { return mergedConfig, nil } } // check for in-cluster configuration and use it if config.icc.Possible() { klog.V(4).Infof("Using in-cluster configuration") return config.icc.ClientConfig() } // return the result of the merged client config return mergedConfig, err } // Namespace implements KubeConfig func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { mergedKubeConfig, err := config.createClientConfig() if err != nil { return "", false, err } ns, overridden, err := mergedKubeConfig.Namespace() // if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or // if in-cluster config is not possible, return immediately if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() { // return on any error except empty config return ns, overridden, err } if len(ns) > 0 { // if we got a non-default namespace from the kubeconfig, use it if ns != "default" { return ns, false, nil } // if we got a default namespace, determine whether it was explicit or implicit if raw, err := mergedKubeConfig.RawConfig(); err == nil { // determine the current context currentContext := raw.CurrentContext if config.overrides != nil && len(config.overrides.CurrentContext) > 0 { currentContext = config.overrides.CurrentContext } if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 { return ns, false, nil } } } klog.V(4).Infof("Using in-cluster namespace") // allow the namespace from the service account token 
directory to be used. return config.icc.Namespace() } // ConfigAccess implements ClientConfig func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { return config.loader }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
vendor/k8s.io/client-go/tools/clientcmd/overrides.go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clientcmd

import (
	"strconv"
	"strings"

	"github.com/spf13/pflag"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object.  You can't
// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one".
type ConfigOverrides struct {
	AuthInfo clientcmdapi.AuthInfo
	// ClusterDefaults are applied before the configured cluster info is loaded.
	ClusterDefaults clientcmdapi.Cluster
	ClusterInfo     clientcmdapi.Cluster
	Context         clientcmdapi.Context
	CurrentContext  string
	Timeout         string
}

// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
// corresponds to ConfigOverrides.
type ConfigOverrideFlags struct {
	AuthOverrideFlags    AuthOverrideFlags
	ClusterOverrideFlags ClusterOverrideFlags
	ContextOverrideFlags ContextOverrideFlags
	CurrentContext       FlagInfo
	Timeout              FlagInfo
}

// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects.
type AuthOverrideFlags struct {
	ClientCertificate FlagInfo
	ClientKey         FlagInfo
	Token             FlagInfo
	Impersonate       FlagInfo
	ImpersonateUID    FlagInfo
	ImpersonateGroups FlagInfo
	Username          FlagInfo
	Password          FlagInfo
}

// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects.
type ContextOverrideFlags struct {
	ClusterName  FlagInfo
	AuthInfoName FlagInfo
	Namespace    FlagInfo
}

// ClusterOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects.
type ClusterOverrideFlags struct {
	APIServer             FlagInfo
	APIVersion            FlagInfo
	CertificateAuthority  FlagInfo
	InsecureSkipTLSVerify FlagInfo
	TLSServerName         FlagInfo
	ProxyURL              FlagInfo
	DisableCompression    FlagInfo
}

// FlagInfo contains information about how to register a flag.  This struct is useful if you want to provide a way for an extender to
// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender.  This makes for
// coherent extension, without full prescription.
type FlagInfo struct {
	// LongName is the long string for a flag.  If this is empty, then the flag will not be bound
	LongName string
	// ShortName is the single character for a flag.  If this is empty, then there will be no short flag
	ShortName string
	// Default is the default value for the flag
	Default string
	// Description is the description for the flag
	Description string
}

// AddSecretAnnotation adds a "classified" annotation to the flag named by f.LongName,
// marking its value as sensitive so that tooling can redact it.
// NOTE(review): the error returned by SetAnnotation (e.g. for an unknown flag name) is
// silently ignored here — confirm that is intentional before relying on the annotation.
func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo {
	flags.SetAnnotation(f.LongName, "classified", []string{"true"})
	return f
}

// BindStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo {
	// you can't register a flag without a long name
	if len(f.LongName) > 0 {
		flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
	}
	return f
}

// BindTransformingStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered.
// The transformer is applied to the raw flag value before it is stored in target.
func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo {
	// you can't register a flag without a long name
	if len(f.LongName) > 0 {
		flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description)
	}
	return f
}

// BindStringArrayFlag binds the flag based on the provided info.  If LongName == "", nothing is registered.
// A non-empty Default becomes a single-element default slice.
func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo {
	// you can't register a flag without a long name
	if len(f.LongName) > 0 {
		sliceVal := []string{}
		if len(f.Default) > 0 {
			sliceVal = []string{f.Default}
		}
		flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description)
	}
	return f
}

// BindBoolFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo {
	// you can't register a flag without a long name
	if len(f.LongName) > 0 {
		// try to parse Default as a bool.  If it fails, assume false
		boolVal, err := strconv.ParseBool(f.Default)
		if err != nil {
			boolVal = false
		}
		flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description)
	}
	return f
}

// Canonical kubeconfig-related flag names shared by all bindings below.
const (
	FlagClusterName        = "cluster"
	FlagAuthInfoName       = "user"
	FlagContext            = "context"
	FlagNamespace          = "namespace"
	FlagAPIServer          = "server"
	FlagTLSServerName      = "tls-server-name"
	FlagInsecure           = "insecure-skip-tls-verify"
	FlagCertFile           = "client-certificate"
	FlagKeyFile            = "client-key"
	FlagCAFile             = "certificate-authority"
	FlagEmbedCerts         = "embed-certs"
	FlagBearerToken        = "token"
	FlagImpersonate        = "as"
	FlagImpersonateUID     = "as-uid"
	FlagImpersonateGroup   = "as-group"
	FlagUsername           = "username"
	FlagPassword           = "password"
	FlagTimeout            = "request-timeout"
	FlagProxyURL           = "proxy-url"
	FlagDisableCompression = "disable-compression"
)

// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
	return ConfigOverrideFlags{
		AuthOverrideFlags:    RecommendedAuthOverrideFlags(prefix),
		ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
		ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),

		CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"},
		Timeout:        FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."},
	}
}

// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags {
	return AuthOverrideFlags{
		ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"},
		ClientKey:         FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"},
		Token:             FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"},
		Impersonate:       FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"},
		ImpersonateUID:    FlagInfo{prefix + FlagImpersonateUID, "", "", "UID to impersonate for the operation"},
		ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."},
		Username:          FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"},
		Password:          FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"},
	}
}

// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing.
// NOTE(review): the APIVersion field of ClusterOverrideFlags is deliberately(?) left unset here — confirm it is a
// legacy field before wiring it up.
func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
	return ClusterOverrideFlags{
		APIServer:             FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
		CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"},
		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
		TLSServerName:         FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."},
		ProxyURL:              FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"},
		DisableCompression:    FlagInfo{prefix + FlagDisableCompression, "", "", "If true, opt-out of response compression for all requests to the server"},
	}
}

// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
	return ContextOverrideFlags{
		ClusterName:  FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
		AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
		Namespace:    FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"},
	}
}

// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
	BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
	BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
	BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
	flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
	flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout)
}

// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables.
// Every auth flag is annotated as "classified" since each can carry credentials.
func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
	flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags)
	flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags)
	flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags)
	flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags)
	flagNames.ImpersonateUID.BindStringFlag(flags, &authInfo.ImpersonateUID).AddSecretAnnotation(flags)
	flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags)
	flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags)
	flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags)
}

// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
	flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName)
	flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL)
	flagNames.DisableCompression.BindBoolFlag(flags, &clusterInfo.DisableCompression)
}

// BindContextFlags is a convenience method to bind the specified flags to their associated variables
func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
	flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
	flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
	flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix)
}

// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively
func RemoveNamespacesPrefix(value string) (string, error) {
	for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} {
		// strictly-greater length check means a bare "ns/" (nothing after the prefix) is left untouched
		if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) {
			value = value[len(prefix):]
			break
		}
	}
	return value, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/doc.go
vendor/k8s.io/client-go/tools/clientcmd/doc.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package clientcmd provides one stop shopping for building a working client from a fixed config, from a .kubeconfig file, from command line flags, or from any merged combination. Sample usage from merged .kubeconfig files (local directory, home directory) loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() // if you want to change the loading rules (which files in which order), you can do so here configOverrides := &clientcmd.ConfigOverrides{} // if you want to change override values or bind them to flags, there are methods to help you kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) config, err := kubeConfig.ClientConfig() if err != nil { // Do something } client, err := metav1.New(config) // ... */ package clientcmd // import "k8s.io/client-go/tools/clientcmd"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
vendor/k8s.io/client-go/tools/clientcmd/helpers.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package clientcmd import ( "fmt" "net/url" "strconv" "time" ) // ParseTimeout returns a parsed duration from a string // A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h). func ParseTimeout(duration string) (time.Duration, error) { if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { return (time.Duration(i) * time.Second), nil } if requestTimeout, err := time.ParseDuration(duration); err == nil { return requestTimeout, nil } return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") } func parseProxyURL(proxyURL string) (*url.URL, error) { u, err := url.Parse(proxyURL) if err != nil { return nil, fmt.Errorf("could not parse: %v", proxyURL) } switch u.Scheme { case "http", "https", "socks5": default: return nil, fmt.Errorf("unsupported scheme %q, must be http, https, or socks5", u.Scheme) } return u, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.
// NOTE(review): generated file — do not hand-edit; regenerate with deepcopy-gen
// if the api types change.

package api

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
	*out = *in
	if in.ClientCertificateData != nil {
		in, out := &in.ClientCertificateData, &out.ClientCertificateData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.ClientKeyData != nil {
		in, out := &in.ClientKeyData, &out.ClientKeyData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.ImpersonateGroups != nil {
		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ImpersonateUserExtra != nil {
		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
		*out = make(map[string][]string, len(*in))
		for key, val := range *in {
			var outVal []string
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make([]string, len(*in))
				copy(*out, *in)
			}
			(*out)[key] = outVal
		}
	}
	if in.AuthProvider != nil {
		in, out := &in.AuthProvider, &out.AuthProvider
		*out = new(AuthProviderConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Exec != nil {
		in, out := &in.Exec, &out.Exec
		*out = new(ExecConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Extensions != nil {
		in, out := &in.Extensions, &out.Extensions
		*out = make(map[string]runtime.Object, len(*in))
		for key, val := range *in {
			if val == nil {
				(*out)[key] = nil
			} else {
				(*out)[key] = val.DeepCopyObject()
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
func (in *AuthInfo) DeepCopy() *AuthInfo {
	if in == nil {
		return nil
	}
	out := new(AuthInfo)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
	*out = *in
	if in.Config != nil {
		in, out := &in.Config, &out.Config
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
	if in == nil {
		return nil
	}
	out := new(AuthProviderConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
	if in.CertificateAuthorityData != nil {
		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.Extensions != nil {
		in, out := &in.Extensions, &out.Extensions
		*out = make(map[string]runtime.Object, len(*in))
		for key, val := range *in {
			if val == nil {
				(*out)[key] = nil
			} else {
				(*out)[key] = val.DeepCopyObject()
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
	if in == nil {
		return nil
	}
	out := new(Cluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	in.Preferences.DeepCopyInto(&out.Preferences)
	if in.Clusters != nil {
		in, out := &in.Clusters, &out.Clusters
		*out = make(map[string]*Cluster, len(*in))
		for key, val := range *in {
			var outVal *Cluster
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(Cluster)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
	}
	if in.AuthInfos != nil {
		in, out := &in.AuthInfos, &out.AuthInfos
		*out = make(map[string]*AuthInfo, len(*in))
		for key, val := range *in {
			var outVal *AuthInfo
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(AuthInfo)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
	}
	if in.Contexts != nil {
		in, out := &in.Contexts, &out.Contexts
		*out = make(map[string]*Context, len(*in))
		for key, val := range *in {
			var outVal *Context
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(Context)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
	}
	if in.Extensions != nil {
		in, out := &in.Extensions, &out.Extensions
		*out = make(map[string]runtime.Object, len(*in))
		for key, val := range *in {
			if val == nil {
				(*out)[key] = nil
			} else {
				(*out)[key] = val.DeepCopyObject()
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Config) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Context) DeepCopyInto(out *Context) {
	*out = *in
	if in.Extensions != nil {
		in, out := &in.Extensions, &out.Extensions
		*out = make(map[string]runtime.Object, len(*in))
		for key, val := range *in {
			if val == nil {
				(*out)[key] = nil
			} else {
				(*out)[key] = val.DeepCopyObject()
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
func (in *Context) DeepCopy() *Context {
	if in == nil {
		return nil
	}
	out := new(Context)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
	*out = *in
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]ExecEnvVar, len(*in))
		copy(*out, *in)
	}
	if in.Config != nil {
		out.Config = in.Config.DeepCopyObject()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
func (in *ExecConfig) DeepCopy() *ExecConfig {
	if in == nil {
		return nil
	}
	out := new(ExecConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
	if in == nil {
		return nil
	}
	out := new(ExecEnvVar)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Preferences) DeepCopyInto(out *Preferences) {
	*out = *in
	if in.Extensions != nil {
		in, out := &in.Extensions, &out.Extensions
		*out = make(map[string]runtime.Object, len(*in))
		for key, val := range *in {
			if val == nil {
				(*out)[key] = nil
			} else {
				(*out)[key] = val.DeepCopyObject()
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
func (in *Preferences) DeepCopy() *Preferences {
	if in == nil {
		return nil
	}
	out := new(Preferences)
	in.DeepCopyInto(out)
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
vendor/k8s.io/client-go/tools/clientcmd/api/types.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package api import ( "fmt" "k8s.io/apimachinery/pkg/runtime" ) // Where possible, json tags match the cli argument names. // Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. // Config holds the information needed to build connect to remote kubernetes clusters as a given user // IMPORTANT if you add fields to this struct, please update IsConfigEmpty() // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. // +k8s:conversion-gen=false // +optional Kind string `json:"kind,omitempty"` // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. 
// +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions Preferences Preferences `json:"preferences"` // Clusters is a map of referencable names to cluster configs Clusters map[string]*Cluster `json:"clusters"` // AuthInfos is a map of referencable names to user configs AuthInfos map[string]*AuthInfo `json:"users"` // Contexts is a map of referencable names to context configs Contexts map[string]*Context `json:"contexts"` // CurrentContext is the name of the context that you would like to use by default CurrentContext string `json:"current-context"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` } // IMPORTANT if you add fields to this struct, please update IsConfigEmpty() type Preferences struct { // +optional Colors bool `json:"colors,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` } // Cluster contains information about how to communicate with a kubernetes cluster type Cluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false LocationOfOrigin string `json:"-"` // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. // +optional TLSServerName string `json:"tls-server-name,omitempty"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. 
// +optional InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` // CertificateAuthority is the path to a cert file for the certificate authority. // +optional CertificateAuthority string `json:"certificate-authority,omitempty"` // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority // +optional CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` // ProxyURL is the URL to the proxy to be used for all requests made by this // client. URLs with "http", "https", and "socks5" schemes are supported. If // this configuration is not provided or the empty string, the client // attempts to construct a proxy configuration from http_proxy and // https_proxy environment variables. If these environment variables are not // set, the client does not attempt to proxy requests. // // socks5 proxying does not currently support spdy streaming endpoints (exec, // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. // +optional DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` } // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. 
// +k8s:conversion-gen=false LocationOfOrigin string `json:"-"` // ClientCertificate is the path to a client cert file for TLS. // +optional ClientCertificate string `json:"client-certificate,omitempty"` // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate // +optional ClientCertificateData []byte `json:"client-certificate-data,omitempty"` // ClientKey is the path to a client key file for TLS. // +optional ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` // Impersonate is the username to act-as. // +optional Impersonate string `json:"act-as,omitempty"` // ImpersonateUID is the uid to impersonate. // +optional ImpersonateUID string `json:"act-as-uid,omitempty"` // ImpersonateGroups is the groups to impersonate. // +optional ImpersonateGroups []string `json:"act-as-groups,omitempty"` // ImpersonateUserExtra contains additional information for impersonated user. // +optional ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. // +optional Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. 
// +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. // +optional Exec *ExecConfig `json:"exec,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` } // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false LocationOfOrigin string `json:"-"` // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context AuthInfo string `json:"user"` // Namespace is the default namespace to use on unspecified requests // +optional Namespace string `json:"namespace,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` } // AuthProviderConfig holds the configuration for a specified auth provider. type AuthProviderConfig struct { Name string `json:"name"` // +optional Config map[string]string `json:"config,omitempty"` } var _ fmt.Stringer = new(AuthProviderConfig) var _ fmt.GoStringer = new(AuthProviderConfig) // GoString implements fmt.GoStringer and sanitizes sensitive fields of // AuthProviderConfig to prevent accidental leaking via logs. func (c AuthProviderConfig) GoString() string { return c.String() } // String implements fmt.Stringer and sanitizes sensitive fields of // AuthProviderConfig to prevent accidental leaking via logs. 
func (c AuthProviderConfig) String() string { cfg := "<nil>" if c.Config != nil { cfg = "--- REDACTED ---" } return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) } // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. // // See the client.authentication.k8s.io API group for specifications of the exact input // and output format type ExecConfig struct { // Command to execute. Command string `json:"command"` // Arguments to pass to the command when executing it. // +optional Args []string `json:"args"` // Env defines additional environment variables to expose to the process. These // are unioned with the host's environment, as well as variables client-go uses // to pass argument to the plugin. // +optional Env []ExecEnvVar `json:"env"` // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use // the same encoding version as the input. APIVersion string `json:"apiVersion,omitempty"` // This text is shown to the user when the executable doesn't seem to be // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` // ProvideClusterInfo determines whether or not to provide cluster information, // which could potentially contain very large CA data, to this exec plugin as a // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for // reading this environment variable. ProvideClusterInfo bool `json:"provideClusterInfo"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. // // This data is sourced from the clientcmd Cluster object's extensions[exec] field: // // clusters: // - name: my-cluster // cluster: // ... 
// extensions: // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config // extension: // audience: 06e3fbd18de8 # arbitrary config // // In some environments, the user config may be exactly the same across many clusters // (i.e. call this exec plugin) minus some details that are specific to each cluster // such as the audience. This field allows the per cluster config to be directly // specified with the cluster info. Using this field to store secret data is not // recommended as one of the prime benefits of exec plugins is that no secrets need // to be stored directly in the kubeconfig. // +k8s:conversion-gen=false Config runtime.Object `json:"-"` // InteractiveMode determines this plugin's relationship with standard input. Valid // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this // exec plugin wants to use standard input if it is available), or "Always" (this exec // plugin requires standard input to function). See ExecInteractiveMode values for more // details. // // If APIVersion is client.authentication.k8s.io/v1alpha1 or // client.authentication.k8s.io/v1beta1, then this field is optional and defaults // to "IfAvailable" when unset. Otherwise, this field is required. // +optional InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` // StdinUnavailable indicates whether the exec authenticator can pass standard // input through to this exec plugin. For example, a higher level entity might be using // standard input for something else and therefore it would not be safe for the exec // plugin to use standard input. This is kept here in order to keep all of the exec configuration // together, but it is never serialized. 
// +k8s:conversion-gen=false StdinUnavailable bool `json:"-"` // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator // cannot successfully run this exec plugin because it needs to use standard input and // StdinUnavailable is true. For example, a process that is already using standard input to // read user instructions might set this to "used by my-program to read user instructions". // +k8s:conversion-gen=false StdinUnavailableMessage string `json:"-"` } var _ fmt.Stringer = new(ExecConfig) var _ fmt.GoStringer = new(ExecConfig) // GoString implements fmt.GoStringer and sanitizes sensitive fields of // ExecConfig to prevent accidental leaking via logs. func (c ExecConfig) GoString() string { return c.String() } // String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig // to prevent accidental leaking via logs. func (c ExecConfig) String() string { var args []string if len(c.Args) > 0 { args = []string{"--- REDACTED ---"} } env := "[]ExecEnvVar(nil)" if len(c.Env) > 0 { env = "[]ExecEnvVar{--- REDACTED ---}" } config := "runtime.Object(nil)" if c.Config != nil { config = "runtime.Object(--- REDACTED ---)" } return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s, StdinUnavailable: %t}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config, c.StdinUnavailable) } // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { Name string `json:"name"` Value string `json:"value"` } // ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input. type ExecInteractiveMode string const ( // NeverExecInteractiveMode declares that this exec plugin never needs to use standard // input, and therefore the exec plugin will be run regardless of whether standard input is // available for user input. 
NeverExecInteractiveMode ExecInteractiveMode = "Never" // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input // if it is available, but can still operate if standard input is not available. Therefore, the // exec plugin will be run regardless of whether stdin is available for user input. If standard // input is available for user input, then it will be provided to this exec plugin. IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable" // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to // run, and therefore the exec plugin will only be run if standard input is available for user // input. If standard input is not available for user input, then the exec plugin will not be run // and an error will be returned by the exec plugin runner. AlwaysExecInteractiveMode ExecInteractiveMode = "Always" ) // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewConfig() *Config { return &Config{ Preferences: *NewPreferences(), Clusters: make(map[string]*Cluster), AuthInfos: make(map[string]*AuthInfo), Contexts: make(map[string]*Context), Extensions: make(map[string]runtime.Object), } } // NewContext is a convenience function that returns a new Context // object with non-nil maps func NewContext() *Context { return &Context{Extensions: make(map[string]runtime.Object)} } // NewCluster is a convenience function that returns a new Cluster // object with non-nil maps func NewCluster() *Cluster { return &Cluster{Extensions: make(map[string]runtime.Object)} } // NewAuthInfo is a convenience function that returns a new AuthInfo // object with non-nil maps func NewAuthInfo() *AuthInfo { return &AuthInfo{ Extensions: make(map[string]runtime.Object), ImpersonateUserExtra: make(map[string][]string), } } // NewPreferences is a convenience function that returns a new // Preferences object with non-nil maps func NewPreferences() *Preferences { return 
&Preferences{Extensions: make(map[string]runtime.Object)} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
vendor/k8s.io/client-go/tools/clientcmd/api/register.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package api import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // SchemeGroupVersion is group version used to register these objects // TODO this should be in the "kubeconfig" group var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme ) func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Config{}, ) return nil } func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } func (obj *Config) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

package api // import "k8s.io/client-go/tools/clientcmd/api"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package api import ( "encoding/base64" "errors" "fmt" "os" "path/filepath" "reflect" "strings" ) func init() { sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") redactedBytes = []byte(string(sDec)) sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED") dataOmittedBytes = []byte(string(sDec)) } // IsConfigEmpty returns true if the config is empty. func IsConfigEmpty(config *Config) bool { return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && len(config.CurrentContext) == 0 && len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && len(config.Extensions) == 0 } // MinifyConfig read the current context and uses that to keep only the relevant pieces of config // This is useful for making secrets based on kubeconfig files func MinifyConfig(config *Config) error { if len(config.CurrentContext) == 0 { return errors.New("current-context must exist in order to minify") } currContext, exists := config.Contexts[config.CurrentContext] if !exists { return fmt.Errorf("cannot locate context %v", config.CurrentContext) } newContexts := map[string]*Context{} newContexts[config.CurrentContext] = currContext newClusters := map[string]*Cluster{} if len(currContext.Cluster) > 0 { if _, exists := config.Clusters[currContext.Cluster]; !exists { return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) } newClusters[currContext.Cluster] = 
config.Clusters[currContext.Cluster] } newAuthInfos := map[string]*AuthInfo{} if len(currContext.AuthInfo) > 0 { if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) } newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] } config.AuthInfos = newAuthInfos config.Clusters = newClusters config.Contexts = newContexts return nil } var ( dataOmittedBytes []byte redactedBytes []byte ) // ShortenConfig redacts raw data entries from the config object for a human-readable view. func ShortenConfig(config *Config) { // trick json encoder into printing a human-readable string in the raw data // by base64 decoding what we want to print. Relies on implementation of // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte for key, authInfo := range config.AuthInfos { if len(authInfo.ClientKeyData) > 0 { authInfo.ClientKeyData = dataOmittedBytes } if len(authInfo.ClientCertificateData) > 0 { authInfo.ClientCertificateData = dataOmittedBytes } if len(authInfo.Token) > 0 { authInfo.Token = "REDACTED" } config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { if len(cluster.CertificateAuthorityData) > 0 { cluster.CertificateAuthorityData = dataOmittedBytes } config.Clusters[key] = cluster } } // FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "") if err != nil { return err } if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { return err } if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { return err } config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "") 
if err != nil { return err } if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { return err } config.Clusters[key] = cluster } return nil } func FlattenContent(path *string, contents *[]byte, baseDir string) error { if len(*path) != 0 { if len(*contents) > 0 { return errors.New("cannot have values for both path and contents") } var err error absPath := ResolvePath(*path, baseDir) *contents, err = os.ReadFile(absPath) if err != nil { return err } *path = "" } return nil } // ResolvePath returns the path as an absolute paths, relative to the given base directory func ResolvePath(path string, base string) string { // Don't resolve empty paths if len(path) > 0 { // Don't resolve absolute paths if !filepath.IsAbs(path) { return filepath.Join(base, path) } } return path } func MakeAbs(path, base string) (string, error) { if filepath.IsAbs(path) { return path, nil } if len(base) == 0 { cwd, err := os.Getwd() if err != nil { return "", err } base = cwd } return filepath.Join(base, path), nil } // RedactSecrets replaces any sensitive values with REDACTED func RedactSecrets(config *Config) error { return redactSecrets(reflect.ValueOf(config), false) } func redactSecrets(curr reflect.Value, redact bool) error { redactedBytes = []byte("REDACTED") if !curr.IsValid() { return nil } actualCurrValue := curr if curr.Kind() == reflect.Ptr { actualCurrValue = curr.Elem() } switch actualCurrValue.Kind() { case reflect.Map: for _, v := range actualCurrValue.MapKeys() { err := redactSecrets(actualCurrValue.MapIndex(v), false) if err != nil { return err } } return nil case reflect.String: if redact { if !actualCurrValue.IsZero() { actualCurrValue.SetString("REDACTED") } } return nil case reflect.Slice: if actualCurrValue.Type() == reflect.TypeOf([]byte{}) && redact { if !actualCurrValue.IsNil() { actualCurrValue.SetBytes(redactedBytes) } return nil } for i := 0; i < actualCurrValue.Len(); i++ { err := 
redactSecrets(actualCurrValue.Index(i), false) if err != nil { return err } } return nil case reflect.Struct: for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ { currFieldValue := actualCurrValue.Field(fieldIndex) currFieldType := actualCurrValue.Type().Field(fieldIndex) currYamlTag := currFieldType.Tag.Get("datapolicy") currFieldTypeYamlName := strings.Split(currYamlTag, ",")[0] if currFieldTypeYamlName != "" { err := redactSecrets(currFieldValue, true) if err != nil { return err } } else { err := redactSecrets(currFieldValue, false) if err != nil { return err } } } return nil default: return nil } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package latest import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/clientcmd/api/v1" ) // Version is the string that represents the current external default version. const Version = "v1" var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} // OldestVersion is the string that represents the oldest server version supported, // for client code that wants to hardcode the lowest common denominator. const OldestVersion = "v1" // Versions is the list of versions that are recognized in code. The order provided // may be assumed to be least feature rich to most feature rich, and clients may // choose to prefer the latter items in the list over the former items when presented // with a set of versions to choose. 
var Versions = []string{"v1"} var ( Codec runtime.Codec Scheme *runtime.Scheme ) func init() { Scheme = runtime.NewScheme() utilruntime.Must(api.AddToScheme(Scheme)) utilruntime.Must(v1.AddToScheme(Scheme)) yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true}) Codec = versioning.NewDefaultingCodecForScheme( Scheme, yamlSerializer, yamlSerializer, schema.GroupVersion{Version: Version}, runtime.InternalGroupVersioner, ) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { *out = *in if in.ClientCertificateData != nil { in, out := &in.ClientCertificateData, &out.ClientCertificateData *out = make([]byte, len(*in)) copy(*out, *in) } if in.ClientKeyData != nil { in, out := &in.ClientKeyData, &out.ClientKeyData *out = make([]byte, len(*in)) copy(*out, *in) } if in.ImpersonateGroups != nil { in, out := &in.ImpersonateGroups, &out.ImpersonateGroups *out = make([]string, len(*in)) copy(*out, *in) } if in.ImpersonateUserExtra != nil { in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra *out = make(map[string][]string, len(*in)) for key, val := range *in { var outVal []string if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal *out = make([]string, len(*in)) copy(*out, *in) } (*out)[key] = outVal } } if in.AuthProvider != nil { in, out := &in.AuthProvider, &out.AuthProvider *out = new(AuthProviderConfig) (*in).DeepCopyInto(*out) } if in.Exec != nil { in, out := &in.Exec, &out.Exec *out = new(ExecConfig) (*in).DeepCopyInto(*out) } if in.Extensions != nil { in, out := &in.Extensions, &out.Extensions *out = make([]NamedExtension, 
len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. func (in *AuthInfo) DeepCopy() *AuthInfo { if in == nil { return nil } out := new(AuthInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { if in == nil { return nil } out := new(AuthProviderConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in if in.CertificateAuthorityData != nil { in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData *out = make([]byte, len(*in)) copy(*out, *in) } if in.Extensions != nil { in, out := &in.Extensions, &out.Extensions *out = make([]NamedExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. func (in *Cluster) DeepCopy() *Cluster { if in == nil { return nil } out := new(Cluster) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Config) DeepCopyInto(out *Config) { *out = *in in.Preferences.DeepCopyInto(&out.Preferences) if in.Clusters != nil { in, out := &in.Clusters, &out.Clusters *out = make([]NamedCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.AuthInfos != nil { in, out := &in.AuthInfos, &out.AuthInfos *out = make([]NamedAuthInfo, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Contexts != nil { in, out := &in.Contexts, &out.Contexts *out = make([]NamedContext, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Extensions != nil { in, out := &in.Extensions, &out.Extensions *out = make([]NamedExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. func (in *Config) DeepCopy() *Config { if in == nil { return nil } out := new(Config) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *Config) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Context) DeepCopyInto(out *Context) { *out = *in if in.Extensions != nil { in, out := &in.Extensions, &out.Extensions *out = make([]NamedExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. func (in *Context) DeepCopy() *Context { if in == nil { return nil } out := new(Context) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { *out = *in if in.Args != nil { in, out := &in.Args, &out.Args *out = make([]string, len(*in)) copy(*out, *in) } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]ExecEnvVar, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. func (in *ExecConfig) DeepCopy() *ExecConfig { if in == nil { return nil } out := new(ExecConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { if in == nil { return nil } out := new(ExecEnvVar) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) { *out = *in in.AuthInfo.DeepCopyInto(&out.AuthInfo) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo. func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo { if in == nil { return nil } out := new(NamedAuthInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedCluster) DeepCopyInto(out *NamedCluster) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster. func (in *NamedCluster) DeepCopy() *NamedCluster { if in == nil { return nil } out := new(NamedCluster) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NamedContext) DeepCopyInto(out *NamedContext) { *out = *in in.Context.DeepCopyInto(&out.Context) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext. func (in *NamedContext) DeepCopy() *NamedContext { if in == nil { return nil } out := new(NamedContext) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedExtension) DeepCopyInto(out *NamedExtension) { *out = *in in.Extension.DeepCopyInto(&out.Extension) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension. func (in *NamedExtension) DeepCopy() *NamedExtension { if in == nil { return nil } out := new(NamedExtension) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Preferences) DeepCopyInto(out *Preferences) { *out = *in if in.Extensions != nil { in, out := &in.Extensions, &out.Extensions *out = make([]NamedExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. func (in *Preferences) DeepCopy() *Preferences { if in == nil { return nil } out := new(Preferences) in.DeepCopyInto(out) return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "k8s.io/apimachinery/pkg/runtime" ) // Where possible, json tags match the cli argument names. // Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. // Config holds the information needed to build connect to remote kubernetes clusters as a given user // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. // +k8s:conversion-gen=false // +optional Kind string `json:"kind,omitempty"` // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. 
// +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions Preferences Preferences `json:"preferences"` // Clusters is a map of referencable names to cluster configs Clusters []NamedCluster `json:"clusters"` // AuthInfos is a map of referencable names to user configs AuthInfos []NamedAuthInfo `json:"users"` // Contexts is a map of referencable names to context configs Contexts []NamedContext `json:"contexts"` // CurrentContext is the name of the context that you would like to use by default CurrentContext string `json:"current-context"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` } type Preferences struct { // +optional Colors bool `json:"colors,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` } // Cluster contains information about how to communicate with a kubernetes cluster type Cluster struct { // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. // +optional TLSServerName string `json:"tls-server-name,omitempty"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. // +optional InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` // CertificateAuthority is the path to a cert file for the certificate authority. // +optional CertificateAuthority string `json:"certificate-authority,omitempty"` // CertificateAuthorityData contains PEM-encoded certificate authority certificates. 
Overrides CertificateAuthority // +optional CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` // ProxyURL is the URL to the proxy to be used for all requests made by this // client. URLs with "http", "https", and "socks5" schemes are supported. If // this configuration is not provided or the empty string, the client // attempts to construct a proxy configuration from http_proxy and // https_proxy environment variables. If these environment variables are not // set, the client does not attempt to proxy requests. // // socks5 proxying does not currently support spdy streaming endpoints (exec, // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. // +optional DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` } // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. type AuthInfo struct { // ClientCertificate is the path to a client cert file for TLS. // +optional ClientCertificate string `json:"client-certificate,omitempty"` // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate // +optional ClientCertificateData []byte `json:"client-certificate-data,omitempty"` // ClientKey is the path to a client key file for TLS. 
// +optional ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` // Impersonate is the username to impersonate. The name matches the flag. // +optional Impersonate string `json:"as,omitempty"` // ImpersonateUID is the uid to impersonate. // +optional ImpersonateUID string `json:"as-uid,omitempty"` // ImpersonateGroups is the groups to impersonate. // +optional ImpersonateGroups []string `json:"as-groups,omitempty"` // ImpersonateUserExtra contains additional information for impersonated user. // +optional ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. // +optional Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. // +optional Exec *ExecConfig `json:"exec,omitempty"` // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` } // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type Context struct { // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context AuthInfo string `json:"user"` // Namespace is the default namespace to use on unspecified requests // +optional Namespace string `json:"namespace,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` } // NamedCluster relates nicknames to cluster information type NamedCluster struct { // Name is the nickname for this Cluster Name string `json:"name"` // Cluster holds the cluster information Cluster Cluster `json:"cluster"` } // NamedContext relates nicknames to context information type NamedContext struct { // Name is the nickname for this Context Name string `json:"name"` // Context holds the context information Context Context `json:"context"` } // NamedAuthInfo relates nicknames to auth information type NamedAuthInfo struct { // Name is the nickname for this AuthInfo Name string `json:"name"` // AuthInfo holds the auth information AuthInfo AuthInfo `json:"user"` } // NamedExtension relates nicknames to extension information type NamedExtension struct { // Name is the nickname for this Extension Name string `json:"name"` // Extension holds the extension information Extension runtime.RawExtension `json:"extension"` } // AuthProviderConfig holds the configuration for a specified auth provider. 
type AuthProviderConfig struct { Name string `json:"name"` Config map[string]string `json:"config"` } // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. // // See the client.authentication.k8s.io API group for specifications of the exact input // and output format type ExecConfig struct { // Command to execute. Command string `json:"command"` // Arguments to pass to the command when executing it. // +optional Args []string `json:"args"` // Env defines additional environment variables to expose to the process. These // are unioned with the host's environment, as well as variables client-go uses // to pass argument to the plugin. // +optional Env []ExecEnvVar `json:"env"` // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use // the same encoding version as the input. APIVersion string `json:"apiVersion,omitempty"` // This text is shown to the user when the executable doesn't seem to be // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` // ProvideClusterInfo determines whether or not to provide cluster information, // which could potentially contain very large CA data, to this exec plugin as a // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for // reading this environment variable. ProvideClusterInfo bool `json:"provideClusterInfo"` // InteractiveMode determines this plugin's relationship with standard input. Valid // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this // exec plugin wants to use standard input if it is available), or "Always" (this exec // plugin requires standard input to function). See ExecInteractiveMode values for more // details. 
// // If APIVersion is client.authentication.k8s.io/v1alpha1 or // client.authentication.k8s.io/v1beta1, then this field is optional and defaults // to "IfAvailable" when unset. Otherwise, this field is required. //+optional InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` } // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { Name string `json:"name"` Value string `json:"value"` } // ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input. type ExecInteractiveMode string const ( // NeverExecInteractiveMode declares that this exec plugin never needs to use standard // input, and therefore the exec plugin will be run regardless of whether standard input is // available for user input. NeverExecInteractiveMode ExecInteractiveMode = "Never" // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input // if it is available, but can still operate if standard input is not available. Therefore, the // exec plugin will be run regardless of whether stdin is available for user input. If standard // input is available for user input, then it will be provided to this exec plugin. IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable" // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to // run, and therefore the exec plugin will only be run if standard input is available for user // input. If standard input is not available for user input, then the exec plugin will not be run // and an error will be returned by the exec plugin runner. AlwaysExecInteractiveMode ExecInteractiveMode = "Always" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by conversion-gen. DO NOT EDIT. package v1 import ( unsafe "unsafe" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" api "k8s.io/client-go/tools/clientcmd/api" ) func init() { localSchemeBuilder.Register(RegisterConversions) } // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { if err := s.AddGeneratedConversionFunc((*AuthInfo)(nil), (*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_AuthInfo_To_api_AuthInfo(a.(*AuthInfo), b.(*api.AuthInfo), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.AuthInfo)(nil), (*AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_AuthInfo_To_v1_AuthInfo(a.(*api.AuthInfo), b.(*AuthInfo), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*AuthProviderConfig)(nil), (*api.AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(a.(*AuthProviderConfig), b.(*api.AuthProviderConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.AuthProviderConfig)(nil), (*AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(a.(*api.AuthProviderConfig), b.(*AuthProviderConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Cluster_To_api_Cluster(a.(*Cluster), b.(*api.Cluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_Cluster_To_v1_Cluster(a.(*api.Cluster), b.(*Cluster), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*Config)(nil), (*api.Config)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Config_To_api_Config(a.(*Config), b.(*api.Config), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.Config)(nil), (*Config)(nil), func(a, b interface{}, scope 
conversion.Scope) error { return Convert_api_Config_To_v1_Config(a.(*api.Config), b.(*Config), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*Context)(nil), (*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Context_To_api_Context(a.(*Context), b.(*api.Context), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.Context)(nil), (*Context)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_Context_To_v1_Context(a.(*api.Context), b.(*Context), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ExecConfig)(nil), (*api.ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ExecConfig_To_api_ExecConfig(a.(*ExecConfig), b.(*api.ExecConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.ExecConfig)(nil), (*ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_ExecConfig_To_v1_ExecConfig(a.(*api.ExecConfig), b.(*ExecConfig), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ExecEnvVar)(nil), (*api.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ExecEnvVar_To_api_ExecEnvVar(a.(*ExecEnvVar), b.(*api.ExecEnvVar), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.ExecEnvVar)(nil), (*ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_ExecEnvVar_To_v1_ExecEnvVar(a.(*api.ExecEnvVar), b.(*ExecEnvVar), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*Preferences)(nil), (*api.Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_Preferences_To_api_Preferences(a.(*Preferences), b.(*api.Preferences), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*api.Preferences)(nil), 
(*Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_api_Preferences_To_v1_Preferences(a.(*api.Preferences), b.(*Preferences), scope) }); err != nil { return err } if err := s.AddConversionFunc((*map[string]*api.AuthInfo)(nil), (*[]NamedAuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(a.(*map[string]*api.AuthInfo), b.(*[]NamedAuthInfo), scope) }); err != nil { return err } if err := s.AddConversionFunc((*map[string]*api.Cluster)(nil), (*[]NamedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(a.(*map[string]*api.Cluster), b.(*[]NamedCluster), scope) }); err != nil { return err } if err := s.AddConversionFunc((*map[string]*api.Context)(nil), (*[]NamedContext)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(a.(*map[string]*api.Context), b.(*[]NamedContext), scope) }); err != nil { return err } if err := s.AddConversionFunc((*map[string]runtime.Object)(nil), (*[]NamedExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(a.(*map[string]runtime.Object), b.(*[]NamedExtension), scope) }); err != nil { return err } if err := s.AddConversionFunc((*[]NamedAuthInfo)(nil), (*map[string]*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(a.(*[]NamedAuthInfo), b.(*map[string]*api.AuthInfo), scope) }); err != nil { return err } if err := s.AddConversionFunc((*[]NamedCluster)(nil), (*map[string]*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(a.(*[]NamedCluster), 
b.(*map[string]*api.Cluster), scope) }); err != nil { return err } if err := s.AddConversionFunc((*[]NamedContext)(nil), (*map[string]*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(a.(*[]NamedContext), b.(*map[string]*api.Context), scope) }); err != nil { return err } if err := s.AddConversionFunc((*[]NamedExtension)(nil), (*map[string]runtime.Object)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(a.(*[]NamedExtension), b.(*map[string]runtime.Object), scope) }); err != nil { return err } return nil } func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { out.ClientCertificate = in.ClientCertificate out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) out.ClientKey = in.ClientKey out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username out.Password = in.Password out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) if in.Exec != nil { in, out := &in.Exec, &out.Exec *out = new(api.ExecConfig) if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil { return err } } else { out.Exec = nil } if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_v1_AuthInfo_To_api_AuthInfo is an autogenerated conversion function. 
func Convert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { return autoConvert_v1_AuthInfo_To_api_AuthInfo(in, out, s) } func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { // INFO: in.LocationOfOrigin opted out of conversion generation out.ClientCertificate = in.ClientCertificate out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) out.ClientKey = in.ClientKey out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username out.Password = in.Password out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) if in.Exec != nil { in, out := &in.Exec, &out.Exec *out = new(ExecConfig) if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil { return err } } else { out.Exec = nil } if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_api_AuthInfo_To_v1_AuthInfo is an autogenerated conversion function. func Convert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { return autoConvert_api_AuthInfo_To_v1_AuthInfo(in, out, s) } func autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { out.Name = in.Name out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) return nil } // Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig is an autogenerated conversion function. 
func Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { return autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in, out, s) } func autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { out.Name = in.Name out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) return nil } // Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig is an autogenerated conversion function. func Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { return autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in, out, s) } func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { out.Server = in.Server out.TLSServerName = in.TLSServerName out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL out.DisableCompression = in.DisableCompression if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_v1_Cluster_To_api_Cluster is an autogenerated conversion function. 
func Convert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { return autoConvert_v1_Cluster_To_api_Cluster(in, out, s) } func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { // INFO: in.LocationOfOrigin opted out of conversion generation out.Server = in.Server out.TLSServerName = in.TLSServerName out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL out.DisableCompression = in.DisableCompression if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_api_Cluster_To_v1_Cluster is an autogenerated conversion function. func Convert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { return autoConvert_api_Cluster_To_v1_Cluster(in, out, s) } func autoConvert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { // INFO: in.Kind opted out of conversion generation // INFO: in.APIVersion opted out of conversion generation if err := Convert_v1_Preferences_To_api_Preferences(&in.Preferences, &out.Preferences, s); err != nil { return err } if err := Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(&in.Clusters, &out.Clusters, s); err != nil { return err } if err := Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { return err } if err := Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(&in.Contexts, &out.Contexts, s); err != nil { return err } out.CurrentContext = in.CurrentContext if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_v1_Config_To_api_Config is 
an autogenerated conversion function. func Convert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { return autoConvert_v1_Config_To_api_Config(in, out, s) } func autoConvert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { // INFO: in.Kind opted out of conversion generation // INFO: in.APIVersion opted out of conversion generation if err := Convert_api_Preferences_To_v1_Preferences(&in.Preferences, &out.Preferences, s); err != nil { return err } if err := Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(&in.Clusters, &out.Clusters, s); err != nil { return err } if err := Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { return err } if err := Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(&in.Contexts, &out.Contexts, s); err != nil { return err } out.CurrentContext = in.CurrentContext if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_api_Config_To_v1_Config is an autogenerated conversion function. func Convert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { return autoConvert_api_Config_To_v1_Config(in, out, s) } func autoConvert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { out.Cluster = in.Cluster out.AuthInfo = in.AuthInfo out.Namespace = in.Namespace if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_v1_Context_To_api_Context is an autogenerated conversion function. 
func Convert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { return autoConvert_v1_Context_To_api_Context(in, out, s) } func autoConvert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { // INFO: in.LocationOfOrigin opted out of conversion generation out.Cluster = in.Cluster out.AuthInfo = in.AuthInfo out.Namespace = in.Namespace if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_api_Context_To_v1_Context is an autogenerated conversion function. func Convert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { return autoConvert_api_Context_To_v1_Context(in, out, s) } func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { out.Command = in.Command out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint out.ProvideClusterInfo = in.ProvideClusterInfo out.InteractiveMode = api.ExecInteractiveMode(in.InteractiveMode) return nil } // Convert_v1_ExecConfig_To_api_ExecConfig is an autogenerated conversion function. 
func Convert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { return autoConvert_v1_ExecConfig_To_api_ExecConfig(in, out, s) } func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { out.Command = in.Command out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint out.ProvideClusterInfo = in.ProvideClusterInfo // INFO: in.Config opted out of conversion generation out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode) // INFO: in.StdinUnavailable opted out of conversion generation // INFO: in.StdinUnavailableMessage opted out of conversion generation return nil } // Convert_api_ExecConfig_To_v1_ExecConfig is an autogenerated conversion function. func Convert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { return autoConvert_api_ExecConfig_To_v1_ExecConfig(in, out, s) } func autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_v1_ExecEnvVar_To_api_ExecEnvVar is an autogenerated conversion function. func Convert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { return autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in, out, s) } func autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value return nil } // Convert_api_ExecEnvVar_To_v1_ExecEnvVar is an autogenerated conversion function. 
func Convert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { return autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in, out, s) } func autoConvert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { out.Colors = in.Colors if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_v1_Preferences_To_api_Preferences is an autogenerated conversion function. func Convert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { return autoConvert_v1_Preferences_To_api_Preferences(in, out, s) } func autoConvert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { out.Colors = in.Colors if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } return nil } // Convert_api_Preferences_To_v1_Preferences is an autogenerated conversion function. func Convert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { return autoConvert_api_Preferences_To_v1_Preferences(in, out, s) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // SchemeGroupVersion is group version used to register these objects // TODO this should be in the "kubeconfig" group var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} var ( // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) } func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Config{}, ) return nil } func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } func (obj *Config) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "fmt" "sort" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd/api" ) func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { for _, curr := range *in { newCluster := api.NewCluster() if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil { return err } if *out == nil { *out = make(map[string]*api.Cluster) } if (*out)[curr.Name] == nil { (*out)[curr.Name] = newCluster } else { return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) } } return nil } func Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) } sort.Strings(allKeys) for _, key := range allKeys { newCluster := (*in)[key] oldCluster := Cluster{} if err := Convert_api_Cluster_To_v1_Cluster(newCluster, &oldCluster, s); err != nil { return err } namedCluster := NamedCluster{key, oldCluster} *out = append(*out, namedCluster) } return nil } func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { for _, 
curr := range *in { newAuthInfo := api.NewAuthInfo() if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil { return err } if *out == nil { *out = make(map[string]*api.AuthInfo) } if (*out)[curr.Name] == nil { (*out)[curr.Name] = newAuthInfo } else { return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) } } return nil } func Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) } sort.Strings(allKeys) for _, key := range allKeys { newAuthInfo := (*in)[key] oldAuthInfo := AuthInfo{} if err := Convert_api_AuthInfo_To_v1_AuthInfo(newAuthInfo, &oldAuthInfo, s); err != nil { return err } namedAuthInfo := NamedAuthInfo{key, oldAuthInfo} *out = append(*out, namedAuthInfo) } return nil } func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { for _, curr := range *in { newContext := api.NewContext() if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil { return err } if *out == nil { *out = make(map[string]*api.Context) } if (*out)[curr.Name] == nil { (*out)[curr.Name] = newContext } else { return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in) } } return nil } func Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) } sort.Strings(allKeys) for _, key := range allKeys { newContext := (*in)[key] oldContext := Context{} if err := Convert_api_Context_To_v1_Context(newContext, &oldContext, 
s); err != nil { return err } namedContext := NamedContext{key, oldContext} *out = append(*out, namedContext) } return nil } func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { for _, curr := range *in { var newExtension runtime.Object if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil { return err } if *out == nil { *out = make(map[string]runtime.Object) } if (*out)[curr.Name] == nil { (*out)[curr.Name] = newExtension } else { return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) } } return nil } func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) } sort.Strings(allKeys) for _, key := range allKeys { newExtension := (*in)[key] oldExtension := runtime.RawExtension{} if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { return err } namedExtension := NamedExtension{key, oldExtension} *out = append(*out, namedExtension) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { return RegisterDefaults(scheme) } func SetDefaults_ExecConfig(exec *ExecConfig) { if len(exec.InteractiveMode) == 0 { switch exec.APIVersion { case "client.authentication.k8s.io/v1beta1", "client.authentication.k8s.io/v1alpha1": // default to IfAvailableExecInteractiveMode for backwards compatibility exec.InteractiveMode = IfAvailableExecInteractiveMode default: // require other versions to explicitly declare whether they want stdin or not } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by defaulter-gen. DO NOT EDIT.

package v1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&Config{}, func(obj interface{}) { SetObjectDefaults_Config(obj.(*Config)) })
	return nil
}

// SetObjectDefaults_Config applies defaults to a Config and to every nested
// field with a registered defaulter (currently only each AuthInfo's Exec block).
func SetObjectDefaults_Config(in *Config) {
	for i := range in.AuthInfos {
		a := &in.AuthInfos[i]
		if a.AuthInfo.Exec != nil {
			SetDefaults_ExecConfig(a.AuthInfo.Exec)
		}
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=Kind package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/auth/clientauth.go
vendor/k8s.io/client-go/tools/auth/clientauth.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package auth defines a file format for holding authentication information needed by clients of Kubernetes. Typically, a Kubernetes cluster will put auth info for the admin in a known location when it is created, and will (soon) put it in a known location within a Container's file tree for Containers that need access to the Kubernetes API. Having a defined format allows: - clients to be implemented in multiple languages - applications which link clients to be portable across clusters with different authentication styles (e.g. some may use SSL Client certs, others may not, etc) - when the format changes, applications only need to update this code. The file format is json, marshalled from a struct authcfg.Info. Client libraries in other languages should use the same format. It is not intended to store general preferences, such as default namespace, output options, etc. CLIs (such as kubectl) and UIs should develop their own format and may wish to inline the authcfg.Info type. The authcfg.Info is just a file format. It is distinct from client.Config which holds options for creating a client.Client. Helper functions are provided in this package to fill in a client.Client from an authcfg.Info. 
Example: import ( "pkg/client" "pkg/client/auth" ) info, err := auth.LoadFromFile(filename) if err != nil { // handle error } clientConfig = client.Config{} clientConfig.Host = "example.com:4901" clientConfig = info.MergeWithConfig() client := client.New(clientConfig) client.Pods(ns).List() */ package auth // TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. import ( "encoding/json" "os" restclient "k8s.io/client-go/rest" ) // Info holds Kubernetes API authorization config. It is intended // to be read/written from a file as a JSON object. type Info struct { User string Password string `datapolicy:"password"` CAFile string CertFile string KeyFile string BearerToken string `datapolicy:"token"` Insecure *bool } // LoadFromFile parses an Info object from a file path. // If the file does not exist, then os.IsNotExist(err) == true func LoadFromFile(path string) (*Info, error) { var info Info if _, err := os.Stat(path); os.IsNotExist(err) { return nil, err } data, err := os.ReadFile(path) if err != nil { return nil, err } err = json.Unmarshal(data, &info) if err != nil { return nil, err } return &info, err } // MergeWithConfig returns a copy of a client.Config with values from the Info. // The fields of client.Config with a corresponding field in the Info are set // with the value from the Info. func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { var config = c config.Username = info.User config.Password = info.Password config.CAFile = info.CAFile config.CertFile = info.CertFile config.KeyFile = info.KeyFile config.BearerToken = info.BearerToken if info.Insecure != nil { config.Insecure = *info.Insecure } return config, nil } // Complete returns true if the Kubernetes API authorization info is complete. func (info Info) Complete() bool { return len(info.User) > 0 || len(info.CertFile) > 0 || len(info.BearerToken) > 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
vendor/k8s.io/client-go/tools/cache/mutation_cache.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"strconv"
	"sync"
	"time"

	"k8s.io/klog/v2"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	utilcache "k8s.io/apimachinery/pkg/util/cache"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
)

// MutationCache is able to take the result of update operations and stores them in an LRU
// that can be used to provide a more current view of a requested object.  It requires interpreting
// resourceVersions for comparisons.
// Implementations must be thread-safe.
// TODO find a way to layer this into an informer/lister
type MutationCache interface {
	GetByKey(key string) (interface{}, bool, error)
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	Mutation(interface{})
}

// ResourceVersionComparator is able to compare object versions.
type ResourceVersionComparator interface {
	CompareResourceVersion(lhs, rhs runtime.Object) int
}

// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
// deal with objects that have a resource version that:
//
// - is an integer
// - increases when updated
// - is comparable across the same resource in a namespace
//
// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
// remains in the mutation cache before it is removed.
//
// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
// in the underlying store. This is only safe if your use of the cache can handle mutation entries
// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
	return &mutationCache{
		backingCache:  backingCache,
		indexer:       indexer,
		mutationCache: utilcache.NewLRUExpireCache(100),
		comparator:    etcdObjectVersioner{},
		ttl:           ttl,
		includeAdds:   includeAdds,
	}
}

// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and
// since you can't distinguish between, "didn't observe create" and "was deleted after create",
// if the key is missing from the backing cache, we always return it as missing
type mutationCache struct {
	// lock guards every field below; all public methods take it.
	lock          sync.Mutex
	backingCache  Store
	indexer       Indexer
	mutationCache *utilcache.LRUExpireCache
	includeAdds   bool
	ttl           time.Duration

	comparator ResourceVersionComparator
}

// GetByKey is never guaranteed to return back the value set in Mutation.  It could be paged out, it could
// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key.
// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	obj, exists, err := c.backingCache.GetByKey(key)
	if err != nil {
		return nil, false, err
	}
	if !exists {
		if !c.includeAdds {
			// we can't distinguish between, "didn't observe create" and "was deleted after create", so
			// if the key is missing, we always return it as missing
			return nil, false, nil
		}
		// Fall back to the mutation cache for objects added but not yet
		// observed in the backing store.
		obj, exists = c.mutationCache.Get(key)
		if !exists {
			return nil, false, nil
		}
	}
	objRuntime, ok := obj.(runtime.Object)
	if !ok {
		// Non-runtime.Object entries cannot be version-compared; return as-is.
		return obj, true, nil
	}
	return c.newerObject(key, objRuntime), true, nil
}

// ByIndex returns the newer objects that match the provided index and indexer key.
// Will return an error if no indexer was provided.
func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.indexer == nil {
		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
	}
	keys, err := c.indexer.IndexKeys(name, indexKey)
	if err != nil {
		return nil, err
	}
	var items []interface{}
	// keySet records which keys the indexer already produced so the
	// includeAdds pass below does not emit duplicates.
	keySet := sets.NewString()
	for _, key := range keys {
		keySet.Insert(key)
		obj, exists, err := c.indexer.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if !exists {
			continue
		}
		if objRuntime, ok := obj.(runtime.Object); ok {
			items = append(items, c.newerObject(key, objRuntime))
		} else {
			items = append(items, obj)
		}
	}

	if c.includeAdds {
		fn := c.indexer.GetIndexers()[name]
		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
		for _, key := range c.mutationCache.Keys() {
			updated, ok := c.mutationCache.Get(key)
			if !ok {
				continue
			}
			if keySet.Has(key.(string)) {
				continue
			}
			// Re-run the index function on the mutated object to see whether it
			// belongs under the requested index key.
			elements, err := fn(updated)
			if err != nil {
				klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
				continue
			}
			for _, inIndex := range elements {
				if inIndex != indexKey {
					continue
				}
				items = append(items, updated)
				break
			}
		}
	}

	return items, nil
}

// newerObject checks the mutation cache for a newer object and returns one if found. If the
// mutated object is older than the backing object, it is removed from the mutation cache.
// Must be called while the lock is held.
func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
	mutatedObj, exists := c.mutationCache.Get(key)
	if !exists {
		return backing
	}
	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
	if !ok {
		return backing
	}
	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
		// The backing store has caught up; the cached mutation is stale.
		c.mutationCache.Remove(key)
		return backing
	}
	return mutatedObjRuntime
}

// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
// copy.  If you call Mutation twice with the same object on different threads, one will win, but its not defined
// which one.  This doesn't affect correctness, since the GetByKey guaranteed of "later of these two caches" is
// preserved, but you may not get the version of the object you want.  The object you get is only guaranteed to
// "one that was valid at some point in time", not "the one that I want".
func (c *mutationCache) Mutation(obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		// this is a "nice to have", so failures shouldn't do anything weird
		utilruntime.HandleError(err)
		return
	}

	if objRuntime, ok := obj.(runtime.Object); ok {
		// Never replace a newer cached mutation with an older one.
		if mutatedObj, exists := c.mutationCache.Get(key); exists {
			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
					return
				}
			}
		}
	}
	c.mutationCache.Add(key, obj, c.ttl)
}

// etcdObjectVersioner implements versioning and extracting etcd node information
// for objects that have an embedded ObjectMeta or ListMeta field.
type etcdObjectVersioner struct{}

// ObjectResourceVersion implements Versioner
func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return 0, err
	}
	version := accessor.GetResourceVersion()
	if len(version) == 0 {
		// An unset resource version sorts before every real version.
		return 0, nil
	}
	return strconv.ParseUint(version, 10, 64)
}

// CompareResourceVersion compares etcd resource versions.  Outside this API they are all strings,
// but etcd resource versions are special, they're actually ints, so we can easily compare them.
func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
	lhsVersion, err := a.ObjectResourceVersion(lhs)
	if err != nil {
		// coder error
		panic(err)
	}
	rhsVersion, err := a.ObjectResourceVersion(rhs)
	if err != nil {
		// coder error
		panic(err)
	}

	if lhsVersion == rhsVersion {
		return 0
	}
	if lhsVersion < rhsVersion {
		return -1
	}

	return 1
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache // FakeCustomStore lets you define custom functions for store operations. type FakeCustomStore struct { AddFunc func(obj interface{}) error UpdateFunc func(obj interface{}) error DeleteFunc func(obj interface{}) error ListFunc func() []interface{} ListKeysFunc func() []string GetFunc func(obj interface{}) (item interface{}, exists bool, err error) GetByKeyFunc func(key string) (item interface{}, exists bool, err error) ReplaceFunc func(list []interface{}, resourceVersion string) error ResyncFunc func() error } // Add calls the custom Add function if defined func (f *FakeCustomStore) Add(obj interface{}) error { if f.AddFunc != nil { return f.AddFunc(obj) } return nil } // Update calls the custom Update function if defined func (f *FakeCustomStore) Update(obj interface{}) error { if f.UpdateFunc != nil { return f.UpdateFunc(obj) } return nil } // Delete calls the custom Delete function if defined func (f *FakeCustomStore) Delete(obj interface{}) error { if f.DeleteFunc != nil { return f.DeleteFunc(obj) } return nil } // List calls the custom List function if defined func (f *FakeCustomStore) List() []interface{} { if f.ListFunc != nil { return f.ListFunc() } return nil } // ListKeys calls the custom ListKeys function if defined func (f *FakeCustomStore) ListKeys() []string { if f.ListKeysFunc != nil { return f.ListKeysFunc() } return nil } // Get calls the custom Get function if defined func (f 
*FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { if f.GetFunc != nil { return f.GetFunc(obj) } return nil, false, nil } // GetByKey calls the custom GetByKey function if defined func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { if f.GetByKeyFunc != nil { return f.GetByKeyFunc(key) } return nil, false, nil } // Replace calls the custom Replace function if defined func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { if f.ReplaceFunc != nil { return f.ReplaceFunc(list, resourceVersion) } return nil } // Resync calls the custom Resync function if defined func (f *FakeCustomStore) Resync() error { if f.ResyncFunc != nil { return f.ResyncFunc() } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/index.go
vendor/k8s.io/client-go/tools/cache/index.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/util/sets" ) // Indexer extends Store with multiple indices and restricts each // accumulator to simply hold the current object (and be empty after // Delete). // // There are three kinds of strings here: // 1. a storage key, as defined in the Store interface, // 2. a name of an index, and // 3. an "indexed value", which is produced by an IndexFunc and // can be a field value or any other string computed from the object. type Indexer interface { Store // Index returns the stored objects whose set of indexed values // intersects the set of indexed values of the given object, for // the named index Index(indexName string, obj interface{}) ([]interface{}, error) // IndexKeys returns the storage keys of the stored objects whose // set of indexed values for the named index includes the given // indexed value IndexKeys(indexName, indexedValue string) ([]string, error) // ListIndexFuncValues returns all the indexed values of the given index ListIndexFuncValues(indexName string) []string // ByIndex returns the stored objects whose set of indexed values // for the named index includes the given indexed value ByIndex(indexName, indexedValue string) ([]interface{}, error) // GetIndexers return the indexers GetIndexers() Indexers // AddIndexers adds more indexers to this store. 
This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error } // IndexFunc knows how to compute the set of indexed values for an object. type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns // unique values for every object. This conversion can create errors when more than one key is found. You // should prefer to make proper key and index functions. func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { return func(obj interface{}) (string, error) { indexKeys, err := indexFunc(obj) if err != nil { return "", err } if len(indexKeys) > 1 { return "", fmt.Errorf("too many keys: %v", indexKeys) } if len(indexKeys) == 0 { return "", fmt.Errorf("unexpected empty indexKeys") } return indexKeys[0], nil } } const ( // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) // MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { meta, err := meta.Accessor(obj) if err != nil { return []string{""}, fmt.Errorf("object has no meta: %v", err) } return []string{meta.GetNamespace()}, nil } // Index maps the indexed value to a set of keys in the store that match on that value type Index map[string]sets.String // Indexers maps a name to an IndexFunc type Indexers map[string]IndexFunc // Indices maps a name to an Index type Indices map[string]Index
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/shared_informer.go
vendor/k8s.io/client-go/tools/cache/shared_informer.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "errors" "fmt" "sync" "time" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" "k8s.io/klog/v2" clientgofeaturegate "k8s.io/client-go/features" ) // SharedInformer provides eventually consistent linkage of its // clients to the authoritative state of a given collection of // objects. An object is identified by its API group, kind/resource, // namespace (if any), and name; the `ObjectMeta.UID` is not part of // an object's ID as far as this contract is concerned. One // SharedInformer provides linkage to objects of a particular API // group and kind/resource. The linked object collection of a // SharedInformer may be further restricted to one namespace (if // applicable) and/or by label selector and/or field selector. // // The authoritative state of an object is what apiservers provide // access to, and an object goes through a strict sequence of states. // An object state is either (1) present with a ResourceVersion and // other appropriate content or (2) "absent". 
// // A SharedInformer maintains a local cache --- exposed by GetStore(), // by GetIndexer() in the case of an indexed informer, and possibly by // machinery involved in creating and/or accessing the informer --- of // the state of each relevant object. This cache is eventually // consistent with the authoritative state. This means that, unless // prevented by persistent communication problems, if ever a // particular object ID X is authoritatively associated with a state S // then for every SharedInformer I whose collection includes (X, S) // eventually either (1) I's cache associates X with S or a later // state of X, (2) I is stopped, or (3) the authoritative state // service for X terminates. To be formally complete, we say that the // absent state meets any restriction by label selector or field // selector. // // For a given informer and relevant object ID X, the sequence of // states that appears in the informer's cache is a subsequence of the // states authoritatively associated with X. That is, some states // might never appear in the cache but ordering among the appearing // states is correct. Note, however, that there is no promise about // ordering between states seen for different objects. // // The local cache starts out empty, and gets populated and updated // during `Run()`. // // As a simple example, if a collection of objects is henceforth // unchanging, a SharedInformer is created that links to that // collection, and that SharedInformer is `Run()` then that // SharedInformer's cache eventually holds an exact copy of that // collection (unless it is stopped too soon, the authoritative state // service ends, or communication problems between the two // persistently thwart achievement). 
// // As another simple example, if the local cache ever holds a // non-absent state for some object ID and the object is eventually // removed from the authoritative state then eventually the object is // removed from the local cache (unless the SharedInformer is stopped // too soon, the authoritative state service ends, or communication // problems persistently thwart the desired result). // // The keys in the Store are of the form namespace/name for namespaced // objects, and are simply the name for non-namespaced objects. // Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for // a given object, and `SplitMetaNamespaceKey(key)` to split a key // into its constituent parts. // // Every query against the local cache is answered entirely from one // snapshot of the cache's state. Thus, the result of a `List` call // will not contain two entries with the same namespace and name. // // A client is identified here by a ResourceEventHandler. For every // update to the SharedInformer's local cache and for every client // added before `Run()`, eventually either the SharedInformer is // stopped or the client is notified of the update. A client added // after `Run()` starts gets a startup batch of notifications of // additions of the objects existing in the cache at the time that // client was added; also, for every update to the SharedInformer's // local cache after that client was added, eventually either the // SharedInformer is stopped or that client is notified of that // update. Client notifications happen after the corresponding cache // update and, in the case of a SharedIndexInformer, after the // corresponding index updates. It is possible that additional cache // and index updates happen before such a prescribed notification. // For a given SharedInformer and client, the notifications are // delivered sequentially. For a given SharedInformer, client, and // object ID, the notifications are delivered in order. 
Because // `ObjectMeta.UID` has no role in identifying objects, it is possible // that when (1) object O1 with ID (e.g. namespace and name) X and // `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted // and later (2) another object O2 with ID X and ObjectMeta.UID U2 is // created the informer's clients are not notified of (1) and (2) but // rather are notified only of an update from O1 to O2. Clients that // need to detect such cases might do so by comparing the `ObjectMeta.UID` // field of the old and the new object in the code that handles update // notifications (i.e. `OnUpdate` method of ResourceEventHandler). // // A client must process each notification promptly; a SharedInformer // is not engineered to deal well with a large backlog of // notifications to deliver. Lengthy processing should be passed off // to something else, for example through a // `client-go/util/workqueue`. // // A delete notification exposes the last locally known non-absent // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using // the shared informer's resync period. Events to a single handler are // delivered sequentially, but there is no coordination between // different handlers. // It returns a registration handle for the handler that can be used to // remove the handler again, or to tell if the handler is synced (has // seen every item in the initial list). AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means // this handler does not care about resyncs. 
The resync operation // consists of delivering to the handler an update notification // for every object in the informer's local cache; it does not add // any interactions with the authoritative storage. Some // informers do no resyncs at all, not even for handlers added // with a non-zero resyncPeriod. For an informer that does // resyncs, and for each handler that requests resyncs, that // informer develops a nominal resync period that is no shorter // than the requested period but may be longer. The actual time // between any two resyncs may be longer than the nominal period // because the implementation takes time to do work and there may // be competing load and scheduling noise. // It returns a registration handle for the handler that can be used to remove // the handler again and an error if the handler cannot be added. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) // RemoveEventHandler removes a formerly added event handler given by // its registration handle. // This function is guaranteed to be idempotent, and thread-safe. RemoveEventHandler(handle ResourceEventHandlerRegistration) error // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController is deprecated, it does nothing useful GetController() Controller // Run starts and runs the shared informer, returning after it stops. // The informer will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". // // Note that this doesn't tell you if an individual handler is synced!! // For that, please call HasSynced on the handle returned by // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. 
The value returned is not synchronized with access to the underlying store and is not // thread-safe. LastSyncResourceVersion() string // The WatchErrorHandler is called whenever ListAndWatch drops the // connection with an error. After calling this handler, the informer // will backoff and retry. // // The default implementation looks at the error type and tries to log // the error message at an appropriate level. // // There's only one handler, so if you call this multiple times, last one // wins; calling after the informer has been started returns an error. // // The handler is intended for visibility, not to e.g. pause the consumers. // The handler should return quickly - any expensive processing should be // offloaded. SetWatchErrorHandler(handler WatchErrorHandler) error // The TransformFunc is called for each object which is about to be stored. // // This function is intended for you to take the opportunity to // remove, transform, or normalize fields. One use case is to strip unused // metadata fields out of objects to save on RAM cost. // // Must be set before starting the informer. // // Please see the comment on TransformFunc for more details. SetTransform(handler TransformFunc) error // IsStopped reports whether the informer has already been stopped. // Adding event handlers to already stopped informers is not possible. // An informer already stopped will never be started again. IsStopped() bool } // Opaque interface representing the registration of ResourceEventHandler for // a SharedInformer. Must be supplied back to the same SharedInformer's // `RemoveEventHandler` to unregister the handlers. // // Also used to tell if the handler is synced (has had all items in the initial // list delivered). type ResourceEventHandlerRegistration interface { // HasSynced reports if both the parent has synced and all pre-sync // events have been delivered. HasSynced() bool } // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. 
type SharedIndexInformer interface { SharedInformer // AddIndexers add indexers to the informer before it starts. AddIndexers(indexers Indexers) error GetIndexer() Indexer } // NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } // NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See // NewSharedIndexInformerWithOptions for full details. func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { return NewSharedIndexInformerWithOptions( lw, exampleObject, SharedIndexInformerOptions{ ResyncPeriod: defaultEventHandlerResyncPeriod, Indexers: indexers, }, ) } // NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. // The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the // informer's resync checking period. Such an informer's resync // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the // options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. 
func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} return &sharedIndexInformer{ indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, listerWatcher: lw, objectType: exampleObject, objectDescription: options.ObjectDescription, resyncCheckPeriod: options.ResyncPeriod, defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } } // SharedIndexInformerOptions configures a sharedIndexInformer. type SharedIndexInformerOptions struct { // ResyncPeriod is the default event handler resync period and resync check // period. If unset/unspecified, these are defaulted to 0 (do not resync). ResyncPeriod time.Duration // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. Indexers Indexers // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the // underlying Reflector's type description. ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. type InformerSynced func() bool const ( // syncedPollPeriod controls how often you look at the status of your sync funcs syncedPollPeriod = 100 * time.Millisecond // initialBufferSize is the initial number of event notifications that can be buffered. initialBufferSize = 1024 ) // WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages // indicating that the caller identified by name is waiting for syncs, followed by // either a successful or failed sync. 
func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { klog.Infof("Waiting for caches to sync for %s", controllerName) if !WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) return false } klog.Infof("Caches are synced for %s", controllerName) return true } // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false // if the controller should shutdown // callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { return false, nil } } return true, nil }, stopCh) if err != nil { return false } return true } // `*sharedIndexInformer` implements SharedIndexInformer and has three // main components. One is an indexed local cache, `indexer Indexer`. // The second main component is a Controller that pulls // objects/notifications using the ListerWatcher and pushes them into // a DeltaFIFO --- whose knownObjects is the informer's local cache // --- while concurrently Popping Deltas values from that fifo and // processing them with `sharedIndexInformer::HandleDeltas`. Each // invocation of HandleDeltas, which is done with the fifo's lock // held, processes each Delta in turn. For each Delta this both // updates the local cache and stuffs the relevant notification into // the sharedProcessor. The third main component is that // sharedProcessor, which is responsible for relaying those // notifications to each of the informer's clients. type sharedIndexInformer struct { indexer Indexer controller Controller processor *sharedProcessor cacheMutationDetector MutationDetector listerWatcher ListerWatcher // objectType is an example object of the type this informer is expected to handle. 
If set, an event // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object // objectDescription is the description of this informer's objects. This typically defaults to objectDescription string // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default // value). defaultEventHandlerResyncPeriod time.Duration // clock allows for testability clock clock.Clock started, stopped bool startedLock sync.Mutex // blockDeltas gives a way to stop all event distribution so that a late event handler // can safely join the shared informer. blockDeltas sync.Mutex // Called whenever the ListAndWatch drops the connection with an error. watchErrorHandler WatchErrorHandler transform TransformFunc } // dummyController hides the fact that a SharedInformer is different from a dedicated one // where a caller can `Run`. The run method is disconnected in this case, because higher // level logic will decide when to start the SharedInformer and related controller. // Because returning information back is always asynchronous, the legacy callers shouldn't // notice any change in behavior. 
type dummyController struct { informer *sharedIndexInformer } func (v *dummyController) Run(stopCh <-chan struct{}) { } func (v *dummyController) HasSynced() bool { return v.informer.HasSynced() } func (v *dummyController) LastSyncResourceVersion() string { if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InformerResourceVersion) { return v.informer.LastSyncResourceVersion() } return "" } type updateNotification struct { oldObj interface{} newObj interface{} } type addNotification struct { newObj interface{} isInInitialList bool } type deleteNotification struct { oldObj interface{} } func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error { s.startedLock.Lock() defer s.startedLock.Unlock() if s.started { return fmt.Errorf("informer has already started") } s.watchErrorHandler = handler return nil } func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error { s.startedLock.Lock() defer s.startedLock.Unlock() if s.started { return fmt.Errorf("informer has already started") } s.transform = handler return nil } func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() if s.HasStarted() { klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") return } func() { s.startedLock.Lock() defer s.startedLock.Unlock() fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: s.indexer, EmitDeltaTypeReplaced: true, Transformer: s.transform, }) cfg := &Config{ Queue: fifo, ListerWatcher: s.listerWatcher, ObjectType: s.objectType, ObjectDescription: s.objectDescription, FullResyncPeriod: s.resyncCheckPeriod, RetryOnError: false, ShouldResync: s.processor.shouldResync, Process: s.HandleDeltas, WatchErrorHandler: s.watchErrorHandler, } s.controller = New(cfg) s.controller.(*controller).clock = s.clock s.started = true }() // Separate stop channel because Processor should be stopped strictly after controller processorStopCh := make(chan struct{}) var wg 
wait.Group defer wg.Wait() // Wait for Processor to stop defer close(processorStopCh) // Tell Processor to stop wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) wg.StartWithChannel(processorStopCh, s.processor.run) defer func() { s.startedLock.Lock() defer s.startedLock.Unlock() s.stopped = true // Don't want any new listeners }() s.controller.Run(stopCh) } func (s *sharedIndexInformer) HasStarted() bool { s.startedLock.Lock() defer s.startedLock.Unlock() return s.started } func (s *sharedIndexInformer) HasSynced() bool { s.startedLock.Lock() defer s.startedLock.Unlock() if s.controller == nil { return false } return s.controller.HasSynced() } func (s *sharedIndexInformer) LastSyncResourceVersion() string { s.startedLock.Lock() defer s.startedLock.Unlock() if s.controller == nil { return "" } return s.controller.LastSyncResourceVersion() } func (s *sharedIndexInformer) GetStore() Store { return s.indexer } func (s *sharedIndexInformer) GetIndexer() Indexer { return s.indexer } func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { s.startedLock.Lock() defer s.startedLock.Unlock() if s.stopped { return fmt.Errorf("indexer was not added because it has stopped already") } return s.indexer.AddIndexers(indexers) } func (s *sharedIndexInformer) GetController() Controller { return &dummyController{informer: s} } func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) { return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) } func determineResyncPeriod(desired, check time.Duration) time.Duration { if desired == 0 { return desired } if check == 0 { klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) return 0 } if desired < check { klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired } const 
minimumResyncPeriod = 1 * time.Second func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) { s.startedLock.Lock() defer s.startedLock.Unlock() if s.stopped { return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler) } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners // accordingly s.resyncCheckPeriod = resyncPeriod s.processor.resyncCheckPeriodChanged(resyncPeriod) } } } listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { return s.processor.addListener(listener), nil } // in order to safely join, we have to // 1. stop sending add/update/delete notifications // 2. do a list against the store // 3. send synthetic "Add" events to the new handler // 4. unblock s.blockDeltas.Lock() defer s.blockDeltas.Unlock() handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { // Note that we enqueue these notifications with the lock held // and before returning the handle. 
That means there is never a // chance for anyone to call the handle's HasSynced method in a // state when it would falsely return true (i.e., when the // shared informer is synced but it has not observed an Add // with isInitialList being true, nor when the thread // processing notifications somehow goes faster than this // thread adding them and the counter is temporarily zero). listener.add(addNotification{newObj: item, isInInitialList: true}) } return handle, nil } func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { return processDeltas(s, s.indexer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler func (s *sharedIndexInformer) OnUpdate(old, new interface{}) { isSync := false // If is a Sync event, isSync should be true // If is a Replaced event, isSync is true if resource version is unchanged. 
// If RV is unchanged: this is a Sync/Replaced event, so isSync is true if accessor, err := meta.Accessor(new); err == nil { if oldAccessor, err := meta.Accessor(old); err == nil { // Events that didn't change resourceVersion are treated as resync events // and only propagated to listeners that requested resync isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion() } } // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(new) s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync) } // Conforms to ResourceEventHandler func (s *sharedIndexInformer) OnDelete(old interface{}) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.processor.distribute(deleteNotification{oldObj: old}, false) } // IsStopped reports whether the informer has already been stopped func (s *sharedIndexInformer) IsStopped() bool { s.startedLock.Lock() defer s.startedLock.Unlock() return s.stopped } func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error { s.startedLock.Lock() defer s.startedLock.Unlock() // in order to safely remove, we have to // 1. stop sending add/update/delete notifications // 2. remove and stop listener // 3. unblock s.blockDeltas.Lock() defer s.blockDeltas.Unlock() return s.processor.removeListener(handle) } // sharedProcessor has a collection of processorListener and can // distribute a notification object to its listeners. There are two // kinds of distribute operations. The sync distributions go to a // subset of the listeners that (a) is recomputed in the occasional // calls to shouldResync and (b) every listener is initially put in. // The non-sync distributions go to every listener. 
type sharedProcessor struct { listenersStarted bool listenersLock sync.RWMutex // Map from listeners to whether or not they are currently syncing listeners map[*processorListener]bool clock clock.Clock wg wait.Group } func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener { p.listenersLock.RLock() defer p.listenersLock.RUnlock() if p.listeners == nil { return nil } if result, ok := registration.(*processorListener); ok { if _, exists := p.listeners[result]; exists { return result } } return nil } func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration { p.listenersLock.Lock() defer p.listenersLock.Unlock() if p.listeners == nil { p.listeners = make(map[*processorListener]bool) } p.listeners[listener] = true if p.listenersStarted { p.wg.Start(listener.run) p.wg.Start(listener.pop) } return listener } func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error { p.listenersLock.Lock() defer p.listenersLock.Unlock() listener, ok := handle.(*processorListener) if !ok { return fmt.Errorf("invalid key type %t", handle) } else if p.listeners == nil { // No listeners are registered, do nothing return nil } else if _, exists := p.listeners[listener]; !exists { // Listener is not registered, just do nothing return nil } delete(p.listeners, listener) if p.listenersStarted { close(listener.addCh) } return nil } func (p *sharedProcessor) distribute(obj interface{}, sync bool) { p.listenersLock.RLock() defer p.listenersLock.RUnlock() for listener, isSyncing := range p.listeners { switch { case !sync: // non-sync messages are delivered to every listener listener.add(obj) case isSyncing: // sync messages are delivered to every syncing listener listener.add(obj) default: // skipping a sync obj for a non-syncing listener } } } func (p *sharedProcessor) run(stopCh <-chan struct{}) { func() { p.listenersLock.RLock() defer p.listenersLock.RUnlock() for listener := 
range p.listeners { p.wg.Start(listener.run) p.wg.Start(listener.pop) } p.listenersStarted = true }() <-stopCh p.listenersLock.Lock() defer p.listenersLock.Unlock() for listener := range p.listeners { close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop } // Wipe out list of listeners since they are now closed // (processorListener cannot be re-used) p.listeners = nil // Reset to false since no listeners are running p.listenersStarted = false p.wg.Wait() // Wait for all .pop() and .run() to stop } // shouldResync queries every listener to determine if any of them need a resync, based on each // listener's resyncPeriod. func (p *sharedProcessor) shouldResync() bool { p.listenersLock.Lock() defer p.listenersLock.Unlock() resyncNeeded := false now := p.clock.Now() for listener := range p.listeners { // need to loop through all the listeners to see if they need to resync so we can prepare any // listeners that are going to be resyncing. shouldResync := listener.shouldResync(now) p.listeners[listener] = shouldResync if shouldResync { resyncNeeded = true listener.determineNextResync(now) } } return resyncNeeded } func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { p.listenersLock.RLock() defer p.listenersLock.RUnlock() for listener := range p.listeners { resyncPeriod := determineResyncPeriod( listener.requestedResyncPeriod, resyncCheckPeriod) listener.setResyncPeriod(resyncPeriod) } } // processorListener relays notifications from a sharedProcessor to // one ResourceEventHandler --- using two goroutines, two unbuffered // channels, and an unbounded ring buffer. The `add(notification)` // function sends the given notification to `addCh`. One goroutine // runs `pop()`, which pumps notifications from `addCh` to `nextCh` // using storage in the ring buffer while `nextCh` is not keeping up. 
// Another goroutine runs `run()`, which receives notifications from // `nextCh` and synchronously invokes the appropriate handler method. // // processorListener also keeps track of the adjusted requested resync // period of the listener. type processorListener struct { nextCh chan interface{} addCh chan interface{} handler ResourceEventHandler syncTracker *synctrack.SingleFileTracker // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/heap.go
vendor/k8s.io/client-go/tools/cache/heap.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file implements a heap data structure. package cache import ( "container/heap" "fmt" "sync" ) const ( closedMsg = "heap is closed" ) // LessFunc is used to compare two objects in the heap. type LessFunc func(interface{}, interface{}) bool type heapItem struct { obj interface{} // The object which is stored in the heap. index int // The index of the object's key in the Heap.queue. } type itemKeyValue struct { key string obj interface{} } // heapData is an internal struct that implements the standard heap interface // and keeps the data stored in the heap. type heapData struct { // items is a map from key of the objects to the objects and their index. // We depend on the property that items in the map are in the queue and vice versa. items map[string]*heapItem // queue implements a heap data structure and keeps the order of elements // according to the heap invariant. The queue keeps the keys of objects stored // in "items". queue []string // keyFunc is used to make the key used for queued item insertion and retrieval, and // should be deterministic. keyFunc KeyFunc // lessFunc is used to compare two objects in the heap. lessFunc LessFunc } var ( _ = heap.Interface(&heapData{}) // heapData is a standard heap ) // Less compares two objects and returns true if the first one should go // in front of the second one in the heap. 
func (h *heapData) Less(i, j int) bool { if i > len(h.queue) || j > len(h.queue) { return false } itemi, ok := h.items[h.queue[i]] if !ok { return false } itemj, ok := h.items[h.queue[j]] if !ok { return false } return h.lessFunc(itemi.obj, itemj.obj) } // Len returns the number of items in the Heap. func (h *heapData) Len() int { return len(h.queue) } // Swap implements swapping of two elements in the heap. This is a part of standard // heap interface and should never be called directly. func (h *heapData) Swap(i, j int) { h.queue[i], h.queue[j] = h.queue[j], h.queue[i] item := h.items[h.queue[i]] item.index = i item = h.items[h.queue[j]] item.index = j } // Push is supposed to be called by heap.Push only. func (h *heapData) Push(kv interface{}) { keyValue := kv.(*itemKeyValue) n := len(h.queue) h.items[keyValue.key] = &heapItem{keyValue.obj, n} h.queue = append(h.queue, keyValue.key) } // Pop is supposed to be called by heap.Pop only. func (h *heapData) Pop() interface{} { key := h.queue[len(h.queue)-1] h.queue = h.queue[0 : len(h.queue)-1] item, ok := h.items[key] if !ok { // This is an error return nil } delete(h.items, key) return item.obj } // Heap is a thread-safe producer/consumer queue that implements a heap data structure. // It can be used to implement priority queues and similar data structures. type Heap struct { lock sync.RWMutex cond sync.Cond // data stores objects and has a queue that keeps their ordering according // to the heap invariant. data *heapData // closed indicates that the queue is closed. // It is mainly used to let Pop() exit its control loop while waiting for an item. closed bool } // Close the Heap and signals condition variables that may be waiting to pop // items from the heap. func (h *Heap) Close() { h.lock.Lock() defer h.lock.Unlock() h.closed = true h.cond.Broadcast() } // Add inserts an item, and puts it in the queue. The item is updated if it // already exists. 
// Add inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap) Add(obj interface{}) error {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	if _, exists := h.data.items[key]; exists {
		// Key already present: replace the stored object and restore the
		// heap invariant from the item's current queue position.
		h.data.items[key].obj = obj
		heap.Fix(h.data, h.data.items[key].index)
	} else {
		h.addIfNotPresentLocked(key, obj)
	}
	// Wake consumers blocked in Pop().
	h.cond.Broadcast()
	return nil
}

// BulkAdd adds all the items in the list to the queue and then signals the condition
// variable. It is useful when the caller would like to add all of the items
// to the queue before consumer starts processing them.
//
// NOTE: if keyFunc fails part-way through the list, items added earlier in
// the same call remain enqueued.
func (h *Heap) BulkAdd(list []interface{}) error {
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	for _, obj := range list {
		key, err := h.data.keyFunc(obj)
		if err != nil {
			return KeyError{obj, err}
		}
		if _, exists := h.data.items[key]; exists {
			h.data.items[key].obj = obj
			heap.Fix(h.data, h.data.items[key].index)
		} else {
			h.addIfNotPresentLocked(key, obj)
		}
	}
	h.cond.Broadcast()
	return nil
}

// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
// the key is present in the map, no changes is made to the item.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (h *Heap) AddIfNotPresent(obj interface{}) error {
	id, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	h.addIfNotPresentLocked(id, obj)
	h.cond.Broadcast()
	return nil
}

// addIfNotPresentLocked assumes the lock is already held and adds the provided
// item to the queue if it does not already exist.
func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
	if _, exists := h.data.items[key]; exists {
		return
	}
	heap.Push(h.data, &itemKeyValue{key, obj})
}

// Update is the same as Add in this implementation.
When the item does not // exist, it is added. func (h *Heap) Update(obj interface{}) error { return h.Add(obj) } // Delete removes an item. func (h *Heap) Delete(obj interface{}) error { key, err := h.data.keyFunc(obj) if err != nil { return KeyError{obj, err} } h.lock.Lock() defer h.lock.Unlock() if item, ok := h.data.items[key]; ok { heap.Remove(h.data, item.index) return nil } return fmt.Errorf("object not found") } // Pop waits until an item is ready. If multiple items are // ready, they are returned in the order given by Heap.data.lessFunc. func (h *Heap) Pop() (interface{}, error) { h.lock.Lock() defer h.lock.Unlock() for len(h.data.queue) == 0 { // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. // When Close() is called, the h.closed is set and the condition is broadcast, // which causes this loop to continue and return from the Pop(). if h.closed { return nil, fmt.Errorf("heap is closed") } h.cond.Wait() } obj := heap.Pop(h.data) if obj == nil { return nil, fmt.Errorf("object was removed from heap data") } return obj, nil } // List returns a list of all the items. func (h *Heap) List() []interface{} { h.lock.RLock() defer h.lock.RUnlock() list := make([]interface{}, 0, len(h.data.items)) for _, item := range h.data.items { list = append(list, item.obj) } return list } // ListKeys returns a list of all the keys of the objects currently in the Heap. func (h *Heap) ListKeys() []string { h.lock.RLock() defer h.lock.RUnlock() list := make([]string, 0, len(h.data.items)) for key := range h.data.items { list = append(list, key) } return list } // Get returns the requested item, or sets exists=false. func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { key, err := h.data.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } return h.GetByKey(key) } // GetByKey returns the requested item, or sets exists=false. 
func (h *Heap) GetByKey(key string) (interface{}, bool, error) { h.lock.RLock() defer h.lock.RUnlock() item, exists := h.data.items[key] if !exists { return nil, false, nil } return item.obj, true, nil } // IsClosed returns true if the queue is closed. func (h *Heap) IsClosed() bool { h.lock.RLock() defer h.lock.RUnlock() return h.closed } // NewHeap returns a Heap which can be used to queue up items to process. func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { h := &Heap{ data: &heapData{ items: map[string]*heapItem{}, queue: []string{}, keyFunc: keyFn, lessFunc: lessFn, }, } h.cond.L = &h.lock return h }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/fifo.go
vendor/k8s.io/client-go/tools/cache/fifo.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "errors" "sync" "k8s.io/apimachinery/pkg/util/sets" ) // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. type ErrRequeue struct { // Err is returned by the Pop function Err error } // ErrFIFOClosed used when FIFO is closed var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") func (e ErrRequeue) Error() string { if e.Err == nil { return "the popped item should be requeued without returning an error" } return e.Err.Error() } // Queue extends Store with a collection of Store keys to "process". // Every Add, Update, or Delete may put the object's key in that collection. // A Queue has a way to derive the corresponding key given an accumulator. // A Queue can be accessed concurrently from multiple goroutines. // A Queue can be "closed", after which Pop operations return an error. type Queue interface { Store // Pop blocks until there is at least one key to process or the // Queue is closed. In the latter case Pop returns with an error. // In the former case Pop atomically picks one key to process, // removes that (key, accumulator) association from the Store, and // processes the accumulator. 
Pop returns the accumulator that // was processed and the result of processing. The PopProcessFunc // may return an ErrRequeue{inner} and in this case Pop will (a) // return that (key, accumulator) association to the Queue as part // of the atomic processing and (b) return the inner error from // Pop. Pop(PopProcessFunc) (interface{}, error) // AddIfNotPresent puts the given accumulator into the Queue (in // association with the accumulator's key) if and only if that key // is not already associated with a non-empty accumulator. AddIfNotPresent(interface{}) error // HasSynced returns true if the first batch of keys have all been // popped. The first batch of keys are those of the first Replace // operation if that happened before any Add, AddIfNotPresent, // Update, or Delete; otherwise the first batch is empty. HasSynced() bool // Close the queue Close() } // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. // // NOTE: This function is deprecated and may be removed in the future without // additional warning. func Pop(queue Queue) interface{} { var result interface{} queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) return result } // FIFO is a Queue in which (a) each accumulator is simply the most // recently provided object and (b) the collection of keys to process // is a FIFO. The accumulators all start out empty, and deleting an // object from its accumulator empties the accumulator. The Resync // operation is a no-op. // // Thus: if multiple adds/updates of a single object happen while that // object's key is in the queue before it has been processed then it // will only be processed once, and when it is processed the most // recent version will be processed. This can't be done with a channel // // FIFO solves this use case: // - You want to process every object (exactly) once. 
// - You want to process the most recent version of the object when you process it.
// - You do not want to process deleted objects, they should be removed from the queue.
// - You do not want to periodically reprocess objects.
//
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
	lock sync.RWMutex
	cond sync.Cond
	// We depend on the property that every key in `items` is also in `queue`
	items map[string]interface{}
	queue []string

	// populated is true if the first batch of items inserted by Replace() has been populated
	// or Delete/Add/Update was called first.
	populated bool
	// initialPopulationCount is the number of items inserted by the first call of Replace()
	initialPopulationCount int

	// keyFunc is used to make the key used for queued item insertion and retrieval, and
	// should be deterministic.
	keyFunc KeyFunc

	// Indication the queue is closed.
	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
	// Currently, not used to gate any of CRUD operations.
	closed bool
}

var (
	_ = Queue(&FIFO{}) // FIFO is a Queue
)

// Close the queue.
func (f *FIFO) Close() {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.closed = true
	// Wake every goroutine blocked in Pop() so it can observe f.closed.
	f.cond.Broadcast()
}

// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
// or the first batch of items inserted by Replace() has been popped.
func (f *FIFO) HasSynced() bool {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.hasSynced_locked()
}

// hasSynced_locked reports the same condition as HasSynced; the caller must
// already hold f.lock.
func (f *FIFO) hasSynced_locked() bool {
	return f.populated && f.initialPopulationCount == 0
}

// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *FIFO) Add(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = obj f.cond.Broadcast() return nil } // AddIfNotPresent inserts an item, and puts it in the queue. If the item is already // present in the set, it is neither enqueued nor added to the set. // // This is useful in a single producer/consumer scenario so that the consumer can // safely retry items without contending with the producer and potentially enqueueing // stale items. func (f *FIFO) AddIfNotPresent(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.addIfNotPresent(id, obj) return nil } // addIfNotPresent assumes the fifo lock is already held and adds the provided // item to the queue under id if it does not already exist. func (f *FIFO) addIfNotPresent(id string, obj interface{}) { f.populated = true if _, exists := f.items[id]; exists { return } f.queue = append(f.queue, id) f.items[id] = obj f.cond.Broadcast() } // Update is the same as Add in this implementation. func (f *FIFO) Update(obj interface{}) error { return f.Add(obj) } // Delete removes an item. It doesn't add it to the queue, because // this implementation assumes the consumer only cares about the objects, // not the order in which they were created/added. func (f *FIFO) Delete(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true delete(f.items, id) return err } // List returns a list of all the items. 
func (f *FIFO) List() []interface{} { f.lock.RLock() defer f.lock.RUnlock() list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { list = append(list, item) } return list } // ListKeys returns a list of all the keys of the objects currently // in the FIFO. func (f *FIFO) ListKeys() []string { f.lock.RLock() defer f.lock.RUnlock() list := make([]string, 0, len(f.items)) for key := range f.items { list = append(list, key) } return list } // Get returns the requested item, or sets exists=false. func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { key, err := f.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } return f.GetByKey(key) } // GetByKey returns the requested item, or sets exists=false. func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { f.lock.RLock() defer f.lock.RUnlock() item, exists = f.items[key] return item, exists, nil } // IsClosed checks if the queue is closed func (f *FIFO) IsClosed() bool { f.lock.Lock() defer f.lock.Unlock() return f.closed } // Pop waits until an item is ready and processes it. If multiple items are // ready, they are returned in the order in which they were added/updated. // The item is removed from the queue (and the store) before it is processed, // so if you don't successfully process it, it should be added back with // AddIfNotPresent(). process function is called under lock, so it is safe // update data structures in it that need to be in sync with the queue. func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.lock.Lock() defer f.lock.Unlock() for { for len(f.queue) == 0 { // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). 
if f.closed { return nil, ErrFIFOClosed } f.cond.Wait() } isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue } delete(f.items, id) err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err } return item, err } } // Replace will delete the contents of 'f', using instead the given map. // 'f' takes ownership of the map, you should not reference the map again // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := f.keyFunc(item) if err != nil { return KeyError{item, err} } items[key] = item } f.lock.Lock() defer f.lock.Unlock() if !f.populated { f.populated = true f.initialPopulationCount = len(items) } f.items = items f.queue = f.queue[:0] for id := range items { f.queue = append(f.queue, id) } if len(f.queue) > 0 { f.cond.Broadcast() } return nil } // Resync will ensure that every object in the Store has its key in the queue. // This should be a no-op, because that property is maintained by all operations. func (f *FIFO) Resync() error { f.lock.Lock() defer f.lock.Unlock() inQueue := sets.NewString() for _, id := range f.queue { inQueue.Insert(id) } for id := range f.items { if !inQueue.Has(id) { f.queue = append(f.queue, id) } } if len(f.queue) > 0 { f.cond.Broadcast() } return nil } // NewFIFO returns a Store which can be used to queue up items to // process. func NewFIFO(keyFunc KeyFunc) *FIFO { f := &FIFO{ items: map[string]interface{}{}, queue: []string{}, keyFunc: keyFunc, } f.cond.L = &f.lock return f }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/listwatch.go
vendor/k8s.io/client-go/tools/cache/listwatch.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" ) // Lister is any object that knows how to perform an initial list. type Lister interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. List(options metav1.ListOptions) (runtime.Object, error) } // Watcher is any object that knows how to start a watch on a resource. type Watcher interface { // Watch should begin a watch at the specified version. // // If Watch returns an error, it should handle its own cleanup, including // but not limited to calling Stop() on the watch, if one was constructed. // This allows the caller to ignore the watch, if the error is non-nil. Watch(options metav1.ListOptions) (watch.Interface, error) } // ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. type ListerWatcher interface { Lister Watcher } // ListFunc knows how to list resources type ListFunc func(options metav1.ListOptions) (runtime.Object, error) // WatchFunc knows how to watch resources type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) // ListWatch knows how to list and watch a set of apiserver resources. 
It satisfies the ListerWatcher interface. // It is a convenience function for users of NewReflector, etc. // ListFunc and WatchFunc must not be nil type ListWatch struct { ListFunc ListFunc WatchFunc WatchFunc // DisableChunking requests no chunking for this list watcher. DisableChunking bool } // Getter interface knows how to access Get method from RESTClient. type Getter interface { Get() *restclient.Request } // NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { optionsModifier := func(options *metav1.ListOptions) { options.FieldSelector = fieldSelector.String() } return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier) } // NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier. // Option modifier is a function takes a ListOptions and modifies the consumed ListOptions. Provide customized modifier function // to apply modification to ListOptions with a field selector, a label selector, or any other desired options. func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch { listFunc := func(options metav1.ListOptions) (runtime.Object, error) { optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). Do(context.TODO()). Get() } watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { options.Watch = true optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). 
			Watch(context.TODO())
	}
	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

// List a set of apiserver resources
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
	// ListWatch is used in Reflector, which already supports pagination.
	// Don't paginate here to avoid duplication.
	return lw.ListFunc(options)
}

// Watch a set of apiserver resources
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
	// Pure delegation to the configured WatchFunc.
	return lw.WatchFunc(options)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
vendor/k8s.io/client-go/tools/cache/expiration_cache.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "sync" "time" "k8s.io/utils/clock" ) // ExpirationCache implements the store interface // 1. All entries are automatically time stamped on insert // a. The key is computed based off the original item/keyFunc // b. The value inserted under that key is the timestamped item // 2. Expiration happens lazily on read based on the expiration policy // a. No item can be inserted into the store while we're expiring // *any* item in the cache. // 3. Time-stamps are stripped off unexpired entries before return // // Note that the ExpirationCache is inherently slower than a normal // threadSafeStore because it takes a write lock every time it checks if // an item has expired. type ExpirationCache struct { cacheStorage ThreadSafeStore keyFunc KeyFunc clock clock.Clock expirationPolicy ExpirationPolicy // expirationLock is a write lock used to guarantee that we don't clobber // newly inserted objects because of a stale expiration timestamp comparison expirationLock sync.Mutex } // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. type ExpirationPolicy interface { IsExpired(obj *TimestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. 
type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock } // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } // TimestampedEntry is the only type allowed in a ExpirationCache. // Keep in mind that it is not safe to share timestamps between computers. // Behavior may be inconsistent if you get a timestamp from the API Server and // use it on the client machine as part of your ExpirationCache. type TimestampedEntry struct { Obj interface{} Timestamp time.Time key string } // getTimestampedEntry returns the TimestampedEntry stored under the given key. func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) if tsEntry, ok := item.(*TimestampedEntry); ok { return tsEntry, true } return nil, false } // getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we // delete it, so an un-expired item doesn't sneak in under the same key, just // before the Delete. c.expirationLock.Lock() defer c.expirationLock.Unlock() timestampedItem, exists := c.getTimestampedEntry(key) if !exists { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { c.cacheStorage.Delete(key) return nil, false } return timestampedItem.Obj, true } // GetByKey returns the item stored under the key, or sets exists=false. func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { obj, exists := c.getOrExpire(key) return obj, exists, nil } // Get returns unexpired items. 
It purges the cache of expired items in the // process. func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { key, err := c.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } obj, exists := c.getOrExpire(key) return obj, exists, nil } // List retrieves a list of unexpired items. It purges the cache of expired // items in the process. func (c *ExpirationCache) List() []interface{} { items := c.cacheStorage.List() list := make([]interface{}, 0, len(items)) for _, item := range items { key := item.(*TimestampedEntry).key if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } return list } // ListKeys returns a list of all keys in the expiration cache. func (c *ExpirationCache) ListKeys() []string { return c.cacheStorage.ListKeys() } // Add timestamps an item and inserts it into the cache, overwriting entries // that might exist under the same key. func (c *ExpirationCache) Add(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.expirationLock.Lock() defer c.expirationLock.Unlock() c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } // Update has not been implemented yet for lack of a use case, so this method // simply calls `Add`. This effectively refreshes the timestamp. func (c *ExpirationCache) Update(obj interface{}) error { return c.Add(obj) } // Delete removes an item from the cache. func (c *ExpirationCache) Delete(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.expirationLock.Lock() defer c.expirationLock.Unlock() c.cacheStorage.Delete(key) return nil } // Replace will convert all items in the given list to TimestampedEntries // before attempting the replace operation. The replace operation will // delete the contents of the ExpirationCache `c`. 
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
	// Timestamp every incoming item with a single "now" so the whole batch
	// expires together.
	items := make(map[string]interface{}, len(list))
	ts := c.clock.Now()
	for _, item := range list {
		key, err := c.keyFunc(item)
		if err != nil {
			return KeyError{item, err}
		}
		items[key] = &TimestampedEntry{item, ts, key}
	}
	// Hold expirationLock so a concurrent getOrExpire cannot delete entries
	// based on a stale timestamp comparison while we swap the contents.
	c.expirationLock.Lock()
	defer c.expirationLock.Unlock()
	c.cacheStorage.Replace(items, resourceVersion)
	return nil
}

// Resync is a no-op for one of these
func (c *ExpirationCache) Resync() error {
	return nil
}

// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
	return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
}

// NewExpirationStore creates and returns a ExpirationCache for a given policy
func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
	return &ExpirationCache{
		cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
		keyFunc:          keyFunc,
		clock:            clock.RealClock{},
		expirationPolicy: expirationPolicy,
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/listers.go
vendor/k8s.io/client-go/tools/cache/listers.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) // ListAll lists items in the store matching the given selector, calling appendFn on each one. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { if selectAll { // Avoid computing labels of the objects to speed up common flows // of listing all objects. appendFn(m) continue } metadata, err := meta.Accessor(m) if err != nil { return err } if selector.Matches(labels.Set(metadata.GetLabels())) { appendFn(m) } } return nil } // ListAllByNamespace lists items in the given namespace in the store matching the given selector, // calling appendFn on each one. // If a blank namespace (NamespaceAll) is specified, this delegates to ListAll(). 
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
	// NamespaceAll means "no namespace filter": fall back to the plain list.
	if namespace == metav1.NamespaceAll {
		return ListAll(indexer, selector, appendFn)
	}

	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
	if err != nil {
		// Ignore error; do slow search without index.
		klog.Warningf("can not retrieve list of objects using index : %v", err)
		for _, m := range indexer.List() {
			metadata, err := meta.Accessor(m)
			if err != nil {
				return err
			}
			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
				appendFn(m)
			}
		}
		return nil
	}

	selectAll := selector.Empty()
	for _, m := range items {
		if selectAll {
			// Avoid computing labels of the objects to speed up common flows
			// of listing all objects.
			appendFn(m)
			continue
		}
		metadata, err := meta.Accessor(m)
		if err != nil {
			return err
		}
		if selector.Matches(labels.Set(metadata.GetLabels())) {
			appendFn(m)
		}
	}
	return nil
}

// GenericLister is a lister skin on a generic Indexer
type GenericLister interface {
	// List will return all objects across namespaces
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve assuming that name==key
	Get(name string) (runtime.Object, error)
	// ByNamespace will give you a GenericNamespaceLister for one namespace
	ByNamespace(namespace string) GenericNamespaceLister
}

// GenericNamespaceLister is a lister skin on a generic Indexer
type GenericNamespaceLister interface {
	// List will return all objects in this namespace
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve by namespace and name
	Get(name string) (runtime.Object, error)
}

// NewGenericLister creates a new instance for the genericLister.
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { return &genericLister{indexer: indexer, resource: resource} } type genericLister struct { indexer Indexer resource schema.GroupResource } func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { err = ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(runtime.Object)) }) return ret, err } func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} } func (s *genericLister) Get(name string) (runtime.Object, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(s.resource, name) } return obj.(runtime.Object), nil } type genericNamespaceLister struct { indexer Indexer namespace string resource schema.GroupResource } func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { ret = append(ret, m.(runtime.Object)) }) return ret, err } func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(s.resource, name) } return obj.(runtime.Object), nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go
vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go
/* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "k8s.io/utils/clock" "time" ) type RetryWithDeadline interface { After(error) ShouldRetry() bool } type retryWithDeadlineImpl struct { firstErrorTime time.Time lastErrorTime time.Time maxRetryDuration time.Duration minResetPeriod time.Duration isRetryable func(error) bool clock clock.Clock } func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline { return &retryWithDeadlineImpl{ firstErrorTime: time.Time{}, lastErrorTime: time.Time{}, maxRetryDuration: maxRetryDuration, minResetPeriod: minResetPeriod, isRetryable: isRetryable, clock: clock, } } func (r *retryWithDeadlineImpl) reset() { r.firstErrorTime = time.Time{} r.lastErrorTime = time.Time{} } func (r *retryWithDeadlineImpl) After(err error) { if r.isRetryable(err) { if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod { r.reset() } if r.firstErrorTime.IsZero() { r.firstErrorTime = r.clock.Now() } r.lastErrorTime = r.clock.Now() } } func (r *retryWithDeadlineImpl) ShouldRetry() bool { if r.maxRetryDuration <= time.Duration(0) { return false } if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration { return true } r.reset() return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file provides abstractions for setting the provider (e.g., prometheus) // of metrics. package cache import ( "sync" ) // GaugeMetric represents a single numerical value that can arbitrarily go up // and down. type GaugeMetric interface { Set(float64) } // CounterMetric represents a single numerical value that only ever // goes up. type CounterMetric interface { Inc() } // SummaryMetric captures individual observations. type SummaryMetric interface { Observe(float64) } type noopMetric struct{} func (noopMetric) Inc() {} func (noopMetric) Dec() {} func (noopMetric) Observe(float64) {} func (noopMetric) Set(float64) {} // MetricsProvider generates various metrics used by the reflector. 
type MetricsProvider interface { NewListsMetric(name string) CounterMetric NewListDurationMetric(name string) SummaryMetric NewItemsInListMetric(name string) SummaryMetric NewWatchesMetric(name string) CounterMetric NewShortWatchesMetric(name string) CounterMetric NewWatchDurationMetric(name string) SummaryMetric NewItemsInWatchMetric(name string) SummaryMetric NewLastResourceVersionMetric(name string) GaugeMetric } type noopMetricsProvider struct{} func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { return noopMetric{} } var metricsFactory = struct { metricsProvider MetricsProvider setProviders sync.Once }{ metricsProvider: noopMetricsProvider{}, } // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { metricsFactory.metricsProvider = metricsProvider }) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "fmt" "sync" "k8s.io/apimachinery/pkg/util/sets" ) // ThreadSafeStore is an interface that allows concurrent indexed // access to a storage backend. It is like Indexer but does not // (necessarily) know how to extract the Store key from a given // object. // // TL;DR caveats: you must not modify anything returned by Get or List as it will break // the indexing feature in addition to not being thread safe. // // The guarantees of thread safety provided by List/Get are only valid if the caller // treats returned items as read-only. For example, a pointer inserted in the store // through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get` // on the same key and modify the pointer in a non-thread-safe way. Also note that // modifying objects stored by the indexers (if any) will *not* automatically lead // to a re-index. So it's not a good idea to directly modify the objects returned by // Get/List, in general. 
type ThreadSafeStore interface { Add(key string, obj interface{}) Update(key string, obj interface{}) Delete(key string) Get(key string) (item interface{}, exists bool) List() []interface{} ListKeys() []string Replace(map[string]interface{}, string) Index(indexName string, obj interface{}) ([]interface{}, error) IndexKeys(indexName, indexedValue string) ([]string, error) ListIndexFuncValues(name string) []string ByIndex(indexName, indexedValue string) ([]interface{}, error) GetIndexers() Indexers // AddIndexers adds more indexers to this store. This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error // Resync is a no-op and is deprecated Resync() error } // storeIndex implements the indexing functionality for Store interface type storeIndex struct { // indexers maps a name to an IndexFunc indexers Indexers // indices maps a name to an Index indices Indices } func (i *storeIndex) reset() { i.indices = Indices{} } func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) { indexFunc := i.indexers[indexName] if indexFunc == nil { return nil, fmt.Errorf("Index with name %s does not exist", indexName) } indexedValues, err := indexFunc(obj) if err != nil { return nil, err } index := i.indices[indexName] var storeKeySet sets.String if len(indexedValues) == 1 { // In majority of cases, there is exactly one value matching. // Optimize the most common path - deduping is not needed here. storeKeySet = index[indexedValues[0]] } else { // Need to de-dupe the return list. // Since multiple keys are allowed, this can happen. 
storeKeySet = sets.String{} for _, indexedValue := range indexedValues { for key := range index[indexedValue] { storeKeySet.Insert(key) } } } return storeKeySet, nil } func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) { indexFunc := i.indexers[indexName] if indexFunc == nil { return nil, fmt.Errorf("Index with name %s does not exist", indexName) } index := i.indices[indexName] return index[indexedValue], nil } func (i *storeIndex) getIndexValues(indexName string) []string { index := i.indices[indexName] names := make([]string, 0, len(index)) for key := range index { names = append(names, key) } return names } func (i *storeIndex) addIndexers(newIndexers Indexers) error { oldKeys := sets.StringKeySet(i.indexers) newKeys := sets.StringKeySet(newIndexers) if oldKeys.HasAny(newKeys.List()...) { return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) } for k, v := range newIndexers { i.indexers[k] = v } return nil } // updateSingleIndex modifies the objects location in the named index: // - for create you must provide only the newObj // - for update you must provide both the oldObj and the newObj // - for delete you must provide only the oldObj // updateSingleIndex must be called from a function that already has a lock on the cache func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj interface{}, key string) { var oldIndexValues, indexValues []string indexFunc, ok := i.indexers[name] if !ok { // Should never happen. Caller is responsible for ensuring this exists, and should call with lock // held to avoid any races. 
panic(fmt.Errorf("indexer %q does not exist", name)) } if oldObj != nil { var err error oldIndexValues, err = indexFunc(oldObj) if err != nil { panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } } else { oldIndexValues = oldIndexValues[:0] } if newObj != nil { var err error indexValues, err = indexFunc(newObj) if err != nil { panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } } else { indexValues = indexValues[:0] } index := i.indices[name] if index == nil { index = Index{} i.indices[name] = index } if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { // We optimize for the most common case where indexFunc returns a single value which has not been changed return } for _, value := range oldIndexValues { i.deleteKeyFromIndex(key, value, index) } for _, value := range indexValues { i.addKeyToIndex(key, value, index) } } // updateIndices modifies the objects location in the managed indexes: // - for create you must provide only the newObj // - for update you must provide both the oldObj and the newObj // - for delete you must provide only the oldObj // updateIndices must be called from a function that already has a lock on the cache func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { for name := range i.indexers { i.updateSingleIndex(name, oldObj, newObj, key) } } func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) { set := index[indexValue] if set == nil { set = sets.String{} index[indexValue] = set } set.Insert(key) } func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) { set := index[indexValue] if set == nil { return } set.Delete(key) // If we don't delete the set when zero, indices with high cardinality // short lived resources can cause memory to increase over time from // unused empty sets. See `kubernetes/kubernetes/issues/84959`. 
if len(set) == 0 { delete(index, indexValue) } } // threadSafeMap implements ThreadSafeStore type threadSafeMap struct { lock sync.RWMutex items map[string]interface{} // index implements the indexing functionality index *storeIndex } func (c *threadSafeMap) Add(key string, obj interface{}) { c.Update(key, obj) } func (c *threadSafeMap) Update(key string, obj interface{}) { c.lock.Lock() defer c.lock.Unlock() oldObject := c.items[key] c.items[key] = obj c.index.updateIndices(oldObject, obj, key) } func (c *threadSafeMap) Delete(key string) { c.lock.Lock() defer c.lock.Unlock() if obj, exists := c.items[key]; exists { c.index.updateIndices(obj, nil, key) delete(c.items, key) } } func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) { c.lock.RLock() defer c.lock.RUnlock() item, exists = c.items[key] return item, exists } func (c *threadSafeMap) List() []interface{} { c.lock.RLock() defer c.lock.RUnlock() list := make([]interface{}, 0, len(c.items)) for _, item := range c.items { list = append(list, item) } return list } // ListKeys returns a list of all the keys of the objects currently // in the threadSafeMap. func (c *threadSafeMap) ListKeys() []string { c.lock.RLock() defer c.lock.RUnlock() list := make([]string, 0, len(c.items)) for key := range c.items { list = append(list, key) } return list } func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { c.lock.Lock() defer c.lock.Unlock() c.items = items // rebuild any index c.index.reset() for key, item := range c.items { c.index.updateIndices(nil, item, key) } } // Index returns a list of items that match the given object on the index function. // Index is thread-safe so long as you treat all items as immutable. 
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { c.lock.RLock() defer c.lock.RUnlock() storeKeySet, err := c.index.getKeysFromIndex(indexName, obj) if err != nil { return nil, err } list := make([]interface{}, 0, storeKeySet.Len()) for storeKey := range storeKeySet { list = append(list, c.items[storeKey]) } return list, nil } // ByIndex returns a list of the items whose indexed values in the given index include the given indexed value func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) { c.lock.RLock() defer c.lock.RUnlock() set, err := c.index.getKeysByIndex(indexName, indexedValue) if err != nil { return nil, err } list := make([]interface{}, 0, set.Len()) for key := range set { list = append(list, c.items[key]) } return list, nil } // IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value. // IndexKeys is thread-safe so long as you treat all items as immutable. func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) { c.lock.RLock() defer c.lock.RUnlock() set, err := c.index.getKeysByIndex(indexName, indexedValue) if err != nil { return nil, err } return set.List(), nil } func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { c.lock.RLock() defer c.lock.RUnlock() return c.index.getIndexValues(indexName) } func (c *threadSafeMap) GetIndexers() Indexers { return c.index.indexers } func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { c.lock.Lock() defer c.lock.Unlock() if err := c.index.addIndexers(newIndexers); err != nil { return err } // If there are already items, index them for key, item := range c.items { for name := range newIndexers { c.index.updateSingleIndex(name, nil, item, key) } } return nil } func (c *threadSafeMap) Resync() error { // Nothing to do return nil } // NewThreadSafeStore creates a new instance of ThreadSafeStore. 
func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, index: &storeIndex{ indexers: indexers, indices: indices, }, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/reflector.go
vendor/k8s.io/client-go/tools/cache/reflector.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "context" "errors" "fmt" "io" "math/rand" "reflect" "strings" "sync" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/naming" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" "k8s.io/utils/pointer" "k8s.io/utils/ptr" "k8s.io/utils/trace" ) const defaultExpectedTypeName = "<unspecified>" var ( // We try to spread the load on apiserver by setting timeouts for // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. defaultMinWatchTimeout = 5 * time.Minute ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default, it will be a file:line if possible. name string // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. 
It is for display // only, and should not be used for parsing or comparison. typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and // `"kind"` must also be right. expectedType reflect.Type // The GVK of the object we expect to place in the store if unstructured. expectedGVK *schema.GroupVersionKind // The destination to sync up with the watch source store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher // backoff manages backoff of ListWatch backoffManager wait.BackoffManager resyncPeriod time.Duration // minWatchTimeout defines the minimum timeout for watch requests. minWatchTimeout time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. // It is set based on the result of the initial list call. paginatedResult bool // lastSyncResourceVersion is the resource version token last // observed when doing a sync with the underlying store // it is thread safe, but not synchronized with the underlying store lastSyncResourceVersion string // isLastSyncResourceVersionUnavailable is true if the previous list or watch request with // lastSyncResourceVersion failed with an "expired" or "too large resource version" error. isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex // Called whenever the ListAndWatch drops the connection with an error. watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. 
// If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") // it will turn off pagination to allow serving them from watch cache. // NOTE: It should be used carefully as paginated lists are always served directly from // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked ShouldResync func() bool // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. MaxInternalErrorRetryDuration time.Duration // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. // Streaming has the primary advantage of using fewer server's resources to fetch data. // // The old behaviour establishes a LIST request which gets data in chunks. // Paginated list is less efficient and depending on the actual size of objects // might result in an increased memory consumption of the APIServer. // // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details // // TODO(#115478): Consider making reflector.UseWatchList a private field. Since we implemented "api streaming" on the etcd storage layer it should work. UseWatchList *bool } func (r *Reflector) Name() string { return r.name } func (r *Reflector) TypeDescription() string { return r.typeDescription } // ResourceVersionUpdater is an interface that allows store implementation to // track the current resource version of the reflector. This is especially // important if storage bookmarks are enabled. type ResourceVersionUpdater interface { // UpdateResourceVersion is called each time current resource version of the reflector // is updated. 
UpdateResourceVersion(resourceVersion string) } // The WatchErrorHandler is called whenever ListAndWatch drops the // connection with an error. After calling this handler, the informer // will backoff and retry. // // The default implementation looks at the error type and tries to log // the error message at an appropriate level. // // Implementations of this handler may display the error message in other // ways. Implementations should return quickly - any expensive processing // should be offloaded. type WatchErrorHandler func(r *Reflector, err error) // DefaultWatchErrorHandler is the default implementation of WatchErrorHandler func DefaultWatchErrorHandler(r *Reflector, err error) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) return indexer, reflector } // NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack // that is outside this package. 
See NewReflectorWithOptions for further information. func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) } // NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further // information. func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) } // ReflectorOptions configures a Reflector. type ReflectorOptions struct { // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line // in the call stack that is outside this package. Name string // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is // "<unspecified>". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields // are set, the type description is the string encoding of those. Otherwise, the type description is set to the // go type of expectedType.. TypeDescription string // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 // (do not resync). ResyncPeriod time.Duration // MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests send to kube-apiserver. // However, values lower than 5m will not be honored to avoid negative performance impact on controlplane. MinWatchTimeout time.Duration // Clock allows tests to control time. 
If unset defaults to clock.RealClock{} Clock clock.Clock } // NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If // resyncPeriod is non-zero, then the reflector will periodically // consult its ShouldResync function to determine whether to invoke // the Store's Resync operation; `ShouldResync==nil` means always // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { reflectorClock := options.Clock if reflectorClock == nil { reflectorClock = clock.RealClock{} } minWatchTimeout := defaultMinWatchTimeout if options.MinWatchTimeout > defaultMinWatchTimeout { minWatchTimeout = options.MinWatchTimeout } r := &Reflector{ name: options.Name, resyncPeriod: options.ResyncPeriod, minWatchTimeout: minWatchTimeout, typeDescription: options.TypeDescription, listerWatcher: lw, store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), clock: reflectorClock, watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), expectedType: reflect.TypeOf(expectedType), } if r.name == "" { r.name = naming.GetNameFromCallsite(internalPackages...) 
} if r.typeDescription == "" { r.typeDescription = getTypeDescriptionFromObject(expectedType) } if r.expectedGVK == nil { r.expectedGVK = getExpectedGVKFromObject(expectedType) } // don't overwrite UseWatchList if already set // because the higher layers (e.g. storage/cacher) disabled it on purpose if r.UseWatchList == nil { r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)) } return r } func getTypeDescriptionFromObject(expectedType interface{}) string { if expectedType == nil { return defaultExpectedTypeName } reflectDescription := reflect.TypeOf(expectedType).String() obj, ok := expectedType.(*unstructured.Unstructured) if !ok { return reflectDescription } gvk := obj.GroupVersionKind() if gvk.Empty() { return reflectDescription } return gvk.String() } func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { obj, ok := expectedType.(*unstructured.Unstructured) if !ok { return nil } gvk := obj.GroupVersionKind() if gvk.Empty() { return nil } return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} // Run repeatedly uses the reflector's ListAndWatch to fetch all the // objects and subsequent deltas. // Run will exit when stopCh is closed. 
func (r *Reflector) Run(stopCh <-chan struct{}) { klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( // nothing will ever be sent down this channel neverExitWatch <-chan time.Time = make(chan time.Time) // Used to indicate that watching stopped because of a signal from the stop // channel passed in from a client of the reflector. errorStopRequested = errors.New("stop requested") ) // resyncChan returns a channel which will receive something when a resync is // required, and a cleanup function. func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { if r.resyncPeriod == 0 { return neverExitWatch, func() bool { return false } } // The cleanup function is required: imagine the scenario where watches // always fail so we end up listing frequently. Then, if we don't // manually stop the timer, we could end up with many timers active // concurrently. t := r.clock.NewTimer(r.resyncPeriod) return t.C(), t.Stop } // ListAndWatch first lists all items and get the resource version at the moment of call, // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. 
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) var err error var w watch.Interface useWatchList := ptr.Deref(r.UseWatchList, false) fallbackToList := !useWatchList if useWatchList { w, err = r.watchList(stopCh) if w == nil && err == nil { // stopCh was closed return nil } if err != nil { klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) fallbackToList = true // ensure that we won't accidentally pass some garbage down the watch. w = nil } } if fallbackToList { err = r.list(stopCh) if err != nil { return err } } klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) return r.watchWithResync(w, stopCh) } // startResync periodically calls r.store.Resync() method. // Note that this method is blocking and should be // called in a separate goroutine. func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { resyncCh, cleanup := r.resyncChan() defer func() { cleanup() // Call the last one written into cleanup }() for { select { case <-resyncCh: case <-stopCh: return case <-cancelCh: return } if r.ShouldResync == nil || r.ShouldResync() { klog.V(4).Infof("%s: forcing resync", r.name) if err := r.store.Resync(); err != nil { resyncerrc <- err return } } cleanup() resyncCh, cleanup = r.resyncChan() } } // watchWithResync runs watch with startResync in the background. func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error { resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) go r.startResync(stopCh, cancelCh, resyncerrc) return r.watch(w, stopCh, resyncerrc) } // watch simply starts a watch request with the server. 
func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { var err error retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: // we can only end up here when the stopCh // was closed after a successful watchlist or list request if w != nil { w.Stop() } return nil default: } // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() if w == nil { timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: r.LastSyncResourceVersion(), // We want to avoid situations of hanging watchers. Stop any watchers that do not // receive any events within the timeout window. TimeoutSeconds: &timeoutSeconds, // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. // Reflector doesn't assume bookmarks are returned at all (if the server do not support // watch bookmarks, it will ignore this field). AllowWatchBookmarks: true, } w, err = r.listerWatcher.Watch(options) if err != nil { if canRetry := isWatchErrorRetriable(err); canRetry { klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) select { case <-stopCh: return nil case <-r.backoffManager.Backoff().C(): continue } } return err } } err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) // Ensure that watch will not be reused across iterations. 
w.Stop() w = nil retry.After(err) if err != nil { if !errors.Is(err, errorStopRequested) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) select { case <-stopCh: return nil case <-r.backoffManager.Backoff().C(): continue } case apierrors.IsInternalError(err) && retry.ShouldRetry(): klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil } } } // list simply lists all items and records a resource version obtained from the server at the moment of the call. // the resource version can be used for further progress notification (aka. watch). func (r *Reflector) list(stopCh <-chan struct{}) error { var resourceVersion string options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name}) defer initTrace.LogIfLong(10 * time.Second) var list runtime.Object var paginatedResult bool var err error listCh := make(chan struct{}, 1) panicCh := make(chan interface{}, 1) go func() { defer func() { if r := recover(); r != nil { panicCh <- r } }() // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first // list request will return the full response. 
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { return r.listerWatcher.List(opts) })) switch { case r.WatchListPageSize != 0: pager.PageSize = r.WatchListPageSize case r.paginatedResult: // We got a paginated result initially. Assume this resource and server honor // paging requests (i.e. watch cache is probably disabled) and leave the default // pager size set. case options.ResourceVersion != "" && options.ResourceVersion != "0": // User didn't explicitly request pagination. // // With ResourceVersion != "", we have a possibility to list from watch cache, // but we do that (for ResourceVersion != "0") only if Limit is unset. // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly // switch off pagination to force listing from watch cache (if enabled). // With the existing semantic of RV (result is at least as fresh as provided RV), // this is correct and doesn't lead to going back in time. // // We also don't turn off pagination for ResourceVersion="0", since watch cache // is ignoring Limit in that case anyway, and if watch cache is not enabled // we don't introduce regression. pager.PageSize = 0 } list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options) if isExpiredError(err) || isTooLargeResourceVersionError(err) { r.setIsLastSyncResourceVersionUnavailable(true) // Retry immediately if the resource version used to list is unavailable. // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on // continuation pages, but the pager might not be enabled, the full list might fail because the // resource version it is listing at is expired or the cache may not yet be synced to the provided // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure // the reflector makes forward progress. 
list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) } close(listCh) }() select { case <-stopCh: return nil case r := <-panicCh: panic(r) case <-listCh: } initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) } // We check if the list was paginated and if so set the paginatedResult based on that. // However, we want to do that only for the initial list (which is the only case // when we set ResourceVersion="0"). The reasoning behind it is that later, in some // situations we may force listing directly from etcd (by setting ResourceVersion="") // which will return paginated result, even if watch cache is enabled. However, in // that case, we still want to prefer sending requests to watch cache if possible. // // Paginated result returned for request with ResourceVersion="0" mean that watch // cache is disabled and there are a lot of objects of a given type. In such case, // there is no need to prefer listing from watch cache. 
if options.ResourceVersion == "0" && paginatedResult { r.paginatedResult = true } r.setIsLastSyncResourceVersionUnavailable(false) // list was successful listMetaInterface, err := meta.ListAccessor(list) if err != nil { return fmt.Errorf("unable to understand list result %#v: %v", list, err) } resourceVersion = listMetaInterface.GetResourceVersion() initTrace.Step("Resource version extracted") items, err := meta.ExtractListWithAlloc(list) if err != nil { return fmt.Errorf("unable to understand list result %#v (%v)", list, err) } initTrace.Step("Objects extracted") if err := r.syncWith(items, resourceVersion); err != nil { return fmt.Errorf("unable to sync list result: %v", err) } initTrace.Step("SyncWith done") r.setLastSyncResourceVersion(resourceVersion) initTrace.Step("Resource version updated") return nil } // watchList establishes a stream to get a consistent snapshot of data // from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal // // case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) // Establishes a consistent stream with the server. // That means the returned data is consistent, as if, served directly from etcd via a quorum read. // It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. // It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. // After receiving a "Bookmark" event the reflector is considered to be synchronized. // It replaces its internal store with the collected items and // reuses the current watch requests for getting further events. // // case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) // Establishes a stream with the server at the provided resource version. // To establish the initial state the server begins with synthetic "Added" events. 
// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. // After receiving a "Bookmark" event the reflector is considered to be synchronized. // It replaces its internal store with the collected items and // reuses the current watch requests for getting further events. func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { var w watch.Interface var err error var temporaryStore Store var resourceVersion string // TODO(#115478): see if this function could be turned // into a method and see if error handling // could be unified with the r.watch method isErrorRetriableWithSideEffectsFn := func(err error) bool { if canRetry := isWatchErrorRetriable(err); canRetry { klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) <-r.backoffManager.Backoff().C() return true } if isExpiredError(err) || isTooLargeResourceVersionError(err) { // we tried to re-establish a watch request but the provided RV // has either expired or it is greater than the server knows about. // In that case we reset the RV and // try to get a consistent snapshot from the watch cache (case 1) r.setIsLastSyncResourceVersionUnavailable(true) return true } return false } initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) defer initTrace.LogIfLong(10 * time.Second) for { select { case <-stopCh: return nil, nil default: } resourceVersion = "" lastKnownRV := r.rewatchResourceVersion() temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) // TODO(#115478): large "list", slow clients, slow network, p&f // might slow down streaming and eventually fail. // maybe in such a case we should retry with an increased timeout? 
timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: lastKnownRV, AllowWatchBookmarks: true, SendInitialEvents: pointer.Bool(true), ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, TimeoutSeconds: &timeoutSeconds, } start := r.clock.Now() w, err = r.listerWatcher.Watch(options) if err != nil { if isErrorRetriableWithSideEffectsFn(err) { continue } return nil, err } watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, func(rv string) { resourceVersion = rv }, r.clock, make(chan error), stopCh) if err != nil { w.Stop() // stop and retry with clean state if errors.Is(err, errorStopRequested) { return nil, nil } if isErrorRetriableWithSideEffectsFn(err) { continue } return nil, err } if watchListBookmarkReceived { break } } // We successfully got initial state from watch-list confirmed by the // "k8s.io/initial-events-end" bookmark. initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) r.setIsLastSyncResourceVersionUnavailable(false) // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List) if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %w", err) } initTrace.Step("SyncWith done") r.setLastSyncResourceVersion(resourceVersion) return w, nil } // syncWith replaces the store's items with the given list. 
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) for _, item := range items { found = append(found, item) } return r.store.Replace(found, resourceVersion) } // handleListWatch consumes events from w, updates the Store, and records the // last seen ResourceVersion, to allow continuing from that ResourceVersion on // retry. If successful, the watcher will be left open after receiving the // initial set of objects, to allow watching for future events. func handleListWatch( start time.Time, w watch.Interface, store Store, expectedType reflect.Type, expectedGVK *schema.GroupVersionKind, name string, expectedTypeName string, setLastSyncResourceVersion func(string), clock clock.Clock, errCh chan error, stopCh <-chan struct{}, ) (bool, error) { exitOnWatchListBookmarkReceived := true return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) } // handleListWatch consumes events from w, updates the Store, and records the // last seen ResourceVersion, to allow continuing from that ResourceVersion on // retry. The watcher will always be stopped on exit. func handleWatch( start time.Time, w watch.Interface, store Store, expectedType reflect.Type, expectedGVK *schema.GroupVersionKind, name string, expectedTypeName string, setLastSyncResourceVersion func(string), clock clock.Clock, errCh chan error, stopCh <-chan struct{}, ) error { exitOnWatchListBookmarkReceived := false _, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) return err } // handleAnyWatch consumes events from w, updates the Store, and records the last // seen ResourceVersion, to allow continuing from that ResourceVersion on retry. 
// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed // until a bookmark event is received with the WatchList annotation present. // Returns true (watchListBookmarkReceived) if the WatchList bookmark was // received, even if exitOnWatchListBookmarkReceived is false. // The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is // true and watchListBookmarkReceived is true. This allows the same watch stream // to be re-used by the caller to continue watching for new events. func handleAnyWatch(start time.Time, w watch.Interface, store Store, expectedType reflect.Type, expectedGVK *schema.GroupVersionKind, name string, expectedTypeName string, setLastSyncResourceVersion func(string), exitOnWatchListBookmarkReceived bool, clock clock.Clock, errCh chan error, stopCh <-chan struct{}, ) (bool, error) { watchListBookmarkReceived := false eventCount := 0 initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived) defer initialEventsEndBookmarkWarningTicker.Stop() loop: for { select { case <-stopCh: return watchListBookmarkReceived, errorStopRequested case err := <-errCh: return watchListBookmarkReceived, err case event, ok := <-w.ResultChan(): if !ok { break loop } if event.Type == watch.Error {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
vendor/k8s.io/client-go/tools/cache/mutation_detector.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "fmt" "os" "reflect" "strconv" "sync" "time" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" ) var mutationDetectionEnabled = false func init() { mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) } // MutationDetector is able to monitor objects for mutation within a limited window of time type MutationDetector interface { // AddObject adds the given object to the set being monitored for a while from now AddObject(obj interface{}) // Run starts the monitoring and does not return until the monitoring is stopped. Run(stopCh <-chan struct{}) } // NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. func NewCacheMutationDetector(name string) MutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute} } type dummyMutationDetector struct{} func (dummyMutationDetector) Run(stopCh <-chan struct{}) { } func (dummyMutationDetector) AddObject(obj interface{}) { } // defaultCacheMutationDetector gives a way to detect if a cached object has been mutated // It has a list of cached objects and their copies. 
I haven't thought of a way // to see WHO is mutating it, just that it's getting mutated. type defaultCacheMutationDetector struct { name string period time.Duration // compareLock ensures only a single call to CompareObjects runs at a time compareObjectsLock sync.Mutex // addLock guards addedObjs between AddObject and CompareObjects addedObjsLock sync.Mutex addedObjs []cacheObj cachedObjs []cacheObj retainDuration time.Duration lastRotated time.Time retainedCachedObjs []cacheObj // failureFunc is injectable for unit testing. If you don't have it, the process will panic. // This panic is intentional, since turning on this detection indicates you want a strong // failure signal. This failure is effectively a p0 bug and you can't trust process results // after a mutation anyway. failureFunc func(message string) } // cacheObj holds the actual object and a copy type cacheObj struct { cached interface{} copied interface{} } func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { // we DON'T want protection from panics. If we're running this code, we want to die for { if d.lastRotated.IsZero() { d.lastRotated = time.Now() } else if time.Since(d.lastRotated) > d.retainDuration { d.retainedCachedObjs = d.cachedObjs d.cachedObjs = nil d.lastRotated = time.Now() } d.CompareObjects() select { case <-stopCh: return case <-time.After(d.period): } } } // AddObject makes a deep copy of the object for later comparison. 
It only works on runtime.Object // but that covers the vast majority of our cached objects func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { if _, ok := obj.(DeletedFinalStateUnknown); ok { return } if obj, ok := obj.(runtime.Object); ok { copiedObj := obj.DeepCopyObject() d.addedObjsLock.Lock() defer d.addedObjsLock.Unlock() d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj}) } } func (d *defaultCacheMutationDetector) CompareObjects() { d.compareObjectsLock.Lock() defer d.compareObjectsLock.Unlock() // move addedObjs into cachedObjs under lock // this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs d.addedObjsLock.Lock() d.cachedObjs = append(d.cachedObjs, d.addedObjs...) d.addedObjs = nil d.addedObjsLock.Unlock() altered := false for i, obj := range d.cachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } for i, obj := range d.retainedCachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } if altered { msg := fmt.Sprintf("cache %s modified", d.name) if d.failureFunc != nil { d.failureFunc(msg) return } panic(msg) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/store.go
vendor/k8s.io/client-go/tools/cache/store.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "fmt" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Store is a generic object storage and processing interface. A // Store holds a map from string keys to accumulators, and has // operations to add, update, and delete a given object to/from the // accumulator currently associated with a given key. A Store also // knows how to extract the key from a given object, so many operations // are given only the object. // // In the simplest Store implementations each accumulator is simply // the last given object, or empty after Delete, and thus the Store's // behavior is simple storage. // // Reflector knows how to watch a server and update a Store. This // package provides a variety of implementations of Store. 
type Store interface { // Add adds the given object to the accumulator associated with the given object's key Add(obj interface{}) error // Update updates the given object in the accumulator associated with the given object's key Update(obj interface{}) error // Delete deletes the given object from the accumulator associated with the given object's key Delete(obj interface{}) error // List returns a list of all the currently non-empty accumulators List() []interface{} // ListKeys returns a list of all the keys currently associated with non-empty accumulators ListKeys() []string // Get returns the accumulator associated with the given object's key Get(obj interface{}) (item interface{}, exists bool, err error) // GetByKey returns the accumulator associated with the given key GetByKey(key string) (item interface{}, exists bool, err error) // Replace will delete the contents of the store, using instead the // given list. Store takes ownership of the list, you should not reference // it after calling this function. Replace([]interface{}, string) error // Resync is meaningless in the terms appearing here but has // meaning in some implementations that have non-trivial // additional behavior (e.g., DeltaFIFO). Resync() error } // KeyFunc knows how to make a key from an object. Implementations should be deterministic. type KeyFunc func(obj interface{}) (string, error) // KeyError will be returned any time a KeyFunc gives an error; it includes the object // at fault. type KeyError struct { Obj interface{} Err error } // Error gives a human-readable description of the error. func (k KeyError) Error() string { return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err) } // Unwrap implements errors.Unwrap func (k KeyError) Unwrap() error { return k.Err } // ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for // the object but not the object itself. 
type ExplicitKey string // MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make // keys for API objects which implement meta.Interface. // The key uses the format <namespace>/<name> unless <namespace> is empty, then // it's just <name>. // // Clients that want a structured alternative can use ObjectToName or MetaObjectToName. // Note: this would not be a client that wants a key for a Store because those are // necessarily strings. // // TODO maybe some day?: change Store to be keyed differently func MetaNamespaceKeyFunc(obj interface{}) (string, error) { if key, ok := obj.(ExplicitKey); ok { return string(key), nil } objName, err := ObjectToName(obj) if err != nil { return "", err } return objName.String(), nil } // ObjectToName returns the structured name for the given object, // if indeed it can be viewed as a metav1.Object. func ObjectToName(obj interface{}) (ObjectName, error) { meta, err := meta.Accessor(obj) if err != nil { return ObjectName{}, fmt.Errorf("object has no meta: %v", err) } return MetaObjectToName(meta), nil } // MetaObjectToName returns the structured name for the given object func MetaObjectToName(obj metav1.Object) ObjectName { if len(obj.GetNamespace()) > 0 { return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()} } return ObjectName{Namespace: "", Name: obj.GetName()} } // SplitMetaNamespaceKey returns the namespace and name that // MetaNamespaceKeyFunc encoded into key. // // TODO: replace key-as-string with a key-as-struct so that this // packing/unpacking won't be necessary. func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { parts := strings.Split(key, "/") switch len(parts) { case 1: // name only, no namespace return "", parts[0], nil case 2: // namespace and name return parts[0], parts[1], nil } return "", "", fmt.Errorf("unexpected key format: %q", key) } // `*cache` implements Indexer in terms of a ThreadSafeStore and an // associated KeyFunc. 
type cache struct { // cacheStorage bears the burden of thread safety for the cache cacheStorage ThreadSafeStore // keyFunc is used to make the key for objects stored in and retrieved from items, and // should be deterministic. keyFunc KeyFunc } var _ Store = &cache{} // Add inserts an item into the cache. func (c *cache) Add(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.cacheStorage.Add(key, obj) return nil } // Update sets an item in the cache to its updated state. func (c *cache) Update(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.cacheStorage.Update(key, obj) return nil } // Delete removes an item from the cache. func (c *cache) Delete(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.cacheStorage.Delete(key) return nil } // List returns a list of all the items. // List is completely threadsafe as long as you treat all items as immutable. func (c *cache) List() []interface{} { return c.cacheStorage.List() } // ListKeys returns a list of all the keys of the objects currently // in the cache. func (c *cache) ListKeys() []string { return c.cacheStorage.ListKeys() } // GetIndexers returns the indexers of cache func (c *cache) GetIndexers() Indexers { return c.cacheStorage.GetIndexers() } // Index returns a list of items that match on the index function // Index is thread-safe so long as you treat all items as immutable func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { return c.cacheStorage.Index(indexName, obj) } // IndexKeys returns the storage keys of the stored objects whose set of // indexed values for the named index includes the given indexed value. // The returned keys are suitable to pass to GetByKey(). 
func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) { return c.cacheStorage.IndexKeys(indexName, indexedValue) } // ListIndexFuncValues returns the list of generated values of an Index func func (c *cache) ListIndexFuncValues(indexName string) []string { return c.cacheStorage.ListIndexFuncValues(indexName) } // ByIndex returns the stored objects whose set of indexed values // for the named index includes the given indexed value. func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) { return c.cacheStorage.ByIndex(indexName, indexedValue) } func (c *cache) AddIndexers(newIndexers Indexers) error { return c.cacheStorage.AddIndexers(newIndexers) } // Get returns the requested item, or sets exists=false. // Get is completely threadsafe as long as you treat all items as immutable. func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) { key, err := c.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } return c.GetByKey(key) } // GetByKey returns the request item, or exists=false. // GetByKey is completely threadsafe as long as you treat all items as immutable. func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) { item, exists = c.cacheStorage.Get(key) return item, exists, nil } // Replace will delete the contents of 'c', using instead the given list. // 'c' takes ownership of the list, you should not reference the list again // after calling this function. func (c *cache) Replace(list []interface{}, resourceVersion string) error { items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := c.keyFunc(item) if err != nil { return KeyError{item, err} } items[key] = item } c.cacheStorage.Replace(items, resourceVersion) return nil } // Resync is meaningless for one of these func (c *cache) Resync() error { return nil } // NewStore returns a Store implemented simply with a map and a lock. 
func NewStore(keyFunc KeyFunc) Store { return &cache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, } } // NewIndexer returns an Indexer implemented simply with a map and a lock. func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { return &cache{ cacheStorage: NewThreadSafeStore(indexers, Indices{}), keyFunc: keyFunc, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
vendor/k8s.io/client-go/tools/cache/delta_fifo.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "errors" "fmt" "sync" "time" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utiltrace "k8s.io/utils/trace" ) // DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are // optional. type DeltaFIFOOptions struct { // KeyFunction is used to figure out what key an object should have. (It's // exposed in the returned DeltaFIFO's KeyOf() method, with additional // handling around deleted objects and queue state). // Optional, the default is MetaNamespaceKeyFunc. KeyFunction KeyFunc // KnownObjects is expected to return a list of keys that the consumer of // this queue "knows about". It is used to decide which items are missing // when Replace() is called; 'Deleted' deltas are produced for the missing items. // KnownObjects may be nil if you can tolerate missing deletions on Replace(). KnownObjects KeyListerGetter // EmitDeltaTypeReplaced indicates that the queue consumer // understands the Replaced DeltaType. Before the `Replaced` event type was // added, calls to Replace() were handled the same as Sync(). For // backwards-compatibility purposes, this is false by default. // When true, `Replaced` events will be sent for items passed to a Replace() call. // When false, `Sync` events will be sent instead. EmitDeltaTypeReplaced bool // If set, will be called for objects before enqueueing them. Please // see the comment on TransformFunc for details. 
Transformer TransformFunc } // DeltaFIFO is like FIFO, but differs in two ways. One is that the // accumulator associated with a given object's key is not that object // but rather a Deltas, which is a slice of Delta values for that // object. Applying an object to a Deltas means to append a Delta // except when the potentially appended Delta is a Deleted and the // Deltas already ends with a Deleted. In that case the Deltas does // not grow, although the terminal Deleted will be replaced by the new // Deleted if the older Deleted's object is a // DeletedFinalStateUnknown. // // The other difference is that DeltaFIFO has two additional ways that // an object can be applied to an accumulator: Replaced and Sync. // If EmitDeltaTypeReplaced is not set to true, Sync will be used in // replace events for backwards compatibility. Sync is used for periodic // resync events. // // DeltaFIFO is a producer-consumer queue, where a Reflector is // intended to be the producer, and the consumer is whatever calls // the Pop() method. // // DeltaFIFO solves this use case: // - You want to process every object change (delta) at most once. // - When you process an object, you want to see everything // that's happened to it since you last processed it. // - You want to process the deletion of some of the objects. // - You might want to periodically reprocess objects. // // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they // will always return an object of type Deltas. List() returns // the newest object from each accumulator in the FIFO. // // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities // to list Store keys and to get objects by Store key. The objects in // question are called "known objects" and this set of objects // modifies the behavior of the Delete, Replace, and Resync methods // (each in a different way). 
// // A note on threading: If you call Pop() in parallel from multiple // threads, you could end up with multiple threads processing slightly // different versions of the same object. type DeltaFIFO struct { // lock/cond protects access to 'items' and 'queue'. lock sync.RWMutex cond sync.Cond // `items` maps a key to a Deltas. // Each such Deltas has at least one Delta. items map[string]Deltas // `queue` maintains FIFO order of keys for consumption in Pop(). // There are no duplicates in `queue`. // A key is in `queue` if and only if it is in `items`. queue []string // populated is true if the first batch of items inserted by Replace() has been populated // or Delete/Add/Update/AddIfNotPresent was called first. populated bool // initialPopulationCount is the number of items inserted by the first call of Replace() initialPopulationCount int // keyFunc is used to make the key used for queued item // insertion and retrieval, and should be deterministic. keyFunc KeyFunc // knownObjects list keys that are "known" --- affecting Delete(), // Replace(), and Resync() knownObjects KeyListerGetter // Used to indicate a queue is closed so a control loop can exit when a queue is empty. // Currently, not used to gate any of CRUD operations. closed bool // emitDeltaTypeReplaced is whether to emit the Replaced or Sync // DeltaType when Replace() is called (to preserve backwards compat). emitDeltaTypeReplaced bool // Called with every object if non-nil. transformer TransformFunc } // TransformFunc allows for transforming an object before it will be processed. // // The most common usage pattern is to clean-up some parts of the object to // reduce component memory usage if a given component doesn't care about them. // // New in v1.27: TransformFunc sees the object before any other actor, and it // is now safe to mutate the object in place instead of making a copy. // // It's recommended for the TransformFunc to be idempotent. 
// It MUST be idempotent if objects already present in the cache are passed to // the Replace() to avoid re-mutating them. Default informers do not pass // existing objects to Replace though. // // Note that TransformFunc is called while inserting objects into the // notification queue and is therefore extremely performance sensitive; please // do not do anything that will take a long time. type TransformFunc func(interface{}) (interface{}, error) // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string // Change type definition const ( Added DeltaType = "Added" Updated DeltaType = "Updated" Deleted DeltaType = "Deleted" // Replaced is emitted when we encountered watch errors and had to do a // relist. We don't know if the replaced object has changed. // // NOTE: Previous versions of DeltaFIFO would use Sync for Replace events // as well. Hence, Replaced is only emitted when the option // EmitDeltaTypeReplaced is true. Replaced DeltaType = "Replaced" // Sync is for synthetic events during a periodic resync. Sync DeltaType = "Sync" ) // Delta is a member of Deltas (a list of Delta objects) which // in its turn is the type stored by a DeltaFIFO. It tells you what // change happened, and the object's state after* that change. // // [*] Unless the change is a deletion, and then you'll get the final // state of the object before it was deleted. type Delta struct { Type DeltaType Object interface{} } // Deltas is a list of one or more 'Delta's to an individual object. // The oldest delta is at index 0, the newest delta is the last one. type Deltas []Delta // NewDeltaFIFO returns a Queue which can be used to process changes to items. // // keyFunc is used to figure out what key an object should have. (It is // exposed in the returned DeltaFIFO's KeyOf() method, with additional handling // around deleted objects and queue state). // // 'knownObjects' may be supplied to modify the behavior of Delete, // Replace, and Resync. 
It may be nil if you do not need those // modifications. // // TODO: consider merging keyLister with this object, tracking a list of // "known" keys when Pop() is called. Have to think about how that // affects error retrying. // // NOTE: It is possible to misuse this and cause a race when using an // external known object source. // Whether there is a potential race depends on how the consumer // modifies knownObjects. In Pop(), process function is called under // lock, so it is safe to update data structures in it that need to be // in sync with the queue (e.g. knownObjects). // // Example: // In case of sharedIndexInformer being a consumer // (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), // there is no race as knownObjects (s.indexer) is modified safely // under DeltaFIFO's lock. The only exceptions are GetStore() and // GetIndexer() methods, which expose ways to modify the underlying // storage. Currently these two methods are used for creating Lister // and internal tests. // // Also see the comment on DeltaFIFO. // // Warning: This constructs a DeltaFIFO that does not differentiate between // events caused by a call to Replace (e.g., from a relist, which may // contain object updates), and synthetic events caused by a periodic resync // (which just emit the existing object). See https://issue.k8s.io/86015 for details. // // Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})` // instead to receive a `Replaced` event depending on the type. // // Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects}) func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KeyFunction: keyFunc, KnownObjects: knownObjects, }) } // NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to // items. 
See also the comment on DeltaFIFO. func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { if opts.KeyFunction == nil { opts.KeyFunction = MetaNamespaceKeyFunc } f := &DeltaFIFO{ items: map[string]Deltas{}, queue: []string{}, keyFunc: opts.KeyFunction, knownObjects: opts.KnownObjects, emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, transformer: opts.Transformer, } f.cond.L = &f.lock return f } var ( _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue ) var ( // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas // object with zero length is encountered (should be impossible, // but included for completeness). ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") ) // Close the queue. func (f *DeltaFIFO) Close() { f.lock.Lock() defer f.lock.Unlock() f.closed = true f.cond.Broadcast() } // KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or // DeletedFinalStateUnknown objects. func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { if d, ok := obj.(Deltas); ok { if len(d) == 0 { return "", KeyError{obj, ErrZeroLengthDeltasObject} } obj = d.Newest().Object } if d, ok := obj.(DeletedFinalStateUnknown); ok { return d.Key, nil } return f.keyFunc(obj) } // HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or the first batch of items inserted by Replace() has been popped. func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() return f.hasSynced_locked() } func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } // Add inserts an item, and puts it in the queue. The item is only enqueued // if it doesn't already exist in the set. func (f *DeltaFIFO) Add(obj interface{}) error { f.lock.Lock() defer f.lock.Unlock() f.populated = true return f.queueActionLocked(Added, obj) } // Update is just like Add, but makes an Updated Delta. 
func (f *DeltaFIFO) Update(obj interface{}) error { f.lock.Lock() defer f.lock.Unlock() f.populated = true return f.queueActionLocked(Updated, obj) } // Delete is just like Add, but makes a Deleted Delta. If the given // object does not already exist, it will be ignored. (It may have // already been deleted by a Replace (re-list), for example.) In this // method `f.knownObjects`, if not nil, provides (via GetByKey) // _additional_ objects that are considered to already exist. func (f *DeltaFIFO) Delete(obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true if f.knownObjects == nil { if _, exists := f.items[id]; !exists { // Presumably, this was deleted when a relist happened. // Don't provide a second report of the same deletion. return nil } } else { // We only want to skip the "deletion" action if the object doesn't // exist in knownObjects and it doesn't have corresponding item in items. // Note that even if there is a "deletion" action in items, we can ignore it, // because it will be deduped automatically in "queueActionLocked" _, exists, err := f.knownObjects.GetByKey(id) _, itemsExist := f.items[id] if err == nil && !exists && !itemsExist { // Presumably, this was deleted when a relist happened. // Don't provide a second report of the same deletion. return nil } } // exist in items and/or KnownObjects return f.queueActionLocked(Deleted, obj) } // AddIfNotPresent inserts an item, and puts it in the queue. If the item is already // present in the set, it is neither enqueued nor added to the set. // // This is useful in a single producer/consumer scenario so that the consumer can // safely retry items without contending with the producer and potentially enqueueing // stale items. // // Important: obj must be a Deltas (the output of the Pop() function). Yes, this is // different from the Add/Update/Delete functions. 
func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { deltas, ok := obj.(Deltas) if !ok { return fmt.Errorf("object must be of type deltas, but got: %#v", obj) } id, err := f.KeyOf(deltas) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.addIfNotPresent(id, deltas) return nil } // addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller // already holds the fifo lock. func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) { f.populated = true if _, exists := f.items[id]; exists { return } f.queue = append(f.queue, id) f.items[id] = deltas f.cond.Broadcast() } // re-listing and watching can deliver the same update multiple times in any // order. This will combine the most recent two deltas if they are the same. func dedupDeltas(deltas Deltas) Deltas { n := len(deltas) if n < 2 { return deltas } a := &deltas[n-1] b := &deltas[n-2] if out := isDup(a, b); out != nil { deltas[n-2] = *out return deltas[:n-1] } return deltas } // If a & b represent the same event, returns the delta that ought to be kept. // Otherwise, returns nil. // TODO: is there anything other than deletions that need deduping? func isDup(a, b *Delta) *Delta { if out := isDeletionDup(a, b); out != nil { return out } // TODO: Detect other duplicate situations? Are there any? return nil } // keep the one with the most information if both are deletions. func isDeletionDup(a, b *Delta) *Delta { if b.Type != Deleted || a.Type != Deleted { return nil } // Do more sophisticated checks, or is this sufficient? if _, ok := b.Object.(DeletedFinalStateUnknown); ok { return a } return b } // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { return f.queueActionInternalLocked(actionType, actionType, obj) } // queueActionInternalLocked appends to the delta list for the object. 
// The actionType is emitted and must honor emitDeltaTypeReplaced. // The internalActionType is only used within this function and must // ignore emitDeltaTypeReplaced. // Caller must lock first. func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } // Every object comes through this code path once, so this is a good // place to call the transform func. // // If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync, // then the object have already gone through the transformer. // // If the objects already present in the cache are passed to Replace(), // the transformer must be idempotent to avoid re-mutating them, // or coordinate with all readers from the cache to avoid data races. // Default informers do not pass existing objects to Replace. if f.transformer != nil { _, isTombstone := obj.(DeletedFinalStateUnknown) if !isTombstone && internalActionType != Sync { var err error obj, err = f.transformer(obj) if err != nil { return err } } } oldDeltas := f.items[id] newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) if len(newDeltas) > 0 { if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = newDeltas f.cond.Broadcast() } else { // This never happens, because dedupDeltas never returns an empty list // when given a non-empty list (as it is here). // If somehow it happens anyway, deal with it but complain. 
if oldDeltas == nil { klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) return nil } klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) f.items[id] = newDeltas return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) } return nil } // List returns a list of all the items; it returns the object // from the most recent Delta. // You should treat the items returned inside the deltas as immutable. func (f *DeltaFIFO) List() []interface{} { f.lock.RLock() defer f.lock.RUnlock() return f.listLocked() } func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { list = append(list, item.Newest().Object) } return list } // ListKeys returns a list of all the keys of the objects currently // in the FIFO. func (f *DeltaFIFO) ListKeys() []string { f.lock.RLock() defer f.lock.RUnlock() list := make([]string, 0, len(f.queue)) for _, key := range f.queue { list = append(list, key) } return list } // Get returns the complete list of deltas for the requested item, // or sets exists=false. // You should treat the items returned inside the deltas as immutable. func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { key, err := f.KeyOf(obj) if err != nil { return nil, false, KeyError{obj, err} } return f.GetByKey(key) } // GetByKey returns the complete list of deltas for the requested item, // setting exists=false if that list is empty. // You should treat the items returned inside the deltas as immutable. func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { f.lock.RLock() defer f.lock.RUnlock() d, exists := f.items[key] if exists { // Copy item's slice so operations on this slice // won't interfere with the object we return. 
d = copyDeltas(d) } return d, exists, nil } // IsClosed checks if the queue is closed func (f *DeltaFIFO) IsClosed() bool { f.lock.Lock() defer f.lock.Unlock() return f.closed } // Pop blocks until the queue has some items, and then returns one. If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it // is returned, so if you don't successfully process it, you need to add it back // with AddIfNotPresent(). // process function is called under lock, so it is safe to update data structures // in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc // may return an instance of ErrRequeue with a nested error to indicate the current // item should be requeued (equivalent to calling AddIfNotPresent under the lock). // process should avoid expensive I/O operation so that other queue operations, i.e. // Add() and Get(), won't be blocked for too long. // // Pop returns a 'Deltas', which has a complete list of all the things // that happened to the object (deltas) while it was sitting in the queue. func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.lock.Lock() defer f.lock.Unlock() for { for len(f.queue) == 0 { // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.closed { return nil, ErrFIFOClosed } f.cond.Wait() } isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) if f.initialPopulationCount > 0 { f.initialPopulationCount-- } item, ok := f.items[id] if !ok { // This should never happen klog.Errorf("Inconceivable! 
%q was in f.queue but not f.items; ignoring.", id) continue } delete(f.items, id) // Only log traces if the queue depth is greater than 10 and it takes more than // 100 milliseconds to process one item from the queue. // Queue depth never goes high because processing an item is locking the queue, // and new items can't be added until processing finish. // https://github.com/kubernetes/kubernetes/issues/103789 if depth > 10 { trace := utiltrace.New("DeltaFIFO Pop Process", utiltrace.Field{Key: "ID", Value: id}, utiltrace.Field{Key: "Depth", Value: depth}, utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err } // Don't need to copyDeltas here, because we're transferring // ownership to the caller. return item, err } } // Replace atomically does two things: (1) it adds the given objects // using the Sync or Replace DeltaType and then (2) it does some deletions. // In particular: for every pre-existing key K that is not the key of // an object in `list` there is the effect of // `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known // object of K. The pre-existing keys are those in the union set of the keys in // `f.items` and `f.knownObjects` (if not nil). The last known object for key K is // the one present in the last delta in `f.items`. If there is no delta for K // in `f.items`, it is the object in `f.knownObjects` func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { f.lock.Lock() defer f.lock.Unlock() keys := make(sets.String, len(list)) // keep backwards compat for old clients action := Sync if f.emitDeltaTypeReplaced { action = Replaced } // Add Sync/Replaced action for each new item. 
for _, item := range list { key, err := f.KeyOf(item) if err != nil { return KeyError{item, err} } keys.Insert(key) if err := f.queueActionInternalLocked(action, Replaced, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } // Do deletion detection against objects in the queue queuedDeletions := 0 for k, oldItem := range f.items { if keys.Has(k) { continue } // Delete pre-existing items not in the new list. // This could happen if watch deletion event was missed while // disconnected from apiserver. var deletedObj interface{} if n := oldItem.Newest(); n != nil { deletedObj = n.Object // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object if d, ok := deletedObj.(DeletedFinalStateUnknown); ok { deletedObj = d.Obj } } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } } if f.knownObjects != nil { // Detect deletions for objects not present in the queue, but present in KnownObjects knownKeys := f.knownObjects.ListKeys() for _, k := range knownKeys { if keys.Has(k) { continue } if len(f.items[k]) > 0 { continue } deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } } } if !f.populated { f.populated = true f.initialPopulationCount = keys.Len() + queuedDeletions } return nil } // Resync adds, with a Sync type of Delta, every object listed by // `f.knownObjects` whose key is not already queued for processing. // If `f.knownObjects` is `nil` then Resync does nothing. 
func (f *DeltaFIFO) Resync() error { f.lock.Lock() defer f.lock.Unlock() if f.knownObjects == nil { return nil } keys := f.knownObjects.ListKeys() for _, k := range keys { if err := f.syncKeyLocked(k); err != nil { return err } } return nil } func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } // If we are doing Resync() and there is already an event queued for that object, // we ignore the Resync for it. This is to avoid the race, in which the resync // comes with the previous value of object (since queueing an event for the object // doesn't trigger changing the underlying store <knownObjects>. id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } if len(f.items[id]) > 0 { return nil } if err := f.queueActionLocked(Sync, obj); err != nil { return fmt.Errorf("couldn't queue object: %v", err) } return nil } // A KeyListerGetter is anything that knows how to list its keys and look up by key. type KeyListerGetter interface { KeyLister KeyGetter } // A KeyLister is anything that knows how to list its keys. type KeyLister interface { ListKeys() []string } // A KeyGetter is anything that knows how to get the value stored under a given key. type KeyGetter interface { // GetByKey returns the value associated with the key, or sets exists=false. GetByKey(key string) (value interface{}, exists bool, err error) } // Oldest is a convenience function that returns the oldest delta, or // nil if there are no deltas. func (d Deltas) Oldest() *Delta { if len(d) > 0 { return &d[0] } return nil } // Newest is a convenience function that returns the newest delta, or // nil if there are no deltas. 
func (d Deltas) Newest() *Delta { if n := len(d); n > 0 { return &d[n-1] } return nil } // copyDeltas returns a shallow copy of d; that is, it copies the slice but not // the objects in the slice. This allows Get/List to return an object that we // know won't be clobbered by a subsequent modifications. func copyDeltas(d Deltas) Deltas { d2 := make(Deltas, len(d)) copy(d2, d) return d2 } // DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object // was deleted but the watch deletion event was missed while disconnected from // apiserver. In this case we don't know the final "resting" state of the object, so // there's a chance the included `Obj` is stale. type DeletedFinalStateUnknown struct { Key string Obj interface{} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/controller.go
vendor/k8s.io/client-go/tools/cache/controller.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "errors" "sync" "time" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/clock" ) // This file implements a low-level controller that is used in // sharedIndexInformer, which is an implementation of // SharedIndexInformer. Such informers, in turn, are key components // in the high level controllers that form the backbone of the // Kubernetes control plane. Look at those for examples, or the // example in // https://github.com/kubernetes/client-go/tree/master/examples/workqueue // . // Config contains all the settings for one of these low-level controllers. type Config struct { // The queue for your objects - has to be a DeltaFIFO due to // assumptions in the implementation. Your Process() function // should accept the output of this Queue's Pop() method. Queue // Something that can list and watch your objects. ListerWatcher // Something that can process a popped Deltas. Process ProcessFunc // ObjectType is an example object of the type this controller is // expected to handle. ObjectType runtime.Object // ObjectDescription is the description to use when logging type-specific information about this controller. ObjectDescription string // FullResyncPeriod is the period at which ShouldResync is considered. 
FullResyncPeriod time.Duration // MinWatchTimeout, if set, will define the minimum timeout for watch requests send // to kube-apiserver. However, values lower than 5m will not be honored to avoid // negative performance impact on controlplane. // Optional - if unset a default value of 5m will be used. MinWatchTimeout time.Duration // ShouldResync is periodically used by the reflector to determine // whether to Resync the Queue. If ShouldResync is `nil` or // returns true, it means the reflector should proceed with the // resync. ShouldResync ShouldResyncFunc // If true, when Process() returns an error, re-enqueue the object. // TODO: add interface to let you inject a delay/backoff or drop // the object completely if desired. Pass the object in // question to this interface as a parameter. This is probably moot // now that this functionality appears at a higher level. RetryOnError bool // Called whenever the ListAndWatch drops the connection with an error. WatchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and relist watch lists. WatchListPageSize int64 } // ShouldResyncFunc is a type of function that indicates if a reflector should perform a // resync or not. It can be used by a shared informer to support multiple event handlers with custom // resync periods. type ShouldResyncFunc func() bool // ProcessFunc processes a single object. type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { config Config reflector *Reflector reflectorMutex sync.RWMutex clock clock.Clock } // Controller is a low-level controller that is parameterized by a // Config and used in sharedIndexInformer. type Controller interface { // Run does two things. One is to construct and run a Reflector // to pump objects/notifications from the Config's ListerWatcher // to the Config's Queue and possibly invoke the occasional Resync // on that Queue. 
The other is to repeatedly Pop from the Queue // and process with the Config's ProcessFunc. Both of these // continue until `stopCh` is closed. Run(stopCh <-chan struct{}) // HasSynced delegates to the Config's Queue HasSynced() bool // LastSyncResourceVersion delegates to the Reflector when there // is one, otherwise returns the empty string LastSyncResourceVersion() string } // New makes a new Controller from the given Config. func New(c *Config) Controller { ctlr := &controller{ config: *c, clock: &clock.RealClock{}, } return ctlr } // Run begins processing items, and will continue until a value is sent down stopCh or it is closed. // It's an error to call Run more than once. // Run blocks; call via go. func (c *controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() go func() { <-stopCh c.config.Queue.Close() }() r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, ReflectorOptions{ ResyncPeriod: c.config.FullResyncPeriod, MinWatchTimeout: c.config.MinWatchTimeout, TypeDescription: c.config.ObjectDescription, Clock: c.clock, }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } c.reflectorMutex.Lock() c.reflector = r c.reflectorMutex.Unlock() var wg wait.Group wg.StartWithChannel(stopCh, r.Run) wait.Until(c.processLoop, time.Second, stopCh) wg.Wait() } // Returns true once this controller has completed an initial resource listing func (c *controller) HasSynced() bool { return c.config.Queue.HasSynced() } func (c *controller) LastSyncResourceVersion() string { c.reflectorMutex.RLock() defer c.reflectorMutex.RUnlock() if c.reflector == nil { return "" } return c.reflector.LastSyncResourceVersion() } // processLoop drains the work queue. // TODO: Consider doing the processing in parallel. 
This will require a little thought // to make sure that we don't end up processing the same object multiple times // concurrently. // // TODO: Plumb through the stopCh here (and down to the queue) so that this can // actually exit when the controller is stopped. Or just give up on this stuff // ever being stoppable. Converting this whole package to use Context would // also be helpful. func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { if err == ErrFIFOClosed { return } if c.config.RetryOnError { // This is the safe way to re-enqueue. c.config.Queue.AddIfNotPresent(obj) } } } } // ResourceEventHandler can handle notifications for events that // happen to a resource. The events are informational only, so you // can't return an error. The handlers MUST NOT modify the objects // received; this concerns not only the top level of structure but all // the data structures reachable from it. // - OnAdd is called when an object is added. // - OnUpdate is called when an object is modified. Note that oldObj is the // last known state of the object-- it is possible that several changes // were combined together, so you can't use this to see every single // change. OnUpdate is also called when a re-list happens, and it will // get called even if nothing changed. This is useful for periodically // evaluating or syncing something. // - OnDelete will get the final state of the item if it is known, otherwise // it will get an object of type DeletedFinalStateUnknown. This can // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. 
type ResourceEventHandler interface { OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } // ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. // // See ResourceEventHandlerDetailedFuncs if your use needs to propagate // HasSynced. type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) DeleteFunc func(obj interface{}) } // OnAdd calls AddFunc if it's not nil. func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } } // OnUpdate calls UpdateFunc if it's not nil. func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { if r.UpdateFunc != nil { r.UpdateFunc(oldObj, newObj) } } // OnDelete calls DeleteFunc if it's not nil. func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { if r.DeleteFunc != nil { r.DeleteFunc(obj) } } // ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs // except its AddFunc accepts the isInInitialList parameter, for propagating // HasSynced. type ResourceEventHandlerDetailedFuncs struct { AddFunc func(obj interface{}, isInInitialList bool) UpdateFunc func(oldObj, newObj interface{}) DeleteFunc func(obj interface{}) } // OnAdd calls AddFunc if it's not nil. func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj, isInInitialList) } } // OnUpdate calls UpdateFunc if it's not nil. func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { if r.UpdateFunc != nil { r.UpdateFunc(oldObj, newObj) } } // OnDelete calls DeleteFunc if it's not nil. 
func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { if r.DeleteFunc != nil { r.DeleteFunc(obj) } } // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an // object that stops passing the filter after an update is considered a delete. // Like the handlers, the filter MUST NOT modify the objects it is given. type FilteringResourceEventHandler struct { FilterFunc func(obj interface{}) bool Handler ResourceEventHandler } // OnAdd calls the nested handler only if the filter succeeds func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { newer := r.FilterFunc(newObj) older := r.FilterFunc(oldObj) switch { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: // do nothing } } // OnDelete calls the nested handler only if the filter succeeds func (r FilteringResourceEventHandler) OnDelete(obj interface{}) { if !r.FilterFunc(obj) { return } r.Handler.OnDelete(obj) } // DeletionHandlingMetaNamespaceKeyFunc checks for // DeletedFinalStateUnknown objects before calling // MetaNamespaceKeyFunc. func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { if d, ok := obj.(DeletedFinalStateUnknown); ok { return d.Key, nil } return MetaNamespaceKeyFunc(obj) } // DeletionHandlingObjectToName checks for // DeletedFinalStateUnknown objects before calling // ObjectToName. 
func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { if d, ok := obj.(DeletedFinalStateUnknown); ok { return ParseObjectName(d.Key) } return ObjectToName(obj) } // InformerOptions configure a Reflector. type InformerOptions struct { // ListerWatcher implements List and Watch functions for the source of the resource // the informer will be informing about. ListerWatcher ListerWatcher // ObjectType is an object of the type that informer is expected to receive. ObjectType runtime.Object // Handler defines functions that should called on object mutations. Handler ResourceEventHandler // ResyncPeriod is the underlying Reflector's resync period. If non-zero, the store // is re-synced with that frequency - Modify events are delivered even if objects // didn't change. // This is useful for synchronizing objects that configure external resources // (e.g. configure cloud provider functionalities). // Optional - if unset, store resyncing is not happening periodically. ResyncPeriod time.Duration // MinWatchTimeout, if set, will define the minimum timeout for watch requests send // to kube-apiserver. However, values lower than 5m will not be honored to avoid // negative performance impact on controlplane. // Optional - if unset a default value of 5m will be used. MinWatchTimeout time.Duration // Indexers, if set, are the indexers for the received objects to optimize // certain queries. // Optional - if unset no indexes are maintained. Indexers Indexers // Transform function, if set, will be called on all objects before they will be // put into the Store and corresponding Add/Modify/Delete handlers will be invoked // for them. // Optional - if unset no additional transforming is happening. Transform TransformFunc } // NewInformerWithOptions returns a Store and a controller for populating the store // while also providing event notifications. 
You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event // notifications to be faulty. func NewInformerWithOptions(options InformerOptions) (Store, Controller) { var clientState Store if options.Indexers == nil { clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc) } else { clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers) } return clientState, newInformer(clientState, options) } // NewInformer returns a Store and a controller for populating the store // while also providing event notifications. You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event // notifications to be faulty. // // Parameters: // - lw is list and watch functions for the source of the resource you want to // be informed of. // - objType is an object of the type that you expect to receive. // - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate // calls, even if nothing changed). Otherwise, re-list will be delayed as // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // - h is the object you want notifications sent to. // // Deprecated: Use NewInformerWithOptions instead. func NewInformer( lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h ResourceEventHandler, ) (Store, Controller) { // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) options := InformerOptions{ ListerWatcher: lw, ObjectType: objType, Handler: h, ResyncPeriod: resyncPeriod, } return clientState, newInformer(clientState, options) } // NewIndexerInformer returns an Indexer and a Controller for populating the index // while also providing event notifications. You should only used the returned // Index for Get/List operations; Add/Modify/Deletes will cause the event // notifications to be faulty. 
//
// Parameters:
//   - lw is list and watch functions for the source of the resource you want to
//     be informed of.
//   - objType is an object of the type that you expect to receive.
//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//     calls, even if nothing changed). Otherwise, re-list will be delayed as
//     long as possible (until the upstream source closes the watch or times out,
//     or you stop the controller).
//   - h is the object you want notifications sent to.
//   - indexers is the indexer for the received object type.
//
// Deprecated: Use NewInformerWithOptions instead.
func NewIndexerInformer(
	lw ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	indexers Indexers,
) (Indexer, Controller) {
	// This will hold the client state, as we know it.
	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

	options := InformerOptions{
		ListerWatcher: lw,
		ObjectType:    objType,
		Handler:       h,
		ResyncPeriod:  resyncPeriod,
		Indexers:      indexers,
	}
	return clientState, newInformer(clientState, options)
}

// NewTransformingInformer returns a Store and a controller for populating
// the store while also providing event notifications. You should only use
// the returned Store for Get/List operations; Add/Modify/Deletes will cause
// the event notifications to be faulty.
// The given transform function will be called on all objects before they will be
// put into the Store and corresponding Add/Modify/Delete handlers will
// be invoked for them.
//
// Deprecated: Use NewInformerWithOptions instead.
func NewTransformingInformer(
	lw ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	transformer TransformFunc,
) (Store, Controller) {
	// This will hold the client state, as we know it.
	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	options := InformerOptions{
		ListerWatcher: lw,
		ObjectType:    objType,
		Handler:       h,
		ResyncPeriod:  resyncPeriod,
		Transform:     transformer,
	}
	return clientState, newInformer(clientState, options)
}

// NewTransformingIndexerInformer returns an Indexer and a controller for
// populating the index while also providing event notifications. You should
// only use the returned Index for Get/List operations; Add/Modify/Deletes
// will cause the event notifications to be faulty.
// The given transform function will be called on all objects before they will
// be put into the Index and corresponding Add/Modify/Delete handlers will
// be invoked for them.
//
// Deprecated: Use NewInformerWithOptions instead.
func NewTransformingIndexerInformer(
	lw ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	indexers Indexers,
	transformer TransformFunc,
) (Indexer, Controller) {
	// This will hold the client state, as we know it.
	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

	options := InformerOptions{
		ListerWatcher: lw,
		ObjectType:    objType,
		Handler:       h,
		ResyncPeriod:  resyncPeriod,
		Indexers:      indexers,
		Transform:     transformer,
	}
	return clientState, newInformer(clientState, options)
}

// Multiplexes updates in the form of a list of Deltas into a Store, and informs
// a given handler of events OnUpdate, OnAdd, OnDelete
func processDeltas(
	// Object which receives event notifications from the given deltas
	handler ResourceEventHandler,
	clientState Store,
	deltas Deltas,
	isInInitialList bool,
) error {
	// from oldest to newest
	for _, d := range deltas {
		obj := d.Object

		switch d.Type {
		case Sync, Replaced, Added, Updated:
			// An existing entry means this is an update; otherwise it's an add.
			if old, exists, err := clientState.Get(obj); err == nil && exists {
				if err := clientState.Update(obj); err != nil {
					return err
				}
				handler.OnUpdate(old, obj)
			} else {
				if err := clientState.Add(obj); err != nil {
					return err
				}
				handler.OnAdd(obj, isInInitialList)
			}
		case Deleted:
			if err := clientState.Delete(obj); err != nil {
				return err
			}
			handler.OnDelete(obj)
		}
	}
	return nil
}

// newInformer returns a controller for populating the store while also
// providing event notifications.
//
// Parameters
//   - clientState is the store you want to populate
//   - options contain the options to configure the controller
func newInformer(clientState Store, options InformerOptions) Controller {
	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
		KnownObjects:          clientState,
		EmitDeltaTypeReplaced: true,
		Transformer:           options.Transform,
	})

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    options.ListerWatcher,
		ObjectType:       options.ObjectType,
		FullResyncPeriod: options.ResyncPeriod,
		MinWatchTimeout:  options.MinWatchTimeout,
		RetryOnError:     false,

		Process: func(obj interface{}, isInInitialList bool) error {
			if deltas, ok := obj.(Deltas); ok {
				return processDeltas(options.Handler, clientState, deltas, isInInitialList)
			}
			return errors.New("object given as Process argument is not Deltas")
		},
	}
	return New(cfg)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/undelta_store.go
vendor/k8s.io/client-go/tools/cache/undelta_store.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache // UndeltaStore listens to incremental updates and sends complete state on every change. // It implements the Store interface so that it can receive a stream of mirrored objects // from Reflector. Whenever it receives any complete (Store.Replace) or incremental change // (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc. // It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results // in one call to PushFunc, but sometimes PushFunc may be called twice with the same values. // PushFunc should be thread safe. type UndeltaStore struct { Store PushFunc func([]interface{}) } // Assert that it implements the Store interface. var _ Store = &UndeltaStore{} // Add inserts an object into the store and sends complete state by calling PushFunc. // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} // and the List. So, the following can happen, resulting in two identical calls to PushFunc. 
// time thread 1 thread 2 // 0 UndeltaStore.Add(a) // 1 UndeltaStore.Add(b) // 2 Store.Add(a) // 3 Store.Add(b) // 4 Store.List() -> [a,b] // 5 Store.List() -> [a,b] func (u *UndeltaStore) Add(obj interface{}) error { if err := u.Store.Add(obj); err != nil { return err } u.PushFunc(u.Store.List()) return nil } // Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. func (u *UndeltaStore) Update(obj interface{}) error { if err := u.Store.Update(obj); err != nil { return err } u.PushFunc(u.Store.List()) return nil } // Delete removes an item from the cache and sends complete state by calling PushFunc. func (u *UndeltaStore) Delete(obj interface{}) error { if err := u.Store.Delete(obj); err != nil { return err } u.PushFunc(u.Store.List()) return nil } // Replace will delete the contents of current store, using instead the given list. // 'u' takes ownership of the list, you should not reference the list again // after calling this function. // The new contents complete state will be sent by calling PushFunc after replacement. func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { if err := u.Store.Replace(list, resourceVersion); err != nil { return err } u.PushFunc(u.Store.List()) return nil } // NewUndeltaStore returns an UndeltaStore implemented with a Store. func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore { return &UndeltaStore{ Store: NewStore(keyFunc), PushFunc: pushFunc, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/consistencydetector" ) // checkWatchListDataConsistencyIfRequested performs a data consistency check only when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. // // The consistency check is meant to be enforced only in the CI, not in production. // The check ensures that data retrieved by the watch-list api call // is exactly the same as data received by the standard list api call against etcd. // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } // for informers we pass an empty ListOptions because // listFn might be wrapped for filtering during informer construction. consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/doc.go
vendor/k8s.io/client-go/tools/cache/doc.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package cache is a client-side caching mechanism. It is useful for // reducing the number of server calls you'd otherwise need to make. // Reflector watches a server and updates a Store. Two stores are provided; // one that simply caches objects (for example, to allow a scheduler to // list currently available nodes), and one that additionally acts as // a FIFO queue (for example, to allow a scheduler to process incoming // pods). package cache // import "k8s.io/client-go/tools/cache"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/k8s.io/client-go/tools/cache/object-names.go
vendor/k8s.io/client-go/tools/cache/object-names.go
/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "k8s.io/apimachinery/pkg/types" ) // ObjectName is a reference to an object of some implicit kind type ObjectName struct { Namespace string Name string } // NewObjectName constructs a new one func NewObjectName(namespace, name string) ObjectName { return ObjectName{Namespace: namespace, Name: name} } // Parts is the inverse of the constructor func (objName ObjectName) Parts() (namespace, name string) { return objName.Namespace, objName.Name } // String returns the standard string encoding, // which is designed to match the historical behavior of MetaNamespaceKeyFunc. // Note this behavior is different from the String method of types.NamespacedName. 
func (objName ObjectName) String() string { if len(objName.Namespace) > 0 { return objName.Namespace + "/" + objName.Name } return objName.Name } // ParseObjectName tries to parse the standard encoding func ParseObjectName(str string) (ObjectName, error) { var objName ObjectName var err error objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str) return objName, err } // NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName { return NewObjectName(nn.Namespace, nn.Name) } // AsNamespacedName rebrands as a NamespacedName func (objName ObjectName) AsNamespacedName() types.NamespacedName { return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false