repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// SecretSpecApplyConfiguration represents an declarative configuration of the SecretSpec type for use
// with apply.
type SecretSpecApplyConfiguration struct {
SecretSource *v1.LocalObjectReference `json:"secretSource,omitempty"`
MountPath *string `json:"mountPath,omitempty"`
}
// SecretSpecApplyConfiguration constructs an declarative configuration of the SecretSpec type for use with
// apply.
func SecretSpec() *SecretSpecApplyConfiguration {
return &SecretSpecApplyConfiguration{}
}
// WithSecretSource sets the SecretSource field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SecretSource field is set to the value of the last call.
func (b *SecretSpecApplyConfiguration) WithSecretSource(value v1.LocalObjectReference) *SecretSpecApplyConfiguration {
b.SecretSource = &value
return b
}
// WithMountPath sets the MountPath field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MountPath field is set to the value of the last call.
func (b *SecretSpecApplyConfiguration) WithMountPath(value string) *SecretSpecApplyConfiguration {
b.MountPath = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BuildStatusOutputApplyConfiguration represents an declarative configuration of the BuildStatusOutput type for use
// with apply.
type BuildStatusOutputApplyConfiguration struct {
To *BuildStatusOutputToApplyConfiguration `json:"to,omitempty"`
}
// BuildStatusOutputApplyConfiguration constructs an declarative configuration of the BuildStatusOutput type for use with
// apply.
func BuildStatusOutput() *BuildStatusOutputApplyConfiguration {
return &BuildStatusOutputApplyConfiguration{}
}
// WithTo sets the To field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the To field is set to the value of the last call.
func (b *BuildStatusOutputApplyConfiguration) WithTo(value *BuildStatusOutputToApplyConfiguration) *BuildStatusOutputApplyConfiguration {
b.To = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BuildConditionApplyConfiguration represents an declarative configuration of the BuildCondition type for use
// with apply.
type BuildConditionApplyConfiguration struct {
Type *v1.BuildConditionType `json:"type,omitempty"`
Status *corev1.ConditionStatus `json:"status,omitempty"`
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
Reason *string `json:"reason,omitempty"`
Message *string `json:"message,omitempty"`
}
// BuildConditionApplyConfiguration constructs an declarative configuration of the BuildCondition type for use with
// apply.
func BuildCondition() *BuildConditionApplyConfiguration {
return &BuildConditionApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithType(value v1.BuildConditionType) *BuildConditionApplyConfiguration {
b.Type = &value
return b
}
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *BuildConditionApplyConfiguration {
b.Status = &value
return b
}
// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastUpdateTime field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *BuildConditionApplyConfiguration {
b.LastUpdateTime = &value
return b
}
// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTransitionTime field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *BuildConditionApplyConfiguration {
b.LastTransitionTime = &value
return b
}
// WithReason sets the Reason field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Reason field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithReason(value string) *BuildConditionApplyConfiguration {
b.Reason = &value
return b
}
// WithMessage sets the Message field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Message field is set to the value of the last call.
func (b *BuildConditionApplyConfiguration) WithMessage(value string) *BuildConditionApplyConfiguration {
b.Message = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
)
// SourceRevisionApplyConfiguration represents an declarative configuration of the SourceRevision type for use
// with apply.
type SourceRevisionApplyConfiguration struct {
Type *v1.BuildSourceType `json:"type,omitempty"`
Git *GitSourceRevisionApplyConfiguration `json:"git,omitempty"`
}
// SourceRevisionApplyConfiguration constructs an declarative configuration of the SourceRevision type for use with
// apply.
func SourceRevision() *SourceRevisionApplyConfiguration {
return &SourceRevisionApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *SourceRevisionApplyConfiguration) WithType(value v1.BuildSourceType) *SourceRevisionApplyConfiguration {
b.Type = &value
return b
}
// WithGit sets the Git field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Git field is set to the value of the last call.
func (b *SourceRevisionApplyConfiguration) WithGit(value *GitSourceRevisionApplyConfiguration) *SourceRevisionApplyConfiguration {
b.Git = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// ImageSourceApplyConfiguration represents an declarative configuration of the ImageSource type for use
// with apply.
type ImageSourceApplyConfiguration struct {
From *v1.ObjectReference `json:"from,omitempty"`
As []string `json:"as,omitempty"`
Paths []ImageSourcePathApplyConfiguration `json:"paths,omitempty"`
PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"`
}
// ImageSourceApplyConfiguration constructs an declarative configuration of the ImageSource type for use with
// apply.
func ImageSource() *ImageSourceApplyConfiguration {
return &ImageSourceApplyConfiguration{}
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *ImageSourceApplyConfiguration) WithFrom(value v1.ObjectReference) *ImageSourceApplyConfiguration {
b.From = &value
return b
}
// WithAs adds the given value to the As field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the As field.
func (b *ImageSourceApplyConfiguration) WithAs(values ...string) *ImageSourceApplyConfiguration {
for i := range values {
b.As = append(b.As, values[i])
}
return b
}
// WithPaths adds the given value to the Paths field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Paths field.
func (b *ImageSourceApplyConfiguration) WithPaths(values ...*ImageSourcePathApplyConfiguration) *ImageSourceApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithPaths")
}
b.Paths = append(b.Paths, *values[i])
}
return b
}
// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PullSecret field is set to the value of the last call.
func (b *ImageSourceApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *ImageSourceApplyConfiguration {
b.PullSecret = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// SecretBuildSourceApplyConfiguration represents an declarative configuration of the SecretBuildSource type for use
// with apply.
type SecretBuildSourceApplyConfiguration struct {
Secret *v1.LocalObjectReference `json:"secret,omitempty"`
DestinationDir *string `json:"destinationDir,omitempty"`
}
// SecretBuildSourceApplyConfiguration constructs an declarative configuration of the SecretBuildSource type for use with
// apply.
func SecretBuildSource() *SecretBuildSourceApplyConfiguration {
return &SecretBuildSourceApplyConfiguration{}
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *SecretBuildSourceApplyConfiguration) WithSecret(value v1.LocalObjectReference) *SecretBuildSourceApplyConfiguration {
b.Secret = &value
return b
}
// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DestinationDir field is set to the value of the last call.
func (b *SecretBuildSourceApplyConfiguration) WithDestinationDir(value string) *SecretBuildSourceApplyConfiguration {
b.DestinationDir = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// SecretLocalReferenceApplyConfiguration represents an declarative configuration of the SecretLocalReference type for use
// with apply.
type SecretLocalReferenceApplyConfiguration struct {
Name *string `json:"name,omitempty"`
}
// SecretLocalReferenceApplyConfiguration constructs an declarative configuration of the SecretLocalReference type for use with
// apply.
func SecretLocalReference() *SecretLocalReferenceApplyConfiguration {
return &SecretLocalReferenceApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *SecretLocalReferenceApplyConfiguration) WithName(value string) *SecretLocalReferenceApplyConfiguration {
b.Name = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
apibuildv1 "github.com/openshift/api/build/v1"
internal "github.com/openshift/client-go/build/applyconfigurations/internal"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// BuildConfigApplyConfiguration represents an declarative configuration of the BuildConfig type for use
// with apply.
type BuildConfigApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
Spec *BuildConfigSpecApplyConfiguration `json:"spec,omitempty"`
Status *BuildConfigStatusApplyConfiguration `json:"status,omitempty"`
}
// BuildConfig constructs an declarative configuration of the BuildConfig type for use with
// apply.
func BuildConfig(name, namespace string) *BuildConfigApplyConfiguration {
b := &BuildConfigApplyConfiguration{}
b.WithName(name)
b.WithNamespace(namespace)
b.WithKind("BuildConfig")
b.WithAPIVersion("build.openshift.io/v1")
return b
}
// ExtractBuildConfig extracts the applied configuration owned by fieldManager from
// buildConfig. If no managedFields are found in buildConfig for fieldManager, a
// BuildConfigApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// buildConfig must be a unmodified BuildConfig API object that was retrieved from the Kubernetes API.
// ExtractBuildConfig provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractBuildConfig(buildConfig *apibuildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) {
return extractBuildConfig(buildConfig, fieldManager, "")
}
// ExtractBuildConfigStatus is the same as ExtractBuildConfig except
// that it extracts the status subresource applied configuration.
// Experimental!
func ExtractBuildConfigStatus(buildConfig *apibuildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) {
return extractBuildConfig(buildConfig, fieldManager, "status")
}
func extractBuildConfig(buildConfig *apibuildv1.BuildConfig, fieldManager string, subresource string) (*BuildConfigApplyConfiguration, error) {
b := &BuildConfigApplyConfiguration{}
err := managedfields.ExtractInto(buildConfig, internal.Parser().Type("com.github.openshift.api.build.v1.BuildConfig"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
b.WithName(buildConfig.Name)
b.WithNamespace(buildConfig.Namespace)
b.WithKind("BuildConfig")
b.WithAPIVersion("build.openshift.io/v1")
return b, nil
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithKind(value string) *BuildConfigApplyConfiguration {
b.Kind = &value
return b
}
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithAPIVersion(value string) *BuildConfigApplyConfiguration {
b.APIVersion = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithName(value string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value
return b
}
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithGenerateName(value string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value
return b
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithNamespace(value string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value
return b
}
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithUID(value types.UID) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithResourceVersion(value string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value
return b
}
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithGeneration(value int64) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value
return b
}
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value
return b
}
// WithLabels puts the entries into the Labels field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting an existing map entries in Labels field with the same key.
func (b *BuildConfigApplyConfiguration) WithLabels(entries map[string]string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Labels[k] = v
}
return b
}
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting an existing map entries in Annotations field with the same key.
func (b *BuildConfigApplyConfiguration) WithAnnotations(entries map[string]string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Annotations[k] = v
}
return b
}
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *BuildConfigApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
b.OwnerReferences = append(b.OwnerReferences, *values[i])
}
return b
}
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *BuildConfigApplyConfiguration) WithFinalizers(values ...string) *BuildConfigApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
}
return b
}
func (b *BuildConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
}
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithSpec(value *BuildConfigSpecApplyConfiguration) *BuildConfigApplyConfiguration {
b.Spec = value
return b
}
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
func (b *BuildConfigApplyConfiguration) WithStatus(value *BuildConfigStatusApplyConfiguration) *BuildConfigApplyConfiguration {
b.Status = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
)
// BuildTriggerPolicyApplyConfiguration represents an declarative configuration of the BuildTriggerPolicy type for use
// with apply.
type BuildTriggerPolicyApplyConfiguration struct {
Type *v1.BuildTriggerType `json:"type,omitempty"`
GitHubWebHook *WebHookTriggerApplyConfiguration `json:"github,omitempty"`
GenericWebHook *WebHookTriggerApplyConfiguration `json:"generic,omitempty"`
ImageChange *ImageChangeTriggerApplyConfiguration `json:"imageChange,omitempty"`
GitLabWebHook *WebHookTriggerApplyConfiguration `json:"gitlab,omitempty"`
BitbucketWebHook *WebHookTriggerApplyConfiguration `json:"bitbucket,omitempty"`
}
// BuildTriggerPolicyApplyConfiguration constructs an declarative configuration of the BuildTriggerPolicy type for use with
// apply.
func BuildTriggerPolicy() *BuildTriggerPolicyApplyConfiguration {
return &BuildTriggerPolicyApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithType(value v1.BuildTriggerType) *BuildTriggerPolicyApplyConfiguration {
b.Type = &value
return b
}
// WithGitHubWebHook sets the GitHubWebHook field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GitHubWebHook field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithGitHubWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
b.GitHubWebHook = value
return b
}
// WithGenericWebHook sets the GenericWebHook field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenericWebHook field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithGenericWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
b.GenericWebHook = value
return b
}
// WithImageChange sets the ImageChange field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImageChange field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithImageChange(value *ImageChangeTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
b.ImageChange = value
return b
}
// WithGitLabWebHook sets the GitLabWebHook field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GitLabWebHook field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithGitLabWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
b.GitLabWebHook = value
return b
}
// WithBitbucketWebHook sets the BitbucketWebHook field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BitbucketWebHook field is set to the value of the last call.
func (b *BuildTriggerPolicyApplyConfiguration) WithBitbucketWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
b.BitbucketWebHook = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
)
// BuildSourceApplyConfiguration represents an declarative configuration of the BuildSource type for use
// with apply.
type BuildSourceApplyConfiguration struct {
Type *v1.BuildSourceType `json:"type,omitempty"`
Binary *BinaryBuildSourceApplyConfiguration `json:"binary,omitempty"`
Dockerfile *string `json:"dockerfile,omitempty"`
Git *GitBuildSourceApplyConfiguration `json:"git,omitempty"`
Images []ImageSourceApplyConfiguration `json:"images,omitempty"`
ContextDir *string `json:"contextDir,omitempty"`
SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty"`
Secrets []SecretBuildSourceApplyConfiguration `json:"secrets,omitempty"`
ConfigMaps []ConfigMapBuildSourceApplyConfiguration `json:"configMaps,omitempty"`
}
// BuildSourceApplyConfiguration constructs an declarative configuration of the BuildSource type for use with
// apply.
func BuildSource() *BuildSourceApplyConfiguration {
return &BuildSourceApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithType(value v1.BuildSourceType) *BuildSourceApplyConfiguration {
b.Type = &value
return b
}
// WithBinary sets the Binary field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Binary field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithBinary(value *BinaryBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
b.Binary = value
return b
}
// WithDockerfile sets the Dockerfile field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Dockerfile field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithDockerfile(value string) *BuildSourceApplyConfiguration {
b.Dockerfile = &value
return b
}
// WithGit sets the Git field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Git field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithGit(value *GitBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
b.Git = value
return b
}
// WithImages adds the given value to the Images field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Images field.
func (b *BuildSourceApplyConfiguration) WithImages(values ...*ImageSourceApplyConfiguration) *BuildSourceApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithImages")
}
b.Images = append(b.Images, *values[i])
}
return b
}
// WithContextDir sets the ContextDir field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ContextDir field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithContextDir(value string) *BuildSourceApplyConfiguration {
b.ContextDir = &value
return b
}
// WithSourceSecret sets the SourceSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SourceSecret field is set to the value of the last call.
func (b *BuildSourceApplyConfiguration) WithSourceSecret(value corev1.LocalObjectReference) *BuildSourceApplyConfiguration {
b.SourceSecret = &value
return b
}
// WithSecrets adds the given value to the Secrets field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Secrets field.
func (b *BuildSourceApplyConfiguration) WithSecrets(values ...*SecretBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithSecrets")
}
b.Secrets = append(b.Secrets, *values[i])
}
return b
}
// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ConfigMaps field.
func (b *BuildSourceApplyConfiguration) WithConfigMaps(values ...*ConfigMapBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithConfigMaps")
}
b.ConfigMaps = append(b.ConfigMaps, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
buildv1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
)
// CommonSpecApplyConfiguration represents an declarative configuration of the CommonSpec type for use
// with apply.
type CommonSpecApplyConfiguration struct {
ServiceAccount *string `json:"serviceAccount,omitempty"`
Source *BuildSourceApplyConfiguration `json:"source,omitempty"`
Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
Strategy *BuildStrategyApplyConfiguration `json:"strategy,omitempty"`
Output *BuildOutputApplyConfiguration `json:"output,omitempty"`
Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
PostCommit *BuildPostCommitSpecApplyConfiguration `json:"postCommit,omitempty"`
CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty"`
NodeSelector *buildv1.OptionalNodeSelector `json:"nodeSelector,omitempty"`
MountTrustedCA *bool `json:"mountTrustedCA,omitempty"`
}
// CommonSpecApplyConfiguration constructs an declarative configuration of the CommonSpec type for use with
// apply.
func CommonSpec() *CommonSpecApplyConfiguration {
return &CommonSpecApplyConfiguration{}
}
// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ServiceAccount field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithServiceAccount(value string) *CommonSpecApplyConfiguration {
b.ServiceAccount = &value
return b
}
// WithSource sets the Source field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Source field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *CommonSpecApplyConfiguration {
b.Source = value
return b
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonSpecApplyConfiguration {
b.Revision = value
return b
}
// WithStrategy sets the Strategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Strategy field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *CommonSpecApplyConfiguration {
b.Strategy = value
return b
}
// WithOutput sets the Output field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Output field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *CommonSpecApplyConfiguration {
b.Output = value
return b
}
// WithResources sets the Resources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resources field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *CommonSpecApplyConfiguration {
b.Resources = &value
return b
}
// WithPostCommit sets the PostCommit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PostCommit field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *CommonSpecApplyConfiguration {
b.PostCommit = value
return b
}
// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *CommonSpecApplyConfiguration {
b.CompletionDeadlineSeconds = &value
return b
}
// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeSelector field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *CommonSpecApplyConfiguration {
b.NodeSelector = &value
return b
}
// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MountTrustedCA field is set to the value of the last call.
func (b *CommonSpecApplyConfiguration) WithMountTrustedCA(value bool) *CommonSpecApplyConfiguration {
b.MountTrustedCA = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BuildVolumeMountApplyConfiguration represents an declarative configuration of the BuildVolumeMount type for use
// with apply.
type BuildVolumeMountApplyConfiguration struct {
DestinationPath *string `json:"destinationPath,omitempty"`
}
// BuildVolumeMountApplyConfiguration constructs an declarative configuration of the BuildVolumeMount type for use with
// apply.
func BuildVolumeMount() *BuildVolumeMountApplyConfiguration {
return &BuildVolumeMountApplyConfiguration{}
}
// WithDestinationPath sets the DestinationPath field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DestinationPath field is set to the value of the last call.
func (b *BuildVolumeMountApplyConfiguration) WithDestinationPath(value string) *BuildVolumeMountApplyConfiguration {
b.DestinationPath = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
apibuildv1 "github.com/openshift/api/build/v1"
internal "github.com/openshift/client-go/build/applyconfigurations/internal"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// BuildApplyConfiguration represents an declarative configuration of the Build type for use
// with apply.
type BuildApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"`
Status *BuildStatusApplyConfiguration `json:"status,omitempty"`
}
// Build constructs an declarative configuration of the Build type for use with
// apply.
func Build(name, namespace string) *BuildApplyConfiguration {
b := &BuildApplyConfiguration{}
b.WithName(name)
b.WithNamespace(namespace)
b.WithKind("Build")
b.WithAPIVersion("build.openshift.io/v1")
return b
}
// ExtractBuild extracts the applied configuration owned by fieldManager from
// build. If no managedFields are found in build for fieldManager, a
// BuildApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// build must be a unmodified Build API object that was retrieved from the Kubernetes API.
// ExtractBuild provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractBuild(build *apibuildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
return extractBuild(build, fieldManager, "")
}
// ExtractBuildStatus is the same as ExtractBuild except
// that it extracts the status subresource applied configuration.
// Experimental!
func ExtractBuildStatus(build *apibuildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
return extractBuild(build, fieldManager, "status")
}
func extractBuild(build *apibuildv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) {
b := &BuildApplyConfiguration{}
err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.build.v1.Build"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
b.WithName(build.Name)
b.WithNamespace(build.Namespace)
b.WithKind("Build")
b.WithAPIVersion("build.openshift.io/v1")
return b, nil
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration {
b.Kind = &value
return b
}
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration {
b.APIVersion = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value
return b
}
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value
return b
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value
return b
}
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value
return b
}
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value
return b
}
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value
return b
}
// WithLabels puts the entries into the Labels field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting an existing map entries in Labels field with the same key.
func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Labels[k] = v
}
return b
}
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting an existing map entries in Annotations field with the same key.
func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
b.Annotations[k] = v
}
return b
}
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
b.OwnerReferences = append(b.OwnerReferences, *values[i])
}
return b
}
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
}
return b
}
func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
}
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *BuildApplyConfiguration {
b.Spec = value
return b
}
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
func (b *BuildApplyConfiguration) WithStatus(value *BuildStatusApplyConfiguration) *BuildApplyConfiguration {
b.Status = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BuildVolumeApplyConfiguration represents an declarative configuration of the BuildVolume type for use
// with apply.
type BuildVolumeApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Source *BuildVolumeSourceApplyConfiguration `json:"source,omitempty"`
Mounts []BuildVolumeMountApplyConfiguration `json:"mounts,omitempty"`
}
// BuildVolumeApplyConfiguration constructs an declarative configuration of the BuildVolume type for use with
// apply.
func BuildVolume() *BuildVolumeApplyConfiguration {
return &BuildVolumeApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *BuildVolumeApplyConfiguration) WithName(value string) *BuildVolumeApplyConfiguration {
b.Name = &value
return b
}
// WithSource sets the Source field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Source field is set to the value of the last call.
func (b *BuildVolumeApplyConfiguration) WithSource(value *BuildVolumeSourceApplyConfiguration) *BuildVolumeApplyConfiguration {
b.Source = value
return b
}
// WithMounts adds the given value to the Mounts field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Mounts field.
func (b *BuildVolumeApplyConfiguration) WithMounts(values ...*BuildVolumeMountApplyConfiguration) *BuildVolumeApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithMounts")
}
b.Mounts = append(b.Mounts, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// BuildOutputApplyConfiguration represents an declarative configuration of the BuildOutput type for use
// with apply.
type BuildOutputApplyConfiguration struct {
To *v1.ObjectReference `json:"to,omitempty"`
PushSecret *v1.LocalObjectReference `json:"pushSecret,omitempty"`
ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"`
}
// BuildOutputApplyConfiguration constructs an declarative configuration of the BuildOutput type for use with
// apply.
func BuildOutput() *BuildOutputApplyConfiguration {
return &BuildOutputApplyConfiguration{}
}
// WithTo sets the To field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the To field is set to the value of the last call.
func (b *BuildOutputApplyConfiguration) WithTo(value v1.ObjectReference) *BuildOutputApplyConfiguration {
b.To = &value
return b
}
// WithPushSecret sets the PushSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PushSecret field is set to the value of the last call.
func (b *BuildOutputApplyConfiguration) WithPushSecret(value v1.LocalObjectReference) *BuildOutputApplyConfiguration {
b.PushSecret = &value
return b
}
// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ImageLabels field.
func (b *BuildOutputApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildOutputApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithImageLabels")
}
b.ImageLabels = append(b.ImageLabels, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// ImageChangeTriggerApplyConfiguration represents an declarative configuration of the ImageChangeTrigger type for use
// with apply.
type ImageChangeTriggerApplyConfiguration struct {
LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"`
From *v1.ObjectReference `json:"from,omitempty"`
Paused *bool `json:"paused,omitempty"`
}
// ImageChangeTriggerApplyConfiguration constructs an declarative configuration of the ImageChangeTrigger type for use with
// apply.
func ImageChangeTrigger() *ImageChangeTriggerApplyConfiguration {
return &ImageChangeTriggerApplyConfiguration{}
}
// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTriggeredImageID field is set to the value of the last call.
func (b *ImageChangeTriggerApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerApplyConfiguration {
b.LastTriggeredImageID = &value
return b
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *ImageChangeTriggerApplyConfiguration) WithFrom(value v1.ObjectReference) *ImageChangeTriggerApplyConfiguration {
b.From = &value
return b
}
// WithPaused sets the Paused field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Paused field is set to the value of the last call.
func (b *ImageChangeTriggerApplyConfiguration) WithPaused(value bool) *ImageChangeTriggerApplyConfiguration {
b.Paused = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// SourceControlUserApplyConfiguration represents an declarative configuration of the SourceControlUser type for use
// with apply.
type SourceControlUserApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Email *string `json:"email,omitempty"`
}
// SourceControlUserApplyConfiguration constructs an declarative configuration of the SourceControlUser type for use with
// apply.
func SourceControlUser() *SourceControlUserApplyConfiguration {
return &SourceControlUserApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *SourceControlUserApplyConfiguration) WithName(value string) *SourceControlUserApplyConfiguration {
b.Name = &value
return b
}
// WithEmail sets the Email field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Email field is set to the value of the last call.
func (b *SourceControlUserApplyConfiguration) WithEmail(value string) *SourceControlUserApplyConfiguration {
b.Email = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StepInfoApplyConfiguration represents an declarative configuration of the StepInfo type for use
// with apply.
type StepInfoApplyConfiguration struct {
Name *v1.StepName `json:"name,omitempty"`
StartTime *metav1.Time `json:"startTime,omitempty"`
DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"`
}
// StepInfoApplyConfiguration constructs an declarative configuration of the StepInfo type for use with
// apply.
func StepInfo() *StepInfoApplyConfiguration {
return &StepInfoApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *StepInfoApplyConfiguration) WithName(value v1.StepName) *StepInfoApplyConfiguration {
b.Name = &value
return b
}
// WithStartTime sets the StartTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StartTime field is set to the value of the last call.
func (b *StepInfoApplyConfiguration) WithStartTime(value metav1.Time) *StepInfoApplyConfiguration {
b.StartTime = &value
return b
}
// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DurationMilliseconds field is set to the value of the last call.
func (b *StepInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StepInfoApplyConfiguration {
b.DurationMilliseconds = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// GenericWebHookCauseApplyConfiguration represents an declarative configuration of the GenericWebHookCause type for use
// with apply.
type GenericWebHookCauseApplyConfiguration struct {
Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
Secret *string `json:"secret,omitempty"`
}
// GenericWebHookCauseApplyConfiguration constructs an declarative configuration of the GenericWebHookCause type for use with
// apply.
func GenericWebHookCause() *GenericWebHookCauseApplyConfiguration {
return &GenericWebHookCauseApplyConfiguration{}
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *GenericWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GenericWebHookCauseApplyConfiguration {
b.Revision = value
return b
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *GenericWebHookCauseApplyConfiguration) WithSecret(value string) *GenericWebHookCauseApplyConfiguration {
b.Secret = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BitbucketWebHookCauseApplyConfiguration represents an declarative configuration of the BitbucketWebHookCause type for use
// with apply.
type BitbucketWebHookCauseApplyConfiguration struct {
CommonWebHookCauseApplyConfiguration `json:",inline"`
}
// BitbucketWebHookCauseApplyConfiguration constructs an declarative configuration of the BitbucketWebHookCause type for use with
// apply.
func BitbucketWebHookCause() *BitbucketWebHookCauseApplyConfiguration {
return &BitbucketWebHookCauseApplyConfiguration{}
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *BitbucketWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BitbucketWebHookCauseApplyConfiguration {
b.Revision = value
return b
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *BitbucketWebHookCauseApplyConfiguration) WithSecret(value string) *BitbucketWebHookCauseApplyConfiguration {
b.Secret = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// ImageStreamTagReferenceApplyConfiguration represents an declarative configuration of the ImageStreamTagReference type for use
// with apply.
type ImageStreamTagReferenceApplyConfiguration struct {
Namespace *string `json:"namespace,omitempty"`
Name *string `json:"name,omitempty"`
}
// ImageStreamTagReferenceApplyConfiguration constructs an declarative configuration of the ImageStreamTagReference type for use with
// apply.
func ImageStreamTagReference() *ImageStreamTagReferenceApplyConfiguration {
return &ImageStreamTagReferenceApplyConfiguration{}
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ImageStreamTagReferenceApplyConfiguration) WithNamespace(value string) *ImageStreamTagReferenceApplyConfiguration {
b.Namespace = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ImageStreamTagReferenceApplyConfiguration) WithName(value string) *ImageStreamTagReferenceApplyConfiguration {
b.Name = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// ProxyConfigApplyConfiguration represents an declarative configuration of the ProxyConfig type for use
// with apply.
type ProxyConfigApplyConfiguration struct {
HTTPProxy *string `json:"httpProxy,omitempty"`
HTTPSProxy *string `json:"httpsProxy,omitempty"`
NoProxy *string `json:"noProxy,omitempty"`
}
// ProxyConfigApplyConfiguration constructs an declarative configuration of the ProxyConfig type for use with
// apply.
func ProxyConfig() *ProxyConfigApplyConfiguration {
return &ProxyConfigApplyConfiguration{}
}
// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTPProxy field is set to the value of the last call.
func (b *ProxyConfigApplyConfiguration) WithHTTPProxy(value string) *ProxyConfigApplyConfiguration {
b.HTTPProxy = &value
return b
}
// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTPSProxy field is set to the value of the last call.
func (b *ProxyConfigApplyConfiguration) WithHTTPSProxy(value string) *ProxyConfigApplyConfiguration {
b.HTTPSProxy = &value
return b
}
// WithNoProxy sets the NoProxy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NoProxy field is set to the value of the last call.
func (b *ProxyConfigApplyConfiguration) WithNoProxy(value string) *ProxyConfigApplyConfiguration {
b.NoProxy = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
buildv1 "github.com/openshift/api/build/v1"
v1 "k8s.io/api/core/v1"
)
// DockerBuildStrategyApplyConfiguration represents an declarative configuration of the DockerBuildStrategy type for use
// with apply.
type DockerBuildStrategyApplyConfiguration struct {
From *v1.ObjectReference `json:"from,omitempty"`
PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"`
NoCache *bool `json:"noCache,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`
ForcePull *bool `json:"forcePull,omitempty"`
DockerfilePath *string `json:"dockerfilePath,omitempty"`
BuildArgs []v1.EnvVar `json:"buildArgs,omitempty"`
ImageOptimizationPolicy *buildv1.ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty"`
Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"`
}
// DockerBuildStrategyApplyConfiguration constructs an declarative configuration of the DockerBuildStrategy type for use with
// apply.
func DockerBuildStrategy() *DockerBuildStrategyApplyConfiguration {
return &DockerBuildStrategyApplyConfiguration{}
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *DockerBuildStrategyApplyConfiguration {
b.From = &value
return b
}
// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PullSecret field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *DockerBuildStrategyApplyConfiguration {
b.PullSecret = &value
return b
}
// WithNoCache sets the NoCache field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NoCache field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithNoCache(value bool) *DockerBuildStrategyApplyConfiguration {
b.NoCache = &value
return b
}
// WithEnv adds the given value to the Env field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Env field.
func (b *DockerBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *DockerBuildStrategyApplyConfiguration {
for i := range values {
b.Env = append(b.Env, values[i])
}
return b
}
// WithForcePull sets the ForcePull field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ForcePull field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithForcePull(value bool) *DockerBuildStrategyApplyConfiguration {
b.ForcePull = &value
return b
}
// WithDockerfilePath sets the DockerfilePath field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DockerfilePath field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithDockerfilePath(value string) *DockerBuildStrategyApplyConfiguration {
b.DockerfilePath = &value
return b
}
// WithBuildArgs adds the given value to the BuildArgs field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the BuildArgs field.
func (b *DockerBuildStrategyApplyConfiguration) WithBuildArgs(values ...v1.EnvVar) *DockerBuildStrategyApplyConfiguration {
for i := range values {
b.BuildArgs = append(b.BuildArgs, values[i])
}
return b
}
// WithImageOptimizationPolicy sets the ImageOptimizationPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImageOptimizationPolicy field is set to the value of the last call.
func (b *DockerBuildStrategyApplyConfiguration) WithImageOptimizationPolicy(value buildv1.ImageOptimizationPolicy) *DockerBuildStrategyApplyConfiguration {
b.ImageOptimizationPolicy = &value
return b
}
// WithVolumes adds the given value to the Volumes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Volumes field.
func (b *DockerBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *DockerBuildStrategyApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithVolumes")
}
b.Volumes = append(b.Volumes, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BinaryBuildSourceApplyConfiguration represents an declarative configuration of the BinaryBuildSource type for use
// with apply.
type BinaryBuildSourceApplyConfiguration struct {
AsFile *string `json:"asFile,omitempty"`
}
// BinaryBuildSourceApplyConfiguration constructs an declarative configuration of the BinaryBuildSource type for use with
// apply.
func BinaryBuildSource() *BinaryBuildSourceApplyConfiguration {
return &BinaryBuildSourceApplyConfiguration{}
}
// WithAsFile sets the AsFile field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AsFile field is set to the value of the last call.
func (b *BinaryBuildSourceApplyConfiguration) WithAsFile(value string) *BinaryBuildSourceApplyConfiguration {
b.AsFile = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// CommonWebHookCauseApplyConfiguration represents an declarative configuration of the CommonWebHookCause type for use
// with apply.
type CommonWebHookCauseApplyConfiguration struct {
Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
Secret *string `json:"secret,omitempty"`
}
// CommonWebHookCauseApplyConfiguration constructs an declarative configuration of the CommonWebHookCause type for use with
// apply.
func CommonWebHookCause() *CommonWebHookCauseApplyConfiguration {
return &CommonWebHookCauseApplyConfiguration{}
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *CommonWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonWebHookCauseApplyConfiguration {
b.Revision = value
return b
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *CommonWebHookCauseApplyConfiguration) WithSecret(value string) *CommonWebHookCauseApplyConfiguration {
b.Secret = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
buildv1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
)
// BuildConfigSpecApplyConfiguration represents an declarative configuration of the BuildConfigSpec type for use
// with apply.
type BuildConfigSpecApplyConfiguration struct {
Triggers []BuildTriggerPolicyApplyConfiguration `json:"triggers,omitempty"`
RunPolicy *buildv1.BuildRunPolicy `json:"runPolicy,omitempty"`
CommonSpecApplyConfiguration `json:",inline"`
SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty"`
FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty"`
}
// BuildConfigSpecApplyConfiguration constructs an declarative configuration of the BuildConfigSpec type for use with
// apply.
func BuildConfigSpec() *BuildConfigSpecApplyConfiguration {
return &BuildConfigSpecApplyConfiguration{}
}
// WithTriggers adds the given value to the Triggers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Triggers field.
func (b *BuildConfigSpecApplyConfiguration) WithTriggers(values ...*BuildTriggerPolicyApplyConfiguration) *BuildConfigSpecApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithTriggers")
}
b.Triggers = append(b.Triggers, *values[i])
}
return b
}
// WithRunPolicy sets the RunPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RunPolicy field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithRunPolicy(value buildv1.BuildRunPolicy) *BuildConfigSpecApplyConfiguration {
b.RunPolicy = &value
return b
}
// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ServiceAccount field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithServiceAccount(value string) *BuildConfigSpecApplyConfiguration {
b.ServiceAccount = &value
return b
}
// WithSource sets the Source field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Source field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildConfigSpecApplyConfiguration {
b.Source = value
return b
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildConfigSpecApplyConfiguration {
b.Revision = value
return b
}
// WithStrategy sets the Strategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Strategy field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildConfigSpecApplyConfiguration {
b.Strategy = value
return b
}
// WithOutput sets the Output field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Output field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildConfigSpecApplyConfiguration {
b.Output = value
return b
}
// WithResources sets the Resources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resources field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildConfigSpecApplyConfiguration {
b.Resources = &value
return b
}
// WithPostCommit sets the PostCommit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PostCommit field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildConfigSpecApplyConfiguration {
b.PostCommit = value
return b
}
// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildConfigSpecApplyConfiguration {
b.CompletionDeadlineSeconds = &value
return b
}
// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeSelector field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildConfigSpecApplyConfiguration {
b.NodeSelector = &value
return b
}
// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MountTrustedCA field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildConfigSpecApplyConfiguration {
b.MountTrustedCA = &value
return b
}
// WithSuccessfulBuildsHistoryLimit sets the SuccessfulBuildsHistoryLimit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SuccessfulBuildsHistoryLimit field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithSuccessfulBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration {
b.SuccessfulBuildsHistoryLimit = &value
return b
}
// WithFailedBuildsHistoryLimit sets the FailedBuildsHistoryLimit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FailedBuildsHistoryLimit field is set to the value of the last call.
func (b *BuildConfigSpecApplyConfiguration) WithFailedBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration {
b.FailedBuildsHistoryLimit = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StageInfoApplyConfiguration represents an declarative configuration of the StageInfo type for use
// with apply.
type StageInfoApplyConfiguration struct {
Name *v1.StageName `json:"name,omitempty"`
StartTime *metav1.Time `json:"startTime,omitempty"`
DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"`
Steps []StepInfoApplyConfiguration `json:"steps,omitempty"`
}
// StageInfoApplyConfiguration constructs an declarative configuration of the StageInfo type for use with
// apply.
func StageInfo() *StageInfoApplyConfiguration {
return &StageInfoApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *StageInfoApplyConfiguration) WithName(value v1.StageName) *StageInfoApplyConfiguration {
b.Name = &value
return b
}
// WithStartTime sets the StartTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StartTime field is set to the value of the last call.
func (b *StageInfoApplyConfiguration) WithStartTime(value metav1.Time) *StageInfoApplyConfiguration {
b.StartTime = &value
return b
}
// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DurationMilliseconds field is set to the value of the last call.
func (b *StageInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StageInfoApplyConfiguration {
b.DurationMilliseconds = &value
return b
}
// WithSteps adds the given value to the Steps field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Steps field.
func (b *StageInfoApplyConfiguration) WithSteps(values ...*StepInfoApplyConfiguration) *StageInfoApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithSteps")
}
b.Steps = append(b.Steps, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// JenkinsPipelineBuildStrategyApplyConfiguration represents an declarative configuration of the JenkinsPipelineBuildStrategy type for use
// with apply.
type JenkinsPipelineBuildStrategyApplyConfiguration struct {
JenkinsfilePath *string `json:"jenkinsfilePath,omitempty"`
Jenkinsfile *string `json:"jenkinsfile,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`
}
// JenkinsPipelineBuildStrategyApplyConfiguration constructs an declarative configuration of the JenkinsPipelineBuildStrategy type for use with
// apply.
func JenkinsPipelineBuildStrategy() *JenkinsPipelineBuildStrategyApplyConfiguration {
return &JenkinsPipelineBuildStrategyApplyConfiguration{}
}
// WithJenkinsfilePath sets the JenkinsfilePath field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the JenkinsfilePath field is set to the value of the last call.
func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfilePath(value string) *JenkinsPipelineBuildStrategyApplyConfiguration {
b.JenkinsfilePath = &value
return b
}
// WithJenkinsfile sets the Jenkinsfile field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Jenkinsfile field is set to the value of the last call.
func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfile(value string) *JenkinsPipelineBuildStrategyApplyConfiguration {
b.Jenkinsfile = &value
return b
}
// WithEnv adds the given value to the Env field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Env field.
func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *JenkinsPipelineBuildStrategyApplyConfiguration {
for i := range values {
b.Env = append(b.Env, values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
)
// BuildVolumeSourceApplyConfiguration represents an declarative configuration of the BuildVolumeSource type for use
// with apply.
type BuildVolumeSourceApplyConfiguration struct {
Type *v1.BuildVolumeSourceType `json:"type,omitempty"`
Secret *corev1.SecretVolumeSource `json:"secret,omitempty"`
ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"`
CSI *corev1.CSIVolumeSource `json:"csi,omitempty"`
}
// BuildVolumeSourceApplyConfiguration constructs an declarative configuration of the BuildVolumeSource type for use with
// apply.
func BuildVolumeSource() *BuildVolumeSourceApplyConfiguration {
return &BuildVolumeSourceApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *BuildVolumeSourceApplyConfiguration) WithType(value v1.BuildVolumeSourceType) *BuildVolumeSourceApplyConfiguration {
b.Type = &value
return b
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *BuildVolumeSourceApplyConfiguration) WithSecret(value corev1.SecretVolumeSource) *BuildVolumeSourceApplyConfiguration {
b.Secret = &value
return b
}
// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConfigMap field is set to the value of the last call.
func (b *BuildVolumeSourceApplyConfiguration) WithConfigMap(value corev1.ConfigMapVolumeSource) *BuildVolumeSourceApplyConfiguration {
b.ConfigMap = &value
return b
}
// WithCSI sets the CSI field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CSI field is set to the value of the last call.
func (b *BuildVolumeSourceApplyConfiguration) WithCSI(value corev1.CSIVolumeSource) *BuildVolumeSourceApplyConfiguration {
b.CSI = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// ImageChangeCauseApplyConfiguration represents an declarative configuration of the ImageChangeCause type for use
// with apply.
type ImageChangeCauseApplyConfiguration struct {
ImageID *string `json:"imageID,omitempty"`
FromRef *v1.ObjectReference `json:"fromRef,omitempty"`
}
// ImageChangeCauseApplyConfiguration constructs an declarative configuration of the ImageChangeCause type for use with
// apply.
func ImageChangeCause() *ImageChangeCauseApplyConfiguration {
return &ImageChangeCauseApplyConfiguration{}
}
// WithImageID sets the ImageID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImageID field is set to the value of the last call.
func (b *ImageChangeCauseApplyConfiguration) WithImageID(value string) *ImageChangeCauseApplyConfiguration {
b.ImageID = &value
return b
}
// WithFromRef sets the FromRef field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FromRef field is set to the value of the last call.
func (b *ImageChangeCauseApplyConfiguration) WithFromRef(value v1.ObjectReference) *ImageChangeCauseApplyConfiguration {
b.FromRef = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// ConfigMapBuildSourceApplyConfiguration represents an declarative configuration of the ConfigMapBuildSource type for use
// with apply.
type ConfigMapBuildSourceApplyConfiguration struct {
ConfigMap *v1.LocalObjectReference `json:"configMap,omitempty"`
DestinationDir *string `json:"destinationDir,omitempty"`
}
// ConfigMapBuildSourceApplyConfiguration constructs an declarative configuration of the ConfigMapBuildSource type for use with
// apply.
func ConfigMapBuildSource() *ConfigMapBuildSourceApplyConfiguration {
return &ConfigMapBuildSourceApplyConfiguration{}
}
// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConfigMap field is set to the value of the last call.
func (b *ConfigMapBuildSourceApplyConfiguration) WithConfigMap(value v1.LocalObjectReference) *ConfigMapBuildSourceApplyConfiguration {
b.ConfigMap = &value
return b
}
// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DestinationDir field is set to the value of the last call.
func (b *ConfigMapBuildSourceApplyConfiguration) WithDestinationDir(value string) *ConfigMapBuildSourceApplyConfiguration {
b.DestinationDir = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
buildv1 "github.com/openshift/api/build/v1"
corev1 "k8s.io/api/core/v1"
)
// BuildSpecApplyConfiguration represents an declarative configuration of the BuildSpec type for use
// with apply.
type BuildSpecApplyConfiguration struct {
CommonSpecApplyConfiguration `json:",inline"`
TriggeredBy []BuildTriggerCauseApplyConfiguration `json:"triggeredBy,omitempty"`
}
// BuildSpecApplyConfiguration constructs an declarative configuration of the BuildSpec type for use with
// apply.
func BuildSpec() *BuildSpecApplyConfiguration {
return &BuildSpecApplyConfiguration{}
}
// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ServiceAccount field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithServiceAccount(value string) *BuildSpecApplyConfiguration {
b.ServiceAccount = &value
return b
}
// WithSource sets the Source field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Source field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildSpecApplyConfiguration {
b.Source = value
return b
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildSpecApplyConfiguration {
b.Revision = value
return b
}
// WithStrategy sets the Strategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Strategy field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildSpecApplyConfiguration {
b.Strategy = value
return b
}
// WithOutput sets the Output field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Output field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildSpecApplyConfiguration {
b.Output = value
return b
}
// WithResources sets the Resources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resources field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildSpecApplyConfiguration {
b.Resources = &value
return b
}
// WithPostCommit sets the PostCommit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PostCommit field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildSpecApplyConfiguration {
b.PostCommit = value
return b
}
// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildSpecApplyConfiguration {
b.CompletionDeadlineSeconds = &value
return b
}
// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeSelector field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildSpecApplyConfiguration {
b.NodeSelector = &value
return b
}
// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MountTrustedCA field is set to the value of the last call.
func (b *BuildSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildSpecApplyConfiguration {
b.MountTrustedCA = &value
return b
}
// WithTriggeredBy adds the given value to the TriggeredBy field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the TriggeredBy field.
func (b *BuildSpecApplyConfiguration) WithTriggeredBy(values ...*BuildTriggerCauseApplyConfiguration) *BuildSpecApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithTriggeredBy")
}
b.TriggeredBy = append(b.TriggeredBy, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// SourceBuildStrategyApplyConfiguration represents an declarative configuration of the SourceBuildStrategy type for use
// with apply.
type SourceBuildStrategyApplyConfiguration struct {
From *v1.ObjectReference `json:"from,omitempty"`
PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`
Scripts *string `json:"scripts,omitempty"`
Incremental *bool `json:"incremental,omitempty"`
ForcePull *bool `json:"forcePull,omitempty"`
Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"`
}
// SourceBuildStrategyApplyConfiguration constructs an declarative configuration of the SourceBuildStrategy type for use with
// apply.
func SourceBuildStrategy() *SourceBuildStrategyApplyConfiguration {
return &SourceBuildStrategyApplyConfiguration{}
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *SourceBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *SourceBuildStrategyApplyConfiguration {
b.From = &value
return b
}
// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PullSecret field is set to the value of the last call.
func (b *SourceBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *SourceBuildStrategyApplyConfiguration {
b.PullSecret = &value
return b
}
// WithEnv adds the given value to the Env field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Env field.
func (b *SourceBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *SourceBuildStrategyApplyConfiguration {
for i := range values {
b.Env = append(b.Env, values[i])
}
return b
}
// WithScripts sets the Scripts field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scripts field is set to the value of the last call.
func (b *SourceBuildStrategyApplyConfiguration) WithScripts(value string) *SourceBuildStrategyApplyConfiguration {
b.Scripts = &value
return b
}
// WithIncremental sets the Incremental field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Incremental field is set to the value of the last call.
func (b *SourceBuildStrategyApplyConfiguration) WithIncremental(value bool) *SourceBuildStrategyApplyConfiguration {
b.Incremental = &value
return b
}
// WithForcePull sets the ForcePull field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ForcePull field is set to the value of the last call.
func (b *SourceBuildStrategyApplyConfiguration) WithForcePull(value bool) *SourceBuildStrategyApplyConfiguration {
b.ForcePull = &value
return b
}
// WithVolumes adds the given value to the Volumes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Volumes field.
func (b *SourceBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *SourceBuildStrategyApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithVolumes")
}
b.Volumes = append(b.Volumes, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BuildConfigStatusApplyConfiguration represents an declarative configuration of the BuildConfigStatus type for use
// with apply.
type BuildConfigStatusApplyConfiguration struct {
LastVersion *int64 `json:"lastVersion,omitempty"`
ImageChangeTriggers []ImageChangeTriggerStatusApplyConfiguration `json:"imageChangeTriggers,omitempty"`
}
// BuildConfigStatusApplyConfiguration constructs an declarative configuration of the BuildConfigStatus type for use with
// apply.
func BuildConfigStatus() *BuildConfigStatusApplyConfiguration {
return &BuildConfigStatusApplyConfiguration{}
}
// WithLastVersion sets the LastVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastVersion field is set to the value of the last call.
func (b *BuildConfigStatusApplyConfiguration) WithLastVersion(value int64) *BuildConfigStatusApplyConfiguration {
b.LastVersion = &value
return b
}
// WithImageChangeTriggers adds the given value to the ImageChangeTriggers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ImageChangeTriggers field.
func (b *BuildConfigStatusApplyConfiguration) WithImageChangeTriggers(values ...*ImageChangeTriggerStatusApplyConfiguration) *BuildConfigStatusApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithImageChangeTriggers")
}
b.ImageChangeTriggers = append(b.ImageChangeTriggers, *values[i])
}
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// ImageLabelApplyConfiguration represents an declarative configuration of the ImageLabel type for use
// with apply.
type ImageLabelApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Value *string `json:"value,omitempty"`
}
// ImageLabelApplyConfiguration constructs an declarative configuration of the ImageLabel type for use with
// apply.
func ImageLabel() *ImageLabelApplyConfiguration {
return &ImageLabelApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ImageLabelApplyConfiguration) WithName(value string) *ImageLabelApplyConfiguration {
b.Name = &value
return b
}
// WithValue sets the Value field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Value field is set to the value of the last call.
func (b *ImageLabelApplyConfiguration) WithValue(value string) *ImageLabelApplyConfiguration {
b.Value = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// CustomBuildStrategyApplyConfiguration represents an declarative configuration of the CustomBuildStrategy type for use
// with apply.
type CustomBuildStrategyApplyConfiguration struct {
From *v1.ObjectReference `json:"from,omitempty"`
PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`
ExposeDockerSocket *bool `json:"exposeDockerSocket,omitempty"`
ForcePull *bool `json:"forcePull,omitempty"`
Secrets []SecretSpecApplyConfiguration `json:"secrets,omitempty"`
BuildAPIVersion *string `json:"buildAPIVersion,omitempty"`
}
// CustomBuildStrategyApplyConfiguration constructs an declarative configuration of the CustomBuildStrategy type for use with
// apply.
func CustomBuildStrategy() *CustomBuildStrategyApplyConfiguration {
return &CustomBuildStrategyApplyConfiguration{}
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *CustomBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *CustomBuildStrategyApplyConfiguration {
b.From = &value
return b
}
// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PullSecret field is set to the value of the last call.
func (b *CustomBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *CustomBuildStrategyApplyConfiguration {
b.PullSecret = &value
return b
}
// WithEnv adds the given value to the Env field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Env field.
func (b *CustomBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *CustomBuildStrategyApplyConfiguration {
for i := range values {
b.Env = append(b.Env, values[i])
}
return b
}
// WithExposeDockerSocket sets the ExposeDockerSocket field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ExposeDockerSocket field is set to the value of the last call.
func (b *CustomBuildStrategyApplyConfiguration) WithExposeDockerSocket(value bool) *CustomBuildStrategyApplyConfiguration {
b.ExposeDockerSocket = &value
return b
}
// WithForcePull sets the ForcePull field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ForcePull field is set to the value of the last call.
func (b *CustomBuildStrategyApplyConfiguration) WithForcePull(value bool) *CustomBuildStrategyApplyConfiguration {
b.ForcePull = &value
return b
}
// WithSecrets adds the given value to the Secrets field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Secrets field.
func (b *CustomBuildStrategyApplyConfiguration) WithSecrets(values ...*SecretSpecApplyConfiguration) *CustomBuildStrategyApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithSecrets")
}
b.Secrets = append(b.Secrets, *values[i])
}
return b
}
// WithBuildAPIVersion sets the BuildAPIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BuildAPIVersion field is set to the value of the last call.
func (b *CustomBuildStrategyApplyConfiguration) WithBuildAPIVersion(value string) *CustomBuildStrategyApplyConfiguration {
b.BuildAPIVersion = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// GitHubWebHookCauseApplyConfiguration represents an declarative configuration of the GitHubWebHookCause type for use
// with apply.
type GitHubWebHookCauseApplyConfiguration struct {
Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
Secret *string `json:"secret,omitempty"`
}
// GitHubWebHookCauseApplyConfiguration constructs an declarative configuration of the GitHubWebHookCause type for use with
// apply.
func GitHubWebHookCause() *GitHubWebHookCauseApplyConfiguration {
return &GitHubWebHookCauseApplyConfiguration{}
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *GitHubWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GitHubWebHookCauseApplyConfiguration {
b.Revision = value
return b
}
// WithSecret sets the Secret field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *GitHubWebHookCauseApplyConfiguration) WithSecret(value string) *GitHubWebHookCauseApplyConfiguration {
b.Secret = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/api/build/v1"
)
// BuildStrategyApplyConfiguration represents an declarative configuration of the BuildStrategy type for use
// with apply.
type BuildStrategyApplyConfiguration struct {
Type *v1.BuildStrategyType `json:"type,omitempty"`
DockerStrategy *DockerBuildStrategyApplyConfiguration `json:"dockerStrategy,omitempty"`
SourceStrategy *SourceBuildStrategyApplyConfiguration `json:"sourceStrategy,omitempty"`
CustomStrategy *CustomBuildStrategyApplyConfiguration `json:"customStrategy,omitempty"`
JenkinsPipelineStrategy *JenkinsPipelineBuildStrategyApplyConfiguration `json:"jenkinsPipelineStrategy,omitempty"`
}
// BuildStrategyApplyConfiguration constructs an declarative configuration of the BuildStrategy type for use with
// apply.
func BuildStrategy() *BuildStrategyApplyConfiguration {
return &BuildStrategyApplyConfiguration{}
}
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
func (b *BuildStrategyApplyConfiguration) WithType(value v1.BuildStrategyType) *BuildStrategyApplyConfiguration {
b.Type = &value
return b
}
// WithDockerStrategy sets the DockerStrategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DockerStrategy field is set to the value of the last call.
func (b *BuildStrategyApplyConfiguration) WithDockerStrategy(value *DockerBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
b.DockerStrategy = value
return b
}
// WithSourceStrategy sets the SourceStrategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SourceStrategy field is set to the value of the last call.
func (b *BuildStrategyApplyConfiguration) WithSourceStrategy(value *SourceBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
b.SourceStrategy = value
return b
}
// WithCustomStrategy sets the CustomStrategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CustomStrategy field is set to the value of the last call.
func (b *BuildStrategyApplyConfiguration) WithCustomStrategy(value *CustomBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
b.CustomStrategy = value
return b
}
// WithJenkinsPipelineStrategy sets the JenkinsPipelineStrategy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the JenkinsPipelineStrategy field is set to the value of the last call.
func (b *BuildStrategyApplyConfiguration) WithJenkinsPipelineStrategy(value *JenkinsPipelineBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
b.JenkinsPipelineStrategy = value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// BuildStatusOutputToApplyConfiguration represents an declarative configuration of the BuildStatusOutputTo type for use
// with apply.
type BuildStatusOutputToApplyConfiguration struct {
ImageDigest *string `json:"imageDigest,omitempty"`
}
// BuildStatusOutputToApplyConfiguration constructs an declarative configuration of the BuildStatusOutputTo type for use with
// apply.
func BuildStatusOutputTo() *BuildStatusOutputToApplyConfiguration {
return &BuildStatusOutputToApplyConfiguration{}
}
// WithImageDigest sets the ImageDigest field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImageDigest field is set to the value of the last call.
func (b *BuildStatusOutputToApplyConfiguration) WithImageDigest(value string) *BuildStatusOutputToApplyConfiguration {
b.ImageDigest = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go | vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImageChangeTriggerStatusApplyConfiguration represents an declarative configuration of the ImageChangeTriggerStatus type for use
// with apply.
type ImageChangeTriggerStatusApplyConfiguration struct {
LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"`
From *ImageStreamTagReferenceApplyConfiguration `json:"from,omitempty"`
LastTriggerTime *metav1.Time `json:"lastTriggerTime,omitempty"`
}
// ImageChangeTriggerStatusApplyConfiguration constructs an declarative configuration of the ImageChangeTriggerStatus type for use with
// apply.
func ImageChangeTriggerStatus() *ImageChangeTriggerStatusApplyConfiguration {
return &ImageChangeTriggerStatusApplyConfiguration{}
}
// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTriggeredImageID field is set to the value of the last call.
func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerStatusApplyConfiguration {
b.LastTriggeredImageID = &value
return b
}
// WithFrom sets the From field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the From field is set to the value of the last call.
func (b *ImageChangeTriggerStatusApplyConfiguration) WithFrom(value *ImageStreamTagReferenceApplyConfiguration) *ImageChangeTriggerStatusApplyConfiguration {
b.From = value
return b
}
// WithLastTriggerTime sets the LastTriggerTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTriggerTime field is set to the value of the last call.
func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggerTime(value metav1.Time) *ImageChangeTriggerStatusApplyConfiguration {
b.LastTriggerTime = &value
return b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go | vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go | // Code generated by applyconfiguration-gen. DO NOT EDIT.
package internal
import (
"fmt"
"sync"
typed "sigs.k8s.io/structured-merge-diff/v4/typed"
)
func Parser() *typed.Parser {
parserOnce.Do(func() {
var err error
parser, err = typed.NewParser(schemaYAML)
if err != nil {
panic(fmt.Sprintf("Failed to parse schema: %v", err))
}
})
return parser
}
var parserOnce sync.Once
var parser *typed.Parser
var schemaYAML = typed.YAMLObject(`types:
- name: com.github.openshift.api.build.v1.BinaryBuildSource
map:
fields:
- name: asFile
type:
scalar: string
- name: com.github.openshift.api.build.v1.BitbucketWebHookCause
map:
fields:
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: secret
type:
scalar: string
- name: com.github.openshift.api.build.v1.Build
map:
fields:
- name: apiVersion
type:
scalar: string
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: spec
type:
namedType: com.github.openshift.api.build.v1.BuildSpec
default: {}
- name: status
type:
namedType: com.github.openshift.api.build.v1.BuildStatus
default: {}
- name: com.github.openshift.api.build.v1.BuildCondition
map:
fields:
- name: lastTransitionTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: lastUpdateTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: message
type:
scalar: string
- name: reason
type:
scalar: string
- name: status
type:
scalar: string
default: ""
- name: type
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.BuildConfig
map:
fields:
- name: apiVersion
type:
scalar: string
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: spec
type:
namedType: com.github.openshift.api.build.v1.BuildConfigSpec
default: {}
- name: status
type:
namedType: com.github.openshift.api.build.v1.BuildConfigStatus
default: {}
- name: com.github.openshift.api.build.v1.BuildConfigSpec
map:
fields:
- name: completionDeadlineSeconds
type:
scalar: numeric
- name: failedBuildsHistoryLimit
type:
scalar: numeric
- name: mountTrustedCA
type:
scalar: boolean
- name: nodeSelector
type:
map:
elementType:
scalar: string
- name: output
type:
namedType: com.github.openshift.api.build.v1.BuildOutput
default: {}
- name: postCommit
type:
namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec
default: {}
- name: resources
type:
namedType: io.k8s.api.core.v1.ResourceRequirements
default: {}
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: runPolicy
type:
scalar: string
- name: serviceAccount
type:
scalar: string
- name: source
type:
namedType: com.github.openshift.api.build.v1.BuildSource
default: {}
- name: strategy
type:
namedType: com.github.openshift.api.build.v1.BuildStrategy
default: {}
- name: successfulBuildsHistoryLimit
type:
scalar: numeric
- name: triggers
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildTriggerPolicy
elementRelationship: atomic
- name: com.github.openshift.api.build.v1.BuildConfigStatus
map:
fields:
- name: imageChangeTriggers
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.ImageChangeTriggerStatus
elementRelationship: atomic
- name: lastVersion
type:
scalar: numeric
default: 0
- name: com.github.openshift.api.build.v1.BuildOutput
map:
fields:
- name: imageLabels
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.ImageLabel
elementRelationship: atomic
- name: pushSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: to
type:
namedType: io.k8s.api.core.v1.ObjectReference
- name: com.github.openshift.api.build.v1.BuildPostCommitSpec
map:
fields:
- name: args
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: command
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: script
type:
scalar: string
- name: com.github.openshift.api.build.v1.BuildSource
map:
fields:
- name: binary
type:
namedType: com.github.openshift.api.build.v1.BinaryBuildSource
- name: configMaps
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.ConfigMapBuildSource
elementRelationship: atomic
- name: contextDir
type:
scalar: string
- name: dockerfile
type:
scalar: string
- name: git
type:
namedType: com.github.openshift.api.build.v1.GitBuildSource
- name: images
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.ImageSource
elementRelationship: atomic
- name: secrets
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.SecretBuildSource
elementRelationship: atomic
- name: sourceSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: type
type:
scalar: string
- name: com.github.openshift.api.build.v1.BuildSpec
map:
fields:
- name: completionDeadlineSeconds
type:
scalar: numeric
- name: mountTrustedCA
type:
scalar: boolean
- name: nodeSelector
type:
map:
elementType:
scalar: string
- name: output
type:
namedType: com.github.openshift.api.build.v1.BuildOutput
default: {}
- name: postCommit
type:
namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec
default: {}
- name: resources
type:
namedType: io.k8s.api.core.v1.ResourceRequirements
default: {}
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: serviceAccount
type:
scalar: string
- name: source
type:
namedType: com.github.openshift.api.build.v1.BuildSource
default: {}
- name: strategy
type:
namedType: com.github.openshift.api.build.v1.BuildStrategy
default: {}
- name: triggeredBy
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildTriggerCause
elementRelationship: atomic
- name: com.github.openshift.api.build.v1.BuildStatus
map:
fields:
- name: cancelled
type:
scalar: boolean
- name: completionTimestamp
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: conditions
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildCondition
elementRelationship: associative
keys:
- type
- name: config
type:
namedType: io.k8s.api.core.v1.ObjectReference
- name: duration
type:
scalar: numeric
- name: logSnippet
type:
scalar: string
- name: message
type:
scalar: string
- name: output
type:
namedType: com.github.openshift.api.build.v1.BuildStatusOutput
default: {}
- name: outputDockerImageReference
type:
scalar: string
- name: phase
type:
scalar: string
default: ""
- name: reason
type:
scalar: string
- name: stages
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.StageInfo
elementRelationship: atomic
- name: startTimestamp
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: com.github.openshift.api.build.v1.BuildStatusOutput
map:
fields:
- name: to
type:
namedType: com.github.openshift.api.build.v1.BuildStatusOutputTo
- name: com.github.openshift.api.build.v1.BuildStatusOutputTo
map:
fields:
- name: imageDigest
type:
scalar: string
- name: com.github.openshift.api.build.v1.BuildStrategy
map:
fields:
- name: customStrategy
type:
namedType: com.github.openshift.api.build.v1.CustomBuildStrategy
- name: dockerStrategy
type:
namedType: com.github.openshift.api.build.v1.DockerBuildStrategy
- name: jenkinsPipelineStrategy
type:
namedType: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy
- name: sourceStrategy
type:
namedType: com.github.openshift.api.build.v1.SourceBuildStrategy
- name: type
type:
scalar: string
- name: com.github.openshift.api.build.v1.BuildTriggerCause
map:
fields:
- name: bitbucketWebHook
type:
namedType: com.github.openshift.api.build.v1.BitbucketWebHookCause
- name: genericWebHook
type:
namedType: com.github.openshift.api.build.v1.GenericWebHookCause
- name: githubWebHook
type:
namedType: com.github.openshift.api.build.v1.GitHubWebHookCause
- name: gitlabWebHook
type:
namedType: com.github.openshift.api.build.v1.GitLabWebHookCause
- name: imageChangeBuild
type:
namedType: com.github.openshift.api.build.v1.ImageChangeCause
- name: message
type:
scalar: string
- name: com.github.openshift.api.build.v1.BuildTriggerPolicy
map:
fields:
- name: bitbucket
type:
namedType: com.github.openshift.api.build.v1.WebHookTrigger
- name: generic
type:
namedType: com.github.openshift.api.build.v1.WebHookTrigger
- name: github
type:
namedType: com.github.openshift.api.build.v1.WebHookTrigger
- name: gitlab
type:
namedType: com.github.openshift.api.build.v1.WebHookTrigger
- name: imageChange
type:
namedType: com.github.openshift.api.build.v1.ImageChangeTrigger
- name: type
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.BuildVolume
map:
fields:
- name: mounts
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildVolumeMount
elementRelationship: associative
keys:
- destinationPath
- name: name
type:
scalar: string
default: ""
- name: source
type:
namedType: com.github.openshift.api.build.v1.BuildVolumeSource
default: {}
- name: com.github.openshift.api.build.v1.BuildVolumeMount
map:
fields:
- name: destinationPath
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.BuildVolumeSource
map:
fields:
- name: configMap
type:
namedType: io.k8s.api.core.v1.ConfigMapVolumeSource
- name: csi
type:
namedType: io.k8s.api.core.v1.CSIVolumeSource
- name: secret
type:
namedType: io.k8s.api.core.v1.SecretVolumeSource
- name: type
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.ConfigMapBuildSource
map:
fields:
- name: configMap
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
default: {}
- name: destinationDir
type:
scalar: string
- name: com.github.openshift.api.build.v1.CustomBuildStrategy
map:
fields:
- name: buildAPIVersion
type:
scalar: string
- name: env
type:
list:
elementType:
namedType: io.k8s.api.core.v1.EnvVar
elementRelationship: atomic
- name: exposeDockerSocket
type:
scalar: boolean
- name: forcePull
type:
scalar: boolean
- name: from
type:
namedType: io.k8s.api.core.v1.ObjectReference
default: {}
- name: pullSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: secrets
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.SecretSpec
elementRelationship: atomic
- name: com.github.openshift.api.build.v1.DockerBuildStrategy
map:
fields:
- name: buildArgs
type:
list:
elementType:
namedType: io.k8s.api.core.v1.EnvVar
elementRelationship: atomic
- name: dockerfilePath
type:
scalar: string
- name: env
type:
list:
elementType:
namedType: io.k8s.api.core.v1.EnvVar
elementRelationship: atomic
- name: forcePull
type:
scalar: boolean
- name: from
type:
namedType: io.k8s.api.core.v1.ObjectReference
- name: imageOptimizationPolicy
type:
scalar: string
- name: noCache
type:
scalar: boolean
- name: pullSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: volumes
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildVolume
elementRelationship: associative
keys:
- name
- name: com.github.openshift.api.build.v1.GenericWebHookCause
map:
fields:
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: secret
type:
scalar: string
- name: com.github.openshift.api.build.v1.GitBuildSource
map:
fields:
- name: httpProxy
type:
scalar: string
- name: httpsProxy
type:
scalar: string
- name: noProxy
type:
scalar: string
- name: ref
type:
scalar: string
- name: uri
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.GitHubWebHookCause
map:
fields:
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: secret
type:
scalar: string
- name: com.github.openshift.api.build.v1.GitLabWebHookCause
map:
fields:
- name: revision
type:
namedType: com.github.openshift.api.build.v1.SourceRevision
- name: secret
type:
scalar: string
- name: com.github.openshift.api.build.v1.GitSourceRevision
map:
fields:
- name: author
type:
namedType: com.github.openshift.api.build.v1.SourceControlUser
default: {}
- name: commit
type:
scalar: string
- name: committer
type:
namedType: com.github.openshift.api.build.v1.SourceControlUser
default: {}
- name: message
type:
scalar: string
- name: com.github.openshift.api.build.v1.ImageChangeCause
map:
fields:
- name: fromRef
type:
namedType: io.k8s.api.core.v1.ObjectReference
- name: imageID
type:
scalar: string
- name: com.github.openshift.api.build.v1.ImageChangeTrigger
map:
fields:
- name: from
type:
namedType: io.k8s.api.core.v1.ObjectReference
- name: lastTriggeredImageID
type:
scalar: string
- name: paused
type:
scalar: boolean
- name: com.github.openshift.api.build.v1.ImageChangeTriggerStatus
map:
fields:
- name: from
type:
namedType: com.github.openshift.api.build.v1.ImageStreamTagReference
default: {}
- name: lastTriggerTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: lastTriggeredImageID
type:
scalar: string
- name: com.github.openshift.api.build.v1.ImageLabel
map:
fields:
- name: name
type:
scalar: string
default: ""
- name: value
type:
scalar: string
- name: com.github.openshift.api.build.v1.ImageSource
map:
fields:
- name: as
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: from
type:
namedType: io.k8s.api.core.v1.ObjectReference
default: {}
- name: paths
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.ImageSourcePath
elementRelationship: atomic
- name: pullSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: com.github.openshift.api.build.v1.ImageSourcePath
map:
fields:
- name: destinationDir
type:
scalar: string
default: ""
- name: sourcePath
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.ImageStreamTagReference
map:
fields:
- name: name
type:
scalar: string
- name: namespace
type:
scalar: string
- name: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy
map:
fields:
- name: env
type:
list:
elementType:
namedType: io.k8s.api.core.v1.EnvVar
elementRelationship: atomic
- name: jenkinsfile
type:
scalar: string
- name: jenkinsfilePath
type:
scalar: string
- name: com.github.openshift.api.build.v1.SecretBuildSource
map:
fields:
- name: destinationDir
type:
scalar: string
- name: secret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
default: {}
- name: com.github.openshift.api.build.v1.SecretLocalReference
map:
fields:
- name: name
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.SecretSpec
map:
fields:
- name: mountPath
type:
scalar: string
default: ""
- name: secretSource
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
default: {}
- name: com.github.openshift.api.build.v1.SourceBuildStrategy
map:
fields:
- name: env
type:
list:
elementType:
namedType: io.k8s.api.core.v1.EnvVar
elementRelationship: atomic
- name: forcePull
type:
scalar: boolean
- name: from
type:
namedType: io.k8s.api.core.v1.ObjectReference
default: {}
- name: incremental
type:
scalar: boolean
- name: pullSecret
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: scripts
type:
scalar: string
- name: volumes
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.BuildVolume
elementRelationship: associative
keys:
- name
- name: com.github.openshift.api.build.v1.SourceControlUser
map:
fields:
- name: email
type:
scalar: string
- name: name
type:
scalar: string
- name: com.github.openshift.api.build.v1.SourceRevision
map:
fields:
- name: git
type:
namedType: com.github.openshift.api.build.v1.GitSourceRevision
- name: type
type:
scalar: string
default: ""
- name: com.github.openshift.api.build.v1.StageInfo
map:
fields:
- name: durationMilliseconds
type:
scalar: numeric
- name: name
type:
scalar: string
- name: startTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: steps
type:
list:
elementType:
namedType: com.github.openshift.api.build.v1.StepInfo
elementRelationship: atomic
- name: com.github.openshift.api.build.v1.StepInfo
map:
fields:
- name: durationMilliseconds
type:
scalar: numeric
- name: name
type:
scalar: string
- name: startTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: com.github.openshift.api.build.v1.WebHookTrigger
map:
fields:
- name: allowEnv
type:
scalar: boolean
- name: secret
type:
scalar: string
- name: secretReference
type:
namedType: com.github.openshift.api.build.v1.SecretLocalReference
- name: io.k8s.api.core.v1.CSIVolumeSource
map:
fields:
- name: driver
type:
scalar: string
default: ""
- name: fsType
type:
scalar: string
- name: nodePublishSecretRef
type:
namedType: io.k8s.api.core.v1.LocalObjectReference
- name: readOnly
type:
scalar: boolean
- name: volumeAttributes
type:
map:
elementType:
scalar: string
- name: io.k8s.api.core.v1.ConfigMapKeySelector
map:
fields:
- name: key
type:
scalar: string
default: ""
- name: name
type:
scalar: string
- name: optional
type:
scalar: boolean
elementRelationship: atomic
- name: io.k8s.api.core.v1.ConfigMapVolumeSource
map:
fields:
- name: defaultMode
type:
scalar: numeric
- name: items
type:
list:
elementType:
namedType: io.k8s.api.core.v1.KeyToPath
elementRelationship: atomic
- name: name
type:
scalar: string
- name: optional
type:
scalar: boolean
- name: io.k8s.api.core.v1.EnvVar
map:
fields:
- name: name
type:
scalar: string
default: ""
- name: value
type:
scalar: string
- name: valueFrom
type:
namedType: io.k8s.api.core.v1.EnvVarSource
- name: io.k8s.api.core.v1.EnvVarSource
map:
fields:
- name: configMapKeyRef
type:
namedType: io.k8s.api.core.v1.ConfigMapKeySelector
- name: fieldRef
type:
namedType: io.k8s.api.core.v1.ObjectFieldSelector
- name: resourceFieldRef
type:
namedType: io.k8s.api.core.v1.ResourceFieldSelector
- name: secretKeyRef
type:
namedType: io.k8s.api.core.v1.SecretKeySelector
- name: io.k8s.api.core.v1.KeyToPath
map:
fields:
- name: key
type:
scalar: string
default: ""
- name: mode
type:
scalar: numeric
- name: path
type:
scalar: string
default: ""
- name: io.k8s.api.core.v1.LocalObjectReference
map:
fields:
- name: name
type:
scalar: string
elementRelationship: atomic
- name: io.k8s.api.core.v1.ObjectFieldSelector
map:
fields:
- name: apiVersion
type:
scalar: string
- name: fieldPath
type:
scalar: string
default: ""
elementRelationship: atomic
- name: io.k8s.api.core.v1.ObjectReference
map:
fields:
- name: apiVersion
type:
scalar: string
- name: fieldPath
type:
scalar: string
- name: kind
type:
scalar: string
- name: name
type:
scalar: string
- name: namespace
type:
scalar: string
- name: resourceVersion
type:
scalar: string
- name: uid
type:
scalar: string
elementRelationship: atomic
- name: io.k8s.api.core.v1.ResourceClaim
map:
fields:
- name: name
type:
scalar: string
default: ""
- name: io.k8s.api.core.v1.ResourceFieldSelector
map:
fields:
- name: containerName
type:
scalar: string
- name: divisor
type:
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
default: {}
- name: resource
type:
scalar: string
default: ""
elementRelationship: atomic
- name: io.k8s.api.core.v1.ResourceRequirements
map:
fields:
- name: claims
type:
list:
elementType:
namedType: io.k8s.api.core.v1.ResourceClaim
elementRelationship: associative
keys:
- name
- name: limits
type:
map:
elementType:
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
- name: requests
type:
map:
elementType:
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
- name: io.k8s.api.core.v1.SecretKeySelector
map:
fields:
- name: key
type:
scalar: string
default: ""
- name: name
type:
scalar: string
- name: optional
type:
scalar: boolean
elementRelationship: atomic
- name: io.k8s.api.core.v1.SecretVolumeSource
map:
fields:
- name: defaultMode
type:
scalar: numeric
- name: items
type:
list:
elementType:
namedType: io.k8s.api.core.v1.KeyToPath
elementRelationship: atomic
- name: optional
type:
scalar: boolean
- name: secretName
type:
scalar: string
- name: io.k8s.apimachinery.pkg.api.resource.Quantity
scalar: untyped
- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1
map:
elementType:
scalar: untyped
list:
elementType:
namedType: __untyped_atomic_
elementRelationship: atomic
map:
elementType:
namedType: __untyped_deduced_
elementRelationship: separable
- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry
map:
fields:
- name: apiVersion
type:
scalar: string
- name: fieldsType
type:
scalar: string
- name: fieldsV1
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1
- name: manager
type:
scalar: string
- name: operation
type:
scalar: string
- name: subresource
type:
scalar: string
- name: time
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
map:
fields:
- name: annotations
type:
map:
elementType:
scalar: string
- name: creationTimestamp
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
default: {}
- name: deletionGracePeriodSeconds
type:
scalar: numeric
- name: deletionTimestamp
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: finalizers
type:
list:
elementType:
scalar: string
elementRelationship: associative
- name: generateName
type:
scalar: string
- name: generation
type:
scalar: numeric
- name: labels
type:
map:
elementType:
scalar: string
- name: managedFields
type:
list:
elementType:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry
elementRelationship: atomic
- name: name
type:
scalar: string
- name: namespace
type:
scalar: string
- name: ownerReferences
type:
list:
elementType:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
elementRelationship: associative
keys:
- uid
- name: resourceVersion
type:
scalar: string
- name: selfLink
type:
scalar: string
- name: uid
type:
scalar: string
- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
map:
fields:
- name: apiVersion
type:
scalar: string
default: ""
- name: blockOwnerDeletion
type:
scalar: boolean
- name: controller
type:
scalar: boolean
- name: kind
type:
scalar: string
default: ""
- name: name
type:
scalar: string
default: ""
- name: uid
type:
scalar: string
default: ""
elementRelationship: atomic
- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time
scalar: untyped
- name: __untyped_atomic_
scalar: untyped
list:
elementType:
namedType: __untyped_atomic_
elementRelationship: atomic
map:
elementType:
namedType: __untyped_atomic_
elementRelationship: atomic
- name: __untyped_deduced_
scalar: untyped
list:
elementType:
namedType: __untyped_atomic_
elementRelationship: atomic
map:
elementType:
namedType: __untyped_deduced_
elementRelationship: separable
`)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/pkg/serialization/serialization.go | vendor/github.com/openshift/api/pkg/serialization/serialization.go | package serialization
import (
"k8s.io/apimachinery/pkg/runtime"
)
// DecodeNestedRawExtensionOrUnknown
func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) {
if ext.Raw == nil || ext.Object != nil {
return
}
obj, gvk, err := d.Decode(ext.Raw, nil, nil)
if err != nil {
unk := &runtime.Unknown{Raw: ext.Raw}
if runtime.IsNotRegisteredError(err) {
if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil {
unk.APIVersion = gvk.GroupVersion().String()
unk.Kind = gvk.Kind
ext.Object = unk
return
}
}
// TODO: record mime-type with the object
if gvk != nil {
unk.APIVersion = gvk.GroupVersion().String()
unk.Kind = gvk.Kind
}
obj = unk
}
ext.Object = obj
}
// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or
// return an error.
func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error {
if ext.Raw != nil || ext.Object == nil {
return nil
}
data, err := runtime.Encode(e, ext.Object)
if err != nil {
return err
}
ext.Raw = data
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) {
*out = *in
if in.Environment != nil {
in, out := &in.Environment, &out.Environment
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams.
func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams {
if in == nil {
return nil
}
out := new(CustomDeploymentStrategyParams)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) {
*out = *in
if in.ImageTrigger != nil {
in, out := &in.ImageTrigger, &out.ImageTrigger
*out = new(DeploymentCauseImageTrigger)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause.
func (in *DeploymentCause) DeepCopy() *DeploymentCause {
if in == nil {
return nil
}
out := new(DeploymentCause)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) {
*out = *in
out.From = in.From
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger.
func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger {
if in == nil {
return nil
}
out := new(DeploymentCauseImageTrigger)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig.
func (in *DeploymentConfig) DeepCopy() *DeploymentConfig {
if in == nil {
return nil
}
out := new(DeploymentConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DeploymentConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList.
func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList {
if in == nil {
return nil
}
out := new(DeploymentConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.Spec = in.Spec
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback.
func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback {
if in == nil {
return nil
}
out := new(DeploymentConfigRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) {
*out = *in
out.From = in.From
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec.
func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec {
if in == nil {
return nil
}
out := new(DeploymentConfigRollbackSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) {
*out = *in
in.Strategy.DeepCopyInto(&out.Strategy)
if in.Triggers != nil {
in, out := &in.Triggers, &out.Triggers
*out = make(DeploymentTriggerPolicies, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(corev1.PodTemplateSpec)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec.
func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec {
if in == nil {
return nil
}
out := new(DeploymentConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) {
*out = *in
if in.Details != nil {
in, out := &in.Details, &out.Details
*out = new(DeploymentDetails)
(*in).DeepCopyInto(*out)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus.
func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus {
if in == nil {
return nil
}
out := new(DeploymentConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) {
*out = *in
if in.Causes != nil {
in, out := &in.Causes, &out.Causes
*out = make([]DeploymentCause, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails.
func (in *DeploymentDetails) DeepCopy() *DeploymentDetails {
if in == nil {
return nil
}
out := new(DeploymentDetails)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog.
func (in *DeploymentLog) DeepCopy() *DeploymentLog {
if in == nil {
return nil
}
out := new(DeploymentLog)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentLog) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.SinceSeconds != nil {
in, out := &in.SinceSeconds, &out.SinceSeconds
*out = new(int64)
**out = **in
}
if in.SinceTime != nil {
in, out := &in.SinceTime, &out.SinceTime
*out = (*in).DeepCopy()
}
if in.TailLines != nil {
in, out := &in.TailLines, &out.TailLines
*out = new(int64)
**out = **in
}
if in.LimitBytes != nil {
in, out := &in.LimitBytes, &out.LimitBytes
*out = new(int64)
**out = **in
}
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions.
func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions {
if in == nil {
return nil
}
out := new(DeploymentLogOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.ExcludeTriggers != nil {
in, out := &in.ExcludeTriggers, &out.ExcludeTriggers
*out = make([]DeploymentTriggerType, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest.
func (in *DeploymentRequest) DeepCopy() *DeploymentRequest {
if in == nil {
return nil
}
out := new(DeploymentRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.CustomParams != nil {
in, out := &in.CustomParams, &out.CustomParams
*out = new(CustomDeploymentStrategyParams)
(*in).DeepCopyInto(*out)
}
if in.RecreateParams != nil {
in, out := &in.RecreateParams, &out.RecreateParams
*out = new(RecreateDeploymentStrategyParams)
(*in).DeepCopyInto(*out)
}
if in.RollingParams != nil {
in, out := &in.RollingParams, &out.RollingParams
*out = new(RollingDeploymentStrategyParams)
(*in).DeepCopyInto(*out)
}
in.Resources.DeepCopyInto(&out.Resources)
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) {
*out = *in
if in.ContainerNames != nil {
in, out := &in.ContainerNames, &out.ContainerNames
*out = make([]string, len(*in))
copy(*out, *in)
}
out.From = in.From
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams.
func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams {
if in == nil {
return nil
}
out := new(DeploymentTriggerImageChangeParams)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) {
{
in := &in
*out = make(DeploymentTriggerPolicies, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies.
func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies {
if in == nil {
return nil
}
out := new(DeploymentTriggerPolicies)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) {
*out = *in
if in.ImageChangeParams != nil {
in, out := &in.ImageChangeParams, &out.ImageChangeParams
*out = new(DeploymentTriggerImageChangeParams)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy.
func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy {
if in == nil {
return nil
}
out := new(DeploymentTriggerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook.
func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook {
if in == nil {
return nil
}
out := new(ExecNewPodHook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) {
*out = *in
if in.ExecNewPod != nil {
in, out := &in.ExecNewPod, &out.ExecNewPod
*out = new(ExecNewPodHook)
(*in).DeepCopyInto(*out)
}
if in.TagImages != nil {
in, out := &in.TagImages, &out.TagImages
*out = make([]TagImageHook, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook.
func (in *LifecycleHook) DeepCopy() *LifecycleHook {
if in == nil {
return nil
}
out := new(LifecycleHook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) {
*out = *in
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int64)
**out = **in
}
if in.Pre != nil {
in, out := &in.Pre, &out.Pre
*out = new(LifecycleHook)
(*in).DeepCopyInto(*out)
}
if in.Mid != nil {
in, out := &in.Mid, &out.Mid
*out = new(LifecycleHook)
(*in).DeepCopyInto(*out)
}
if in.Post != nil {
in, out := &in.Post, &out.Post
*out = new(LifecycleHook)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams.
func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams {
if in == nil {
return nil
}
out := new(RecreateDeploymentStrategyParams)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) {
*out = *in
if in.UpdatePeriodSeconds != nil {
in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds
*out = new(int64)
**out = **in
}
if in.IntervalSeconds != nil {
in, out := &in.IntervalSeconds, &out.IntervalSeconds
*out = new(int64)
**out = **in
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int64)
**out = **in
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.MaxSurge != nil {
in, out := &in.MaxSurge, &out.MaxSurge
*out = new(intstr.IntOrString)
**out = **in
}
if in.Pre != nil {
in, out := &in.Pre, &out.Pre
*out = new(LifecycleHook)
(*in).DeepCopyInto(*out)
}
if in.Post != nil {
in, out := &in.Post, &out.Post
*out = new(LifecycleHook)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams.
func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams {
if in == nil {
return nil
}
out := new(RollingDeploymentStrategyParams)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagImageHook) DeepCopyInto(out *TagImageHook) {
*out = *in
out.To = in.To
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook.
func (in *TagImageHook) DeepCopy() *TagImageHook {
if in == nil {
return nil
}
out := new(TagImageHook)
in.DeepCopyInto(out)
return out
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/consts.go | vendor/github.com/openshift/api/apps/v1/consts.go | package v1
const (
// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
// Used for specifying the reason for cancellation or failure of a deployment
// This is on replication controller set by deployer controller.
DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
// DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the name of the deployer Pod which will act upon the ReplicationController
// to implement the deployment behavior.
// This is set on replication controller by deployer controller.
DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
// DeploymentConfig on which the deployment is based.
// This is set on replication controller pod template by deployer controller.
DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
// The annotation value does not matter and its mere presence indicates cancellation.
// This is set on replication controller by deployment config controller or oc rollout cancel command.
DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
// DeploymentConfig on which a given deployment is based.
// This is set on replication controller by deployer controller.
DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
// the deployment.
// This is set on replication controller pod template by deployment config controller.
DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
// DeployerPodForDeploymentLabel is a label which groups pods related to a
// deployment. The value is a deployment name. The deployer pod and hook pods
// created by the internal strategies will have this label. Custom
// strategies can apply this label to any pods they create, enabling
// platform-provided cancellation and garbage collection support.
// This is set on deployer pod by deployer controller.
DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
// DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of
// a deployment.
// This is set on replication controller by deployer controller.
DeploymentStatusAnnotation = "openshift.io/deployment.phase"
)
type DeploymentConditionReason string
var (
// ReplicationControllerUpdatedReason is added in a deployment config when one of its replication
// controllers is updated as part of the rollout process.
ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated"
// ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication
// controller.
ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError"
// ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication
// controller.
NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated"
// NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made
// available ie. the number of new pods that have passed readiness checks and run for at least
// minReadySeconds is at least the minimum available pods that need to run for the deployment config.
NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable"
// ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show
// any progress within the given deadline (progressDeadlineSeconds).
ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded"
// DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be
// estimated once a deployment config is paused.
DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused"
// DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally
// deployment configs that paused amidst a rollout.
DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed"
// RolloutCancelledReason is added in a deployment config when its newest rollout was
// interrupted by cancellation.
RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled"
)
// DeploymentStatus describes the possible states a deployment can be in.
type DeploymentStatus string
var (
// DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
DeploymentStatusNew DeploymentStatus = "New"
// DeploymentStatusPending means the deployment been handed over to a deployment strategy,
// but the strategy has not yet declared the deployment to be running.
DeploymentStatusPending DeploymentStatus = "Pending"
// DeploymentStatusRunning means the deployment strategy has reported the deployment as
// being in-progress.
DeploymentStatusRunning DeploymentStatus = "Running"
// DeploymentStatusComplete means the deployment finished without an error.
DeploymentStatusComplete DeploymentStatus = "Complete"
// DeploymentStatusFailed means the deployment finished with an error.
DeploymentStatusFailed DeploymentStatus = "Failed"
)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/types.go | vendor/github.com/openshift/api/apps/v1/types.go | package v1
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// +genclient
// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest
// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000
// Deployment Configs define the template for a pod and manages deploying new images or configuration changes.
// A single deployment configuration is usually analogous to a single micro-service. Can support many different
// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as
// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.
//
// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed.
// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment
// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
// is triggered by any means.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// Deprecated: Use deployments or other means for declarative updates for pods instead.
// +openshift:compatibility-gen:level=1
type DeploymentConfig struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec represents a desired deployment state and how to deploy to it.
Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current deployment state.
// +optional
Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// DeploymentConfigSpec represents the desired state of the deployment.
type DeploymentConfigSpec struct {
// Strategy describes how a deployment is executed.
// +optional
Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"`
// MinReadySeconds is the minimum number of seconds for which a newly created pod should
// be ready without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
// Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
// are defined, a new deployment can only occur as a result of an explicit client update to the
// DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
// +optional
Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"`
// Replicas is the number of desired replicas.
// +optional
Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
// RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
// This field is a pointer to allow for differentiation between an explicit zero and not specified.
// Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"`
// Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
// deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
// or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
// +optional
Test bool `json:"test" protobuf:"varint,5,opt,name=test"`
// Paused indicates that the deployment config is paused resulting in no new deployments on template
// changes or changes in the template caused by other triggers.
Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"`
// Selector is a label query over pods that should match the Replicas count.
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"`
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"`
}
// DeploymentStrategy describes how to perform a deployment.
type DeploymentStrategy struct {
// Type is the name of a deployment strategy.
// +optional
Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
// CustomParams are the input to the Custom deployment strategy, and may also
// be specified for the Recreate and Rolling strategies to customize the execution
// process that runs the deployment.
CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"`
// RecreateParams are the input to the Recreate deployment strategy.
RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"`
// RollingParams are the input to the Rolling deployment strategy.
RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"`
// Resources contains resource requirements to execute the deployment and any hooks.
Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"`
// Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"`
// Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"`
// ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
// config may be active on a node before the system actively tries to terminate them.
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"`
}
// DeploymentStrategyType refers to a specific DeploymentStrategy implementation.
type DeploymentStrategyType string
const (
// DeploymentStrategyTypeRecreate is a simple strategy suitable as a default.
DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate"
// DeploymentStrategyTypeCustom is a user defined strategy.
DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom"
// DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater.
DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling"
)
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
type CustomDeploymentStrategyParams struct {
// Image specifies a container image which can carry out a deployment.
Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
// Environment holds the environment which will be given to the container for Image.
Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"`
// Command is optional and overrides CMD in the container Image.
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
}
// RecreateDeploymentStrategyParams are the input to the Recreate deployment
// strategy.
type RecreateDeploymentStrategyParams struct {
// TimeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
// Pre is a lifecycle hook which is executed before the strategy manipulates
// the deployment. All LifecycleHookFailurePolicy values are supported.
Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"`
// Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
// pod is created. All LifecycleHookFailurePolicy values are supported.
Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"`
// Post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"`
}
// RollingDeploymentStrategyParams are the input to the Rolling deployment
// strategy.
type RollingDeploymentStrategyParams struct {
	// UpdatePeriodSeconds is the time to wait between individual pod updates.
	// If the value is nil, a default will be used.
	UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"`
	// IntervalSeconds is the time to wait between polling deployment status
	// after update. If the value is nil, a default will be used.
	IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"`
	// TimeoutSeconds is the time to wait for updates before giving up. If the
	// value is nil, a default will be used.
	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// MaxUnavailable is the maximum number of pods that can be unavailable
	// during the update. Value can be an absolute number (ex: 5) or a
	// percentage of total pods at the start of update (ex: 10%). Absolute
	// number is calculated from percentage by rounding down.
	//
	// This cannot be 0 if MaxSurge is 0. By default, 25% is used.
	//
	// Example: when this is set to 30%, the old RC can be scaled down by 30%
	// immediately when the rolling update starts. Once new pods are ready, old
	// RC can be scaled down further, followed by scaling up the new RC,
	// ensuring that at least 70% of original number of pods are available at
	// all times during the update.
	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"`
	// MaxSurge is the maximum number of pods that can be scheduled above the
	// original number of pods. Value can be an absolute number (ex: 5) or a
	// percentage of total pods at the start of the update (ex: 10%). Absolute
	// number is calculated from percentage by rounding up.
	//
	// This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
	//
	// Example: when this is set to 30%, the new RC can be scaled up by 30%
	// immediately when the rolling update starts. Once old pods have been
	// killed, new RC can be scaled up further, ensuring that total number of
	// pods running at any time during the update is at most 130% of original
	// pods.
	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"`
	// Pre is a lifecycle hook which is executed before the deployment process
	// begins. All LifecycleHookFailurePolicy values are supported.
	// NOTE: protobuf field 6 is unused here (presumably a removed field); do not reuse it.
	Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"`
	// Post is a lifecycle hook which is executed after the strategy has
	// finished all deployment logic. All LifecycleHookFailurePolicy values
	// are supported.
	Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"`
}
// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
type LifecycleHook struct {
	// FailurePolicy specifies what action to take if the hook fails.
	FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"`
	// ExecNewPod specifies the options for a lifecycle hook backed by a pod.
	ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"`
	// TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
	TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"`
}

// LifecycleHookFailurePolicy describes possible actions to take if a hook fails.
type LifecycleHookFailurePolicy string

// Valid lifecycle hook failure policies.
const (
	// LifecycleHookFailurePolicyRetry means retry the hook until it succeeds.
	LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry"
	// LifecycleHookFailurePolicyAbort means abort the deployment.
	LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort"
	// LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
	LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore"
)
// ExecNewPodHook is a hook implementation which runs a command in a new pod
// based on the specified container which is assumed to be part of the
// deployment template.
type ExecNewPodHook struct {
	// Command is the action command and its arguments.
	Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
	// Env is a set of environment variables to supply to the hook pod's container.
	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
	// ContainerName is the name of a container in the deployment pod template
	// whose container image will be used for the hook pod's container.
	ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
	// Volumes is a list of named volumes from the pod template which should be
	// copied to the hook pod. Volume names not found in pod spec are ignored.
	// An empty list means no volumes will be copied.
	Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
}
// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
// It is used as one of the action types of LifecycleHook (see LifecycleHook.TagImages).
type TagImageHook struct {
	// ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
	// container this value will be defaulted to the name of that container.
	ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
	// To is the target ImageStreamTag to set the container's image onto.
	To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
}
// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type DeploymentTriggerPolicies []DeploymentTriggerPolicy

// String implements fmt.Stringer using the default %v formatting of the
// underlying slice (the generated gogoproto stringer is disabled above).
func (t DeploymentTriggerPolicies) String() string {
	return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t))
}
// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
type DeploymentTriggerPolicy struct {
	// Type of the trigger. See DeploymentTriggerType for the valid values.
	Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
	// ImageChangeParams represents the parameters for the ImageChange trigger.
	ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
}
// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation.
type DeploymentTriggerType string

// Valid deployment trigger types.
const (
	// DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
	// a container image repository.
	DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
	// DeploymentTriggerOnConfigChange will create new deployments in response to changes to
	// the ControllerTemplate of a DeploymentConfig.
	DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange"
)
// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
type DeploymentTriggerImageChangeParams struct {
	// Automatic means that the detection of a new tag value should result in an image update
	// inside the pod template.
	Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"`
	// ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
	// If multiple triggers point to the same containers, the resulting behavior is undefined. Future
	// API versions will make this a validation error. If ContainerNames does not point to a valid container,
	// the trigger will be ignored. Future API versions will make this a validation error.
	ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"`
	// From is a reference to an image stream tag to watch for changes. From.Name is the only
	// required subfield - if From.Namespace is blank, the namespace of the current deployment
	// trigger will be used.
	From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"`
	// LastTriggeredImage is the last image to be triggered.
	LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"`
}
// DeploymentConfigStatus represents the current deployment state.
type DeploymentConfigStatus struct {
	// LatestVersion is used to determine whether the current deployment associated with a deployment
	// config is out of sync.
	LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"`
	// ObservedGeneration is the most recent generation observed by the deployment config controller.
	ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"`
	// Replicas is the total number of pods targeted by this deployment config.
	Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
	// UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
	// that have the desired template spec.
	UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"`
	// AvailableReplicas is the total number of available pods targeted by this deployment config.
	AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"`
	// UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
	UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"`
	// Details are the reasons for the update to this deployment config.
	// This could be based on a change made by the user or caused by an automatic trigger.
	Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"`
	// Conditions represents the latest available observations of a deployment config's current state.
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"`
	// Total number of ready pods targeted by this deployment.
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"`
}
// DeploymentDetails captures information about the causes of a deployment.
type DeploymentDetails struct {
	// Message is the user specified change message, if this deployment was triggered manually by the user.
	Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
	// Causes are extended data associated with all the causes for creating a new deployment.
	Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
}
// DeploymentCause captures information about a particular cause of a deployment.
type DeploymentCause struct {
	// Type of the trigger that resulted in the creation of a new deployment.
	Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
	// ImageTrigger contains the image trigger details, if this trigger was fired based on an image change.
	ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
}
// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
// from an image change trigger.
type DeploymentCauseImageTrigger struct {
	// From is a reference to the changed object which triggered a deployment. The field may have
	// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
}
// DeploymentConditionType is the type of a DeploymentCondition.
type DeploymentConditionType string

// These are valid conditions of a DeploymentConfig.
const (
	// DeploymentAvailable means the DeploymentConfig is available, ie. at least the minimum available
	// replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type,
	// dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and
	// running for at least dc.spec.minReadySeconds.
	DeploymentAvailable DeploymentConditionType = "Available"
	// DeploymentProgressing is:
	// * True: the DeploymentConfig has been successfully deployed or is amidst getting deployed.
	//   The two different states can be determined by looking at the Reason of the Condition.
	//   For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable}
	//   and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}.
	//   TODO: Represent a successfully deployed DC by using something else for Status like Unknown?
	// * False: the DeploymentConfig has failed to deploy its latest version.
	//
	// This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds
	// field, which is responsible for the time in seconds to wait for a rollout before deciding that
	// no progress can be made, thus the rollout is aborted.
	//
	// Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down.
	DeploymentProgressing DeploymentConditionType = "Progressing"
	// DeploymentReplicaFailure is added in a deployment config when one of its pods
	// fails to be created or deleted.
	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
)
// DeploymentCondition describes the state of a deployment config at a certain point.
type DeploymentCondition struct {
	// Type of deployment condition.
	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
	// The last time this condition was updated.
	// NOTE: the protobuf field number (6) is intentionally out of declaration order
	// (presumably the field was added after the ones below); do not renumber.
	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
	// The last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000

// DeploymentConfigList is a collection of deployment configs.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type DeploymentConfigList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of deployment configs.
	Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000

// DeploymentConfigRollback provides the input to rollback generation.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type DeploymentConfigRollback struct {
	metav1.TypeMeta `json:",inline"`
	// Name of the deployment config that will be rolled back.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
	UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
	// Spec defines the options to rollback generation.
	Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"`
}
// DeploymentConfigRollbackSpec represents the options for rollback generation.
// The Include* flags select which parts of the referenced deployment are
// carried over into the rolled-back configuration.
type DeploymentConfigRollbackSpec struct {
	// From points to a ReplicationController which is a deployment.
	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
	// Revision to rollback to. If set to 0, rollback to the last revision.
	Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"`
	// IncludeTriggers specifies whether to include config Triggers.
	IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"`
	// IncludeTemplate specifies whether to include the PodTemplateSpec.
	IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"`
	// IncludeReplicationMeta specifies whether to include the replica count and selector.
	IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"`
	// IncludeStrategy specifies whether to include the deployment Strategy.
	IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000

// DeploymentRequest is a request to a deployment config for a new deployment.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type DeploymentRequest struct {
	metav1.TypeMeta `json:",inline"`
	// Name of the deployment config for requesting a new deployment.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Latest will update the deployment config with the latest state from all triggers.
	Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"`
	// Force will try to force a new deployment to run. If the deployment config is paused,
	// then setting this to true will return an Invalid error.
	Force bool `json:"force" protobuf:"varint,3,opt,name=force"`
	// ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
	// This field overrides the triggers from latest and allows clients to control specific
	// logic. This field is ignored if not specified.
	ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000

// DeploymentLog represents the logs for a deployment.
// It carries no fields of its own; it exists as an addressable API resource
// whose options are supplied via DeploymentLogOptions.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type DeploymentLog struct {
	metav1.TypeMeta `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=3.0
// +k8s:prerelease-lifecycle-gen:deprecated=4.14
// +k8s:prerelease-lifecycle-gen:removed=4.10000

// DeploymentLogOptions is the REST options for a deployment log
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type DeploymentLogOptions struct {
	metav1.TypeMeta `json:",inline"`
	// The container for which to stream logs. Defaults to only container if there is one container in the pod.
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow if true indicates that the deployment log should be streamed until
	// the deployment terminates.
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Return previous deployment logs. Defaults to false.
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output. Defaults to false.
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
	// NoWait if true causes the call to return immediately even if the deployment
	// is not available yet. Otherwise the server will wait until the deployment has started.
	// TODO: Fix the tag to 'noWait' in v2
	NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
	// Version of the deployment for which to view logs.
	Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/register.go | vendor/github.com/openshift/api/apps/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
	// GroupName is the API group served by the types in this package.
	GroupName = "apps.openshift.io"
	// GroupVersion is the group/version used to register these types.
	GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
	// Install is a function which adds this version to a scheme
	Install = schemeBuilder.AddToScheme
	// SchemeGroupVersion generated code relies on this name
	// Deprecated
	SchemeGroupVersion = GroupVersion
	// AddToScheme exists solely to keep the old generators creating valid code
	// DEPRECATED
	AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
	gr := schema.GroupResource{
		Group:    GroupName,
		Resource: resource,
	}
	return gr
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Register every object kind served by this group/version, plus the
	// extensions/v1beta1 Scale subresource used by the scale endpoint.
	knownTypes := []runtime.Object{
		&DeploymentConfig{},
		&DeploymentConfigList{},
		&DeploymentConfigRollback{},
		&DeploymentRequest{},
		&DeploymentLog{},
		&DeploymentLogOptions{},
		&extensionsv1beta1.Scale{},
	}
	scheme.AddKnownTypes(GroupVersion, knownTypes...)
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_CustomDeploymentStrategyParams = map[string]string{
	"":            "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
	"image":       "Image specifies a container image which can carry out a deployment.",
	"environment": "Environment holds the environment which will be given to the container for Image.",
	"command":     "Command is optional and overrides CMD in the container Image.",
}

// SwaggerDoc returns the API documentation for CustomDeploymentStrategyParams,
// keyed by JSON field name (the "" key holds the type-level description).
func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
	return map_CustomDeploymentStrategyParams
}
var map_DeploymentCause = map[string]string{
	"":             "DeploymentCause captures information about a particular cause of a deployment.",
	"type":         "Type of the trigger that resulted in the creation of a new deployment",
	"imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change",
}

// SwaggerDoc returns the API documentation for DeploymentCause,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentCause) SwaggerDoc() map[string]string {
	return map_DeploymentCause
}
var map_DeploymentCauseImageTrigger = map[string]string{
	"":     "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger",
	"from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
}

// SwaggerDoc returns the API documentation for DeploymentCauseImageTrigger,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
	return map_DeploymentCauseImageTrigger
}
var map_DeploymentCondition = map[string]string{
	"":                   "DeploymentCondition describes the state of a deployment config at a certain point.",
	"type":               "Type of deployment condition.",
	"status":             "Status of the condition, one of True, False, Unknown.",
	"lastUpdateTime":     "The last time this condition was updated.",
	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
	"reason":             "The reason for the condition's last transition.",
	"message":            "A human readable message indicating details about the transition.",
}

// SwaggerDoc returns the API documentation for DeploymentCondition,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentCondition) SwaggerDoc() map[string]string {
	return map_DeploymentCondition
}
var map_DeploymentConfig = map[string]string{
	"":         "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.",
	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"spec":     "Spec represents a desired deployment state and how to deploy to it.",
	"status":   "Status represents the current deployment state.",
}

// SwaggerDoc returns the API documentation for DeploymentConfig,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentConfig) SwaggerDoc() map[string]string {
	return map_DeploymentConfig
}
var map_DeploymentConfigList = map[string]string{
	"":         "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"items":    "Items is a list of deployment configs",
}

// SwaggerDoc returns the API documentation for DeploymentConfigList,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentConfigList) SwaggerDoc() map[string]string {
	return map_DeploymentConfigList
}
var map_DeploymentConfigRollback = map[string]string{
	"":                   "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"name":               "Name of the deployment config that will be rolled back.",
	"updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.",
	"spec":               "Spec defines the options to rollback generation.",
}

// SwaggerDoc returns the API documentation for DeploymentConfigRollback,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentConfigRollback) SwaggerDoc() map[string]string {
	return map_DeploymentConfigRollback
}
var map_DeploymentConfigRollbackSpec = map[string]string{
	"":                       "DeploymentConfigRollbackSpec represents the options for rollback generation.",
	"from":                   "From points to a ReplicationController which is a deployment.",
	"revision":               "Revision to rollback to. If set to 0, rollback to the last revision.",
	"includeTriggers":        "IncludeTriggers specifies whether to include config Triggers.",
	"includeTemplate":        "IncludeTemplate specifies whether to include the PodTemplateSpec.",
	"includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.",
	"includeStrategy":        "IncludeStrategy specifies whether to include the deployment Strategy.",
}

// SwaggerDoc returns the API documentation for DeploymentConfigRollbackSpec,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string {
	return map_DeploymentConfigRollbackSpec
}
var map_DeploymentConfigSpec = map[string]string{
	"":                     "DeploymentConfigSpec represents the desired state of the deployment.",
	"strategy":             "Strategy describes how a deployment is executed.",
	"minReadySeconds":      "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
	"triggers":             "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.",
	"replicas":             "Replicas is the number of desired replicas.",
	"revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)",
	"test":                 "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.",
	"paused":               "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.",
	"selector":             "Selector is a label query over pods that should match the Replicas count.",
	"template":             "Template is the object that describes the pod that will be created if insufficient replicas are detected.",
}

// SwaggerDoc returns the API documentation for DeploymentConfigSpec,
// keyed by JSON field name (the "" key holds the type-level description).
func (DeploymentConfigSpec) SwaggerDoc() map[string]string {
	return map_DeploymentConfigSpec
}
var map_DeploymentConfigStatus = map[string]string{
"": "DeploymentConfigStatus represents the current deployment state.",
"latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.",
"observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.",
"replicas": "Replicas is the total number of pods targeted by this deployment config.",
"updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.",
"availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.",
"unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.",
"details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger",
"conditions": "Conditions represents the latest available observations of a deployment config's current state.",
"readyReplicas": "Total number of ready pods targeted by this deployment.",
}
func (DeploymentConfigStatus) SwaggerDoc() map[string]string {
return map_DeploymentConfigStatus
}
var map_DeploymentDetails = map[string]string{
"": "DeploymentDetails captures information about the causes of a deployment.",
"message": "Message is the user specified change message, if this deployment was triggered manually by the user",
"causes": "Causes are extended data associated with all the causes for creating a new deployment",
}
func (DeploymentDetails) SwaggerDoc() map[string]string {
return map_DeploymentDetails
}
var map_DeploymentLog = map[string]string{
"": "DeploymentLog represents the logs for a deployment\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
}
func (DeploymentLog) SwaggerDoc() map[string]string {
return map_DeploymentLog
}
var map_DeploymentLogOptions = map[string]string{
"": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
"follow": "Follow if true indicates that the build log should be streamed until the build terminates.",
"previous": "Return previous deployment logs. Defaults to false.",
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
"version": "Version of the deployment for which to view logs.",
}
func (DeploymentLogOptions) SwaggerDoc() map[string]string {
return map_DeploymentLogOptions
}
var map_DeploymentRequest = map[string]string{
"": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"name": "Name of the deployment config for requesting a new deployment.",
"latest": "Latest will update the deployment config with the latest state from all triggers.",
"force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.",
"excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.",
}
func (DeploymentRequest) SwaggerDoc() map[string]string {
return map_DeploymentRequest
}
var map_DeploymentStrategy = map[string]string{
"": "DeploymentStrategy describes how to perform a deployment.",
"type": "Type is the name of a deployment strategy.",
"customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.",
"recreateParams": "RecreateParams are the input to the Recreate deployment strategy.",
"rollingParams": "RollingParams are the input to the Rolling deployment strategy.",
"resources": "Resources contains resource requirements to execute the deployment and any hooks.",
"labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
"annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
"activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.",
}
func (DeploymentStrategy) SwaggerDoc() map[string]string {
return map_DeploymentStrategy
}
var map_DeploymentTriggerImageChangeParams = map[string]string{
"": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.",
"automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.",
"containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.",
"from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
"lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.",
}
func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
return map_DeploymentTriggerImageChangeParams
}
var map_DeploymentTriggerPolicy = map[string]string{
"": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.",
"type": "Type of the trigger",
"imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.",
}
func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
return map_DeploymentTriggerPolicy
}
var map_ExecNewPodHook = map[string]string{
"": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
"command": "Command is the action command and its arguments.",
"env": "Env is a set of environment variables to supply to the hook pod's container.",
"containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
"volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
}
func (ExecNewPodHook) SwaggerDoc() map[string]string {
return map_ExecNewPodHook
}
var map_LifecycleHook = map[string]string{
"": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.",
"failurePolicy": "FailurePolicy specifies what action to take if the hook fails.",
"execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.",
"tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.",
}
func (LifecycleHook) SwaggerDoc() map[string]string {
return map_LifecycleHook
}
var map_RecreateDeploymentStrategyParams = map[string]string{
"": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.",
"timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
"pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.",
"mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.",
"post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
}
func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
return map_RecreateDeploymentStrategyParams
}
var map_RollingDeploymentStrategyParams = map[string]string{
"": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
"updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
"intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
"timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
"maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.",
"maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.",
"pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.",
"post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
}
func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string {
return map_RollingDeploymentStrategyParams
}
var map_TagImageHook = map[string]string{
"": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.",
"containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.",
"to": "To is the target ImageStreamTag to set the container's image onto.",
}
func (TagImageHook) SwaggerDoc() map[string]string {
return map_TagImageHook
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go | vendor/github.com/openshift/api/apps/v1/deprecated_consts.go | package v1
// This file contains consts that are not shared between components and set just internally.
// They will likely be removed in (near) future.
const (
// DeployerPodCreatedAtAnnotation is an annotation on a deployment that
// records the time in RFC3339 format of when the deployer pod for this particular
// deployment was created.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at"
// DeployerPodStartedAtAnnotation is an annotation on a deployment that
// records the time in RFC3339 format of when the deployer pod for this particular
// deployment was started.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at"
// DeployerPodCompletedAtAnnotation is an annotation on deployment that records
// the time in RFC3339 format of when the deployer pod finished.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at"
// DesiredReplicasAnnotation represents the desired number of replicas for a
// new deployment.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas"
// DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
// of the deployment (a ReplicationController) on which the deployer Pod acts.
// This is set by deployer controller and consumed internally and in oc adm top command.
// DEPRECATED: will be removed soon
DeploymentAnnotation = "openshift.io/deployment.name"
)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/doc.go | vendor/github.com/openshift/api/apps/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +groupName=apps.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/generated.pb.go | vendor/github.com/openshift/api/apps/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/apps/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} }
func (*CustomDeploymentStrategyParams) ProtoMessage() {}
func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{0}
}
func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src)
}
func (m *CustomDeploymentStrategyParams) XXX_Size() int {
return m.Size()
}
func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() {
xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m)
}
var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo
func (m *DeploymentCause) Reset() { *m = DeploymentCause{} }
func (*DeploymentCause) ProtoMessage() {}
func (*DeploymentCause) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{1}
}
func (m *DeploymentCause) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentCause) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentCause.Merge(m, src)
}
func (m *DeploymentCause) XXX_Size() int {
return m.Size()
}
func (m *DeploymentCause) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentCause.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo
func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} }
func (*DeploymentCauseImageTrigger) ProtoMessage() {}
func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{2}
}
func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src)
}
func (m *DeploymentCauseImageTrigger) XXX_Size() int {
return m.Size()
}
func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo
func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
func (*DeploymentCondition) ProtoMessage() {}
func (*DeploymentCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{3}
}
func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentCondition.Merge(m, src)
}
func (m *DeploymentCondition) XXX_Size() int {
return m.Size()
}
func (m *DeploymentCondition) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} }
func (*DeploymentConfig) ProtoMessage() {}
func (*DeploymentConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{4}
}
func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfig.Merge(m, src)
}
func (m *DeploymentConfig) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfig) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfig.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo
func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} }
func (*DeploymentConfigList) ProtoMessage() {}
func (*DeploymentConfigList) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{5}
}
func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfigList) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfigList.Merge(m, src)
}
func (m *DeploymentConfigList) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfigList) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo
func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} }
func (*DeploymentConfigRollback) ProtoMessage() {}
func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{6}
}
func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfigRollback.Merge(m, src)
}
func (m *DeploymentConfigRollback) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfigRollback) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo
func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} }
func (*DeploymentConfigRollbackSpec) ProtoMessage() {}
func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{7}
}
func (m *DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src)
}
func (m *DeploymentConfigRollbackSpec) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo
func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} }
func (*DeploymentConfigSpec) ProtoMessage() {}
func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{8}
}
func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfigSpec.Merge(m, src)
}
func (m *DeploymentConfigSpec) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfigSpec) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo
func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} }
func (*DeploymentConfigStatus) ProtoMessage() {}
func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{9}
}
func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentConfigStatus.Merge(m, src)
}
func (m *DeploymentConfigStatus) XXX_Size() int {
return m.Size()
}
func (m *DeploymentConfigStatus) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo
func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} }
func (*DeploymentDetails) ProtoMessage() {}
func (*DeploymentDetails) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{10}
}
func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentDetails) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentDetails.Merge(m, src)
}
func (m *DeploymentDetails) XXX_Size() int {
return m.Size()
}
func (m *DeploymentDetails) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentDetails.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo
func (m *DeploymentLog) Reset() { *m = DeploymentLog{} }
func (*DeploymentLog) ProtoMessage() {}
func (*DeploymentLog) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{11}
}
func (m *DeploymentLog) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentLog) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentLog.Merge(m, src)
}
func (m *DeploymentLog) XXX_Size() int {
return m.Size()
}
func (m *DeploymentLog) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentLog.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo
func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} }
func (*DeploymentLogOptions) ProtoMessage() {}
func (*DeploymentLogOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{12}
}
func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentLogOptions.Merge(m, src)
}
func (m *DeploymentLogOptions) XXX_Size() int {
return m.Size()
}
func (m *DeploymentLogOptions) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo
func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} }
func (*DeploymentRequest) ProtoMessage() {}
func (*DeploymentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{13}
}
func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentRequest.Merge(m, src)
}
func (m *DeploymentRequest) XXX_Size() int {
return m.Size()
}
func (m *DeploymentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo
func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
func (*DeploymentStrategy) ProtoMessage() {}
func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{14}
}
func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentStrategy.Merge(m, src)
}
func (m *DeploymentStrategy) XXX_Size() int {
return m.Size()
}
func (m *DeploymentStrategy) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} }
func (*DeploymentTriggerImageChangeParams) ProtoMessage() {}
func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{15}
}
func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src)
}
func (m *DeploymentTriggerImageChangeParams) XXX_Size() int {
return m.Size()
}
func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo
func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} }
func (*DeploymentTriggerPolicies) ProtoMessage() {}
func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{16}
}
func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src)
}
func (m *DeploymentTriggerPolicies) XXX_Size() int {
return m.Size()
}
func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo
func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} }
func (*DeploymentTriggerPolicy) ProtoMessage() {}
func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{17}
}
func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src)
}
func (m *DeploymentTriggerPolicy) XXX_Size() int {
return m.Size()
}
func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() {
xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m)
}
var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo
func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} }
func (*ExecNewPodHook) ProtoMessage() {}
func (*ExecNewPodHook) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{18}
}
func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ExecNewPodHook) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExecNewPodHook.Merge(m, src)
}
func (m *ExecNewPodHook) XXX_Size() int {
return m.Size()
}
func (m *ExecNewPodHook) XXX_DiscardUnknown() {
xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m)
}
var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo
func (m *LifecycleHook) Reset() { *m = LifecycleHook{} }
func (*LifecycleHook) ProtoMessage() {}
func (*LifecycleHook) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{19}
}
func (m *LifecycleHook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *LifecycleHook) XXX_Merge(src proto.Message) {
xxx_messageInfo_LifecycleHook.Merge(m, src)
}
func (m *LifecycleHook) XXX_Size() int {
return m.Size()
}
func (m *LifecycleHook) XXX_DiscardUnknown() {
xxx_messageInfo_LifecycleHook.DiscardUnknown(m)
}
var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo
func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} }
func (*RecreateDeploymentStrategyParams) ProtoMessage() {}
func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{20}
}
func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src)
}
func (m *RecreateDeploymentStrategyParams) XXX_Size() int {
return m.Size()
}
func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() {
xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m)
}
var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo
func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} }
func (*RollingDeploymentStrategyParams) ProtoMessage() {}
func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{21}
}
func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src)
}
func (m *RollingDeploymentStrategyParams) XXX_Size() int {
return m.Size()
}
func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() {
xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m)
}
var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo
func (m *TagImageHook) Reset() { *m = TagImageHook{} }
func (*TagImageHook) ProtoMessage() {}
func (*TagImageHook) Descriptor() ([]byte, []int) {
return fileDescriptor_8f1b1bee37da74c1, []int{22}
}
func (m *TagImageHook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagImageHook) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagImageHook.Merge(m, src)
}
func (m *TagImageHook) XXX_Size() int {
return m.Size()
}
func (m *TagImageHook) XXX_DiscardUnknown() {
xxx_messageInfo_TagImageHook.DiscardUnknown(m)
}
var xxx_messageInfo_TagImageHook proto.InternalMessageInfo
func init() {
proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams")
proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause")
proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger")
proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition")
proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig")
proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList")
proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry")
proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec")
proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry")
proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus")
proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails")
proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog")
proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions")
proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest")
proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry")
proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams")
proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies")
proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy")
proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook")
proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook")
proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams")
proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams")
proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook")
}
func init() {
proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1)
}
var fileDescriptor_8f1b1bee37da74c1 = []byte{
// 2523 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1c, 0x49,
0x15, 0x77, 0x7b, 0x66, 0xec, 0x99, 0xe7, 0xaf, 0xb8, 0x9c, 0x8f, 0x59, 0x2f, 0xf2, 0x58, 0xb3,
0xda, 0xc5, 0xc0, 0x32, 0xb3, 0xf1, 0x86, 0xd5, 0x26, 0xd1, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1,
0x38, 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd,
0x35, 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20,
0x0e, 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3,
0x4f, 0xc8, 0x09, 0xd5, 0x47, 0x7f, 0xcd, 0x47, 0xec, 0x71, 0x72, 0x73, 0xbf, 0x8f, 0xdf, 0x7b,
0xf5, 0xea, 0xbd, 0x57, 0xaf, 0x6a, 0x0c, 0xef, 0x34, 0x99, 0x38, 0x68, 0xef, 0x55, 0x6c, 0xee,
0x56, 0x79, 0x8b, 0x7a, 0xc1, 0x01, 0xdb, 0x17, 0x55, 0xd2, 0x62, 0x55, 0xd2, 0x6a, 0x05, 0xd5,
0xce, 0xe5, 0x6a, 0x93, 0x7a, 0xd4, 0x27, 0x82, 0x36, 0x2a, 0x2d, 0x9f, 0x0b, 0x8e, 0x96, 0x63,
0x8d, 0x4a, 0xa4, 0x51, 0x21, 0x2d, 0x56, 0x91, 0x1a, 0x95, 0xce, 0xe5, 0xc5, 0x6f, 0x26, 0x30,
0x9b, 0xbc, 0xc9, 0xab, 0x4a, 0x71, 0xaf, 0xbd, 0xaf, 0xbe, 0xd4, 0x87, 0xfa, 0x4b, 0x03, 0x2e,
0x96, 0x0f, 0xdf, 0x0f, 0x2a, 0x8c, 0x2b, 0xa3, 0x36, 0xf7, 0xe9, 0x00, 0xa3, 0x8b, 0x57, 0x62,
0x19, 0x97, 0xd8, 0x07, 0xcc, 0xa3, 0x7e, 0xb7, 0xda, 0x3a, 0x6c, 0x4a, 0x42, 0x50, 0x75, 0xa9,
0x20, 0x83, 0xb4, 0xde, 0x1b, 0xa6, 0xe5, 0xb7, 0x3d, 0xc1, 0x5c, 0x5a, 0x0d, 0xec, 0x03, 0xea,
0x92, 0x3e, 0xbd, 0x77, 0x87, 0xe9, 0xb5, 0x05, 0x73, 0xaa, 0xcc, 0x13, 0x81, 0xf0, 0x7b, 0x95,
0xca, 0x7f, 0xb6, 0x60, 0x69, 0xbd, 0x1d, 0x08, 0xee, 0xde, 0xa0, 0x2d, 0x87, 0x77, 0x5d, 0xea,
0x89, 0x1d, 0x21, 0x25, 0x9a, 0xdd, 0x6d, 0xe2, 0x13, 0x37, 0x40, 0x6f, 0x40, 0x8e, 0xb9, 0xa4,
0x49, 0x8b, 0xd6, 0xb2, 0xb5, 0x52, 0xa8, 0xcd, 0x3c, 0x3d, 0x2a, 0x8d, 0x1d, 0x1f, 0x95, 0x72,
0x9b, 0x92, 0x88, 0x35, 0x0f, 0x7d, 0x17, 0xa6, 0xa8, 0xd7, 0x61, 0x3e, 0xf7, 0x24, 0x42, 0x71,
0x7c, 0x39, 0xb3, 0x32, 0xb5, 0xba, 0x58, 0xd1, 0x2e, 0xa9, 0x38, 0xcb, 0x20, 0x55, 0x3a, 0x97,
0x2b, 0x1b, 0x5e, 0xe7, 0x1e, 0xf1, 0x6b, 0x0b, 0x06, 0x66, 0x6a, 0x23, 0x56, 0xc3, 0x49, 0x0c,
0xf4, 0x26, 0x4c, 0xda, 0xdc, 0x75, 0x89, 0xd7, 0x28, 0x66, 0x96, 0x33, 0x2b, 0x85, 0xda, 0xd4,
0xf1, 0x51, 0x69, 0x72, 0x5d, 0x93, 0x70, 0xc8, 0x2b, 0xff, 0xc5, 0x82, 0xb9, 0xd8, 0xf7, 0x75,
0xd2, 0x0e, 0x28, 0xba, 0x0a, 0x59, 0xd1, 0x6d, 0x85, 0x1e, 0xbf, 0x69, 0x4c, 0x65, 0x77, 0xbb,
0x2d, 0xfa, 0xfc, 0xa8, 0x74, 0x21, 0x16, 0xdf, 0xf5, 0x59, 0xb3, 0x49, 0x7d, 0xc9, 0xc0, 0x4a,
0x05, 0x05, 0x30, 0xad, 0x56, 0x64, 0x38, 0xc5, 0xf1, 0x65, 0x6b, 0x65, 0x6a, 0xf5, 0x83, 0xca,
0x49, 0xf9, 0x53, 0xe9, 0xf1, 0x61, 0x33, 0x01, 0x52, 0x3b, 0x77, 0x7c, 0x54, 0x9a, 0x4e, 0x52,
0x70, 0xca, 0x48, 0xb9, 0x01, 0xaf, 0xbf, 0x40, 0x1d, 0x6d, 0x40, 0x76, 0xdf, 0xe7, 0xae, 0x5a,
0xce, 0xd4, 0xea, 0x1b, 0x83, 0xa2, 0x7a, 0x67, 0xef, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, 0xa9, 0x4f,
0x3d, 0x9b, 0xd6, 0xa6, 0xc3, 0x35, 0xdf, 0xf4, 0xb9, 0x8b, 0x95, 0x7a, 0xf9, 0x5f, 0x19, 0x58,
0x48, 0x98, 0xe1, 0x5e, 0x83, 0x09, 0xc6, 0x3d, 0x74, 0x3d, 0x15, 0xad, 0xaf, 0xf6, 0x44, 0xeb,
0xd2, 0x00, 0x95, 0x44, 0xbc, 0xea, 0x30, 0x11, 0x08, 0x22, 0xda, 0x81, 0x8a, 0x54, 0xa1, 0x76,
0xc5, 0xa8, 0x4f, 0xec, 0x28, 0xea, 0xf3, 0xa3, 0xd2, 0x80, 0x4a, 0xa9, 0x44, 0x48, 0x5a, 0x0a,
0x1b, 0x0c, 0xf4, 0x09, 0xcc, 0x3a, 0x24, 0x10, 0x77, 0x5b, 0x0d, 0x22, 0xe8, 0x2e, 0x73, 0x69,
0x71, 0x42, 0xad, 0xf9, 0xeb, 0x89, 0x35, 0x47, 0xc9, 0x5d, 0x69, 0x1d, 0x36, 0x25, 0x21, 0xa8,
0xc8, 0x52, 0x92, 0x51, 0x90, 0x1a, 0xb5, 0x8b, 0xc6, 0x83, 0xd9, 0x7a, 0x0a, 0x09, 0xf7, 0x20,
0xa3, 0x0e, 0x20, 0x49, 0xd9, 0xf5, 0x89, 0x17, 0xe8, 0x55, 0x49, 0x7b, 0x99, 0x91, 0xed, 0x2d,
0x1a, 0x7b, 0xa8, 0xde, 0x87, 0x86, 0x07, 0x58, 0x40, 0x6f, 0xc1, 0x84, 0x4f, 0x49, 0xc0, 0xbd,
0x62, 0x56, 0x45, 0x6c, 0x36, 0x8c, 0x18, 0x56, 0x54, 0x6c, 0xb8, 0xe8, 0x6b, 0x30, 0xe9, 0xd2,
0x20, 0x90, 0x95, 0x97, 0x53, 0x82, 0x73, 0x46, 0x70, 0x72, 0x4b, 0x93, 0x71, 0xc8, 0x2f, 0xff,
0x71, 0x1c, 0xce, 0xa5, 0xb6, 0x69, 0x9f, 0x35, 0xd1, 0x23, 0xc8, 0x4b, 0x3f, 0x1b, 0x44, 0x10,
0x93, 0x39, 0xef, 0x9c, 0x6e, 0x55, 0x3a, 0x97, 0xb6, 0xa8, 0x20, 0x35, 0x64, 0x4c, 0x42, 0x4c,
0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x16, 0xb5, 0x4d, 0x8d, 0xbc, 0x37, 0x52, 0x8d, 0x28,
0x1f, 0x77, 0x5a, 0xd4, 0x8e, 0x53, 0x55, 0x7e, 0x61, 0x85, 0x88, 0x1e, 0x45, 0x59, 0xa5, 0xf7,
0xe3, 0xfd, 0x33, 0x60, 0x2b, 0xfd, 0x38, 0xba, 0xe9, 0x4c, 0x2b, 0xff, 0xdd, 0x82, 0xf3, 0xbd,
0x2a, 0x75, 0x16, 0x08, 0xf4, 0xfd, 0xbe, 0xb0, 0x55, 0x4e, 0x17, 0x36, 0xa9, 0xad, 0x82, 0x76,
0xce, 0x98, 0xcc, 0x87, 0x94, 0x44, 0xc8, 0xee, 0x43, 0x8e, 0x09, 0xea, 0x06, 0xa6, 0x43, 0xae,
0x8e, 0xbe, 0xae, 0x44, 0x03, 0x96, 0x40, 0x58, 0xe3, 0x95, 0x7f, 0x9e, 0x81, 0x62, 0xaf, 0x28,
0xe6, 0x8e, 0xb3, 0x47, 0xec, 0x43, 0xb4, 0x0c, 0x59, 0x8f, 0xb8, 0x61, 0x85, 0x47, 0x01, 0xbf,
0x4d, 0x5c, 0x8a, 0x15, 0x07, 0xfd, 0xc6, 0x02, 0xd4, 0x56, 0xb5, 0xd1, 0x58, 0xf3, 0x3c, 0x2e,
0x88, 0x4c, 0xd7, 0xd0, 0x4b, 0x3c, 0xba, 0x97, 0xa1, 0xe9, 0xca, 0xdd, 0x3e, 0xd0, 0x0d, 0x4f,
0xf8, 0xdd, 0xb8, 0x6a, 0xfa, 0x05, 0xf0, 0x00, 0x4f, 0xd0, 0x23, 0x93, 0x6b, 0x3a, 0x1f, 0x3e,
0x3c, 0xbb, 0x47, 0xc3, 0x72, 0x6e, 0x71, 0x03, 0x2e, 0x0d, 0x71, 0x16, 0x9d, 0x83, 0xcc, 0x21,
0xed, 0xea, 0xf0, 0x61, 0xf9, 0x27, 0x3a, 0x0f, 0xb9, 0x0e, 0x71, 0xda, 0x54, 0x77, 0x3d, 0xac,
0x3f, 0xae, 0x8d, 0xbf, 0x6f, 0x95, 0xff, 0x94, 0x81, 0xaf, 0xbc, 0xc8, 0xf6, 0x2b, 0xea, 0xe6,
0xe8, 0x6d, 0xc8, 0xfb, 0xb4, 0xc3, 0x02, 0xc6, 0x3d, 0xe5, 0x44, 0x26, 0xce, 0x3b, 0x6c, 0xe8,
0x38, 0x92, 0x40, 0x6b, 0x30, 0xc7, 0x3c, 0xdb, 0x69, 0x37, 0xc2, 0x43, 0x45, 0x57, 0x56, 0xbe,
0x76, 0xc9, 0x28, 0xcd, 0x6d, 0xa6, 0xd9, 0xb8, 0x57, 0x3e, 0x09, 0x41, 0xdd, 0x96, 0x43, 0x04,
0x55, 0x0d, 0x6c, 0x00, 0x84, 0x61, 0xe3, 0x5e, 0x79, 0x74, 0x0f, 0x2e, 0x1a, 0x12, 0xa6, 0x2d,
0x87, 0xd9, 0x2a, 0xc6, 0xb2, 0x42, 0x54, 0x87, 0xcb, 0xd7, 0x96, 0x0c, 0xd2, 0xc5, 0xcd, 0x81,
0x52, 0x78, 0x88, 0x76, 0xc2, 0xb5, 0x70, 0x76, 0x51, 0xe7, 0x46, 0xbf, 0x6b, 0x21, 0x1b, 0xf7,
0xca, 0x97, 0xff, 0x97, 0xeb, 0xef, 0x07, 0x6a, 0xbb, 0xf6, 0x20, 0x1f, 0x84, 0xa0, 0x7a, 0xcb,
0xae, 0x8c, 0x92, 0x7c, 0xa1, 0x81, 0x78, 0x77, 0x22, 0x1f, 0x22, 0x5c, 0xe9, 0xbf, 0xcb, 0x3c,
0x4c, 0x49, 0xa3, 0xbb, 0x43, 0x6d, 0xee, 0x35, 0x82, 0x62, 0x61, 0xd9, 0x5a, 0xc9, 0xc5, 0xfe,
0x6f, 0xa5, 0xd9, 0xb8, 0x57, 0x1e, 0x51, 0xc8, 0x8b, 0x70, 0x67, 0x75, 0x3f, 0xbe, 0x3e, 0x8a,
0x9b, 0x66, 0x97, 0xb7, 0xb9, 0xc3, 0x6c, 0x46, 0x83, 0xda, 0xb4, 0xf4, 0x34, 0xca, 0x85, 0x08,
0x5a, 0x67, 0x9d, 0x0a, 0xbe, 0x4e, 0xa0, 0x5c, 0x32, 0xeb, 0x34, 0x1d, 0x47, 0x12, 0xa8, 0x0e,
0xe7, 0xc3, 0x0c, 0xfc, 0x98, 0x05, 0x82, 0xfb, 0xdd, 0x3a, 0x73, 0x99, 0x50, 0x79, 0x93, 0xab,
0x15, 0x8f, 0x8f, 0x4a, 0xe7, 0xf1, 0x00, 0x3e, 0x1e, 0xa8, 0x25, 0xbb, 0x98, 0xa0, 0x81, 0x30,
0xb9, 0x12, 0xd5, 0xc4, 0x2e, 0x0d, 0x04, 0x56, 0x1c, 0x79, 0xb4, 0xb6, 0xe4, 0xf4, 0xd4, 0x30,
0xdb, 0x1f, 0x35, 0xff, 0x6d, 0x45, 0xc5, 0x86, 0x8b, 0x7c, 0xc8, 0x07, 0xd4, 0xa1, 0xb6, 0xe0,
0x7e, 0x71, 0x52, 0xb5, 0xb8, 0x1b, 0x67, 0x3b, 0xbc, 0x2a, 0x3b, 0x06, 0x46, 0x37, 0xb5, 0x78,
0x8f, 0x0d, 0x19, 0x47, 0x76, 0xd0, 0x16, 0xe4, 0x45, 0x58, 0x37, 0xf9, 0xe1, 0xa5, 0xbf, 0xcd,
0x1b, 0x61, 0xb9, 0xe8, 0x4e, 0xa5, 0x36, 0x22, 0xac, 0xa8, 0x08, 0x62, 0xf1, 0x3a, 0xcc, 0xa4,
0x6c, 0x8f, 0xd4, 0xa3, 0xfe, 0x90, 0x83, 0x8b, 0x83, 0xcf, 0x4b, 0x74, 0x1d, 0x66, 0x24, 0x7e,
0x20, 0xee, 0x51, 0x5f, 0xf5, 0x16, 0x4b, 0xf5, 0x96, 0x0b, 0x66, 0x65, 0x33, 0xf5, 0x24, 0x13,
0xa7, 0x65, 0xd1, 0x2d, 0x40, 0x7c, 0x2f, 0xa0, 0x7e, 0x87, 0x36, 0x3e, 0xd2, 0x17, 0x8d, 0xb8,
0x3b, 0x45, 0x0d, 0xff, 0x4e, 0x9f, 0x04, 0x1e, 0xa0, 0x35, 0x62, 0xa6, 0xad, 0xc1, 0x9c, 0x39,
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/legacy.go | vendor/github.com/openshift/api/apps/v1/legacy.go | package v1
import (
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&DeploymentConfig{},
&DeploymentConfigList{},
&DeploymentConfigRollback{},
&DeploymentRequest{},
&DeploymentLog{},
&DeploymentLogOptions{},
&extensionsv1beta1.Scale{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go | vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
package v1
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentConfig) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentConfig) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentConfig) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentConfigList) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentConfigList) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentConfigList) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentConfigRollback) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentConfigRollback) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentConfigRollback) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentLog) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentLog) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentLog) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentLogOptions) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentLogOptions) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentLogOptions) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *DeploymentRequest) APILifecycleIntroduced() (major, minor int) {
return 3, 0
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *DeploymentRequest) APILifecycleDeprecated() (major, minor int) {
return 4, 14
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *DeploymentRequest) APILifecycleRemoved() (major, minor int) {
return 4, 10000
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go | vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package dockerpre012
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
*out = *in
if in.PortSpecs != nil {
in, out := &in.PortSpecs, &out.PortSpecs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExposedPorts != nil {
in, out := &in.ExposedPorts, &out.ExposedPorts
*out = make(map[Port]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Cmd != nil {
in, out := &in.Cmd, &out.Cmd
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DNS != nil {
in, out := &in.DNS, &out.DNS
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make(map[string]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Entrypoint != nil {
in, out := &in.Entrypoint, &out.Entrypoint
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecurityOpts != nil {
in, out := &in.SecurityOpts, &out.SecurityOpts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.OnBuild != nil {
in, out := &in.OnBuild, &out.OnBuild
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Mounts != nil {
in, out := &in.Mounts, &out.Mounts
*out = make([]Mount, len(*in))
copy(*out, *in)
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
if in == nil {
return nil
}
out := new(Config)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
*out = *in
if in.PortSpecs != nil {
in, out := &in.PortSpecs, &out.PortSpecs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExposedPorts != nil {
in, out := &in.ExposedPorts, &out.ExposedPorts
*out = make(map[string]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Cmd != nil {
in, out := &in.Cmd, &out.Cmd
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DNS != nil {
in, out := &in.DNS, &out.DNS
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make(map[string]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Entrypoint != nil {
in, out := &in.Entrypoint, &out.Entrypoint
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecurityOpts != nil {
in, out := &in.SecurityOpts, &out.SecurityOpts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.OnBuild != nil {
in, out := &in.OnBuild, &out.OnBuild
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
func (in *DockerConfig) DeepCopy() *DockerConfig {
if in == nil {
return nil
}
out := new(DockerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerImage) DeepCopyInto(out *DockerImage) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Created.DeepCopyInto(&out.Created)
in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(DockerConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage.
func (in *DockerImage) DeepCopy() *DockerImage {
if in == nil {
return nil
}
out := new(DockerImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DockerImage) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012.
func (in *ImagePre012) DeepCopy() *ImagePre012 {
if in == nil {
return nil
}
out := new(ImagePre012)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mount) DeepCopyInto(out *Mount) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount.
func (in *Mount) DeepCopy() *Mount {
if in == nil {
return nil
}
out := new(Mount)
in.DeepCopyInto(out)
return out
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/register.go | vendor/github.com/openshift/api/image/dockerpre012/register.go | package dockerpre012
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
GroupName = "image.openshift.io"
LegacyGroupName = ""
)
var (
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"}
LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"}
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme
// Install is a function which adds this version to a scheme
Install = SchemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DockerImage{},
)
return nil
}
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(LegacySchemeGroupVersion,
&DockerImage{},
)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go | vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go | package dockerpre012
// DeepCopyInto writes a deep copy of in into out; out must be non-nil.
// It is manually built (rather than generated) to copy the (probably bugged)
// time.Time Created field explicitly.
func (in *ImagePre012) DeepCopyInto(out *ImagePre012) {
	*out = *in
	// time.Time is copied by value; kept explicit to mirror the original
	// hand-written implementation's intent.
	out.Created = in.Created
	in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
	if in.Config != nil {
		// The guard above already ensures in.Config is non-nil; the
		// generated-style inner nil re-check was unreachable and has
		// been dropped.
		out.Config = new(Config)
		in.Config.DeepCopyInto(out.Config)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go | package dockerpre012
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_Config = map[string]string{
"": "Config is the list of configuration options used when creating a container. Config does not contain the options that are specific to starting a container on a given host. Those are contained in HostConfig Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
}
// SwaggerDoc returns the swagger documentation strings for the Config type.
func (Config) SwaggerDoc() map[string]string {
	return map_Config
}
var map_DockerConfig = map[string]string{
"": "DockerConfig is the list of configuration options used when creating a container.",
"Labels": "This field is not supported in pre012 and will always be empty.",
}
// SwaggerDoc returns the swagger documentation strings for the DockerConfig type.
func (DockerConfig) SwaggerDoc() map[string]string {
	return map_DockerConfig
}
var map_DockerImage = map[string]string{
"": "DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the version of metadata that the container image registry uses to persist metadata.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
}
// SwaggerDoc returns the swagger documentation strings for the DockerImage type.
func (DockerImage) SwaggerDoc() map[string]string {
	return map_DockerImage
}
var map_ImagePre012 = map[string]string{
"": "ImagePre012 serves the same purpose as the Image type except that it is for earlier versions of the Docker API (pre-012 to be specific) Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
}
// SwaggerDoc returns the swagger documentation strings for the ImagePre012 type.
func (ImagePre012) SwaggerDoc() map[string]string {
	return map_ImagePre012
}
var map_Mount = map[string]string{
"": "Mount represents a mount point in the container.\n\nIt has been added in the version 1.20 of the Docker API, available since Docker 1.8. Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
}
// SwaggerDoc returns the swagger documentation strings for the Mount type.
func (Mount) SwaggerDoc() map[string]string {
	return map_Mount
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/doc.go | vendor/github.com/openshift/api/image/dockerpre012/doc.go | // +k8s:deepcopy-gen=package,register
// Package dockerpre012 is the dockerpre012 version of the API.
package dockerpre012
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go | vendor/github.com/openshift/api/image/dockerpre012/types_docker.go | package dockerpre012
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the
// version of metadata that the container image registry uses to persist metadata.
//
// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
// +openshift:compatibility-gen:level=4
// +openshift:compatibility-gen:internal
type DockerImage struct {
metav1.TypeMeta `json:",inline"`
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created metav1.Time `json:"created"`
Container string `json:"container,omitempty"`
ContainerConfig DockerConfig `json:"container_config,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
Author string `json:"author,omitempty"`
Config *DockerConfig `json:"config,omitempty"`
Architecture string `json:"architecture,omitempty"`
Size int64 `json:"size,omitempty"`
}
// DockerConfig is the list of configuration options used when creating a container.
type DockerConfig struct {
Hostname string `json:"Hostname,omitempty"`
Domainname string `json:"Domainname,omitempty"`
User string `json:"User,omitempty"`
Memory int64 `json:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty"`
AttachStdin bool `json:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty"`
AttachStderr bool `json:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty"`
ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
Tty bool `json:"Tty,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty"`
StdinOnce bool `json:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty"`
Cmd []string `json:"Cmd,omitempty"`
DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
Entrypoint []string `json:"Entrypoint,omitempty"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty"`
// This field is not supported in pre012 and will always be empty.
Labels map[string]string `json:"Labels,omitempty"`
}
// ImagePre012 serves the same purpose as the Image type except that it is for
// earlier versions of the Docker API (pre-012 to be specific)
// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
type ImagePre012 struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
Container string `json:"container,omitempty"`
ContainerConfig Config `json:"container_config,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
Author string `json:"author,omitempty"`
Config *Config `json:"config,omitempty"`
Architecture string `json:"architecture,omitempty"`
Size int64 `json:"size,omitempty"`
}
// Config is the list of configuration options used when creating a container.
// Config does not contain the options that are specific to starting a container on a
// given host. Those are contained in HostConfig
// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
type Config struct {
Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
User string `json:"User,omitempty" yaml:"User,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
Cmd []string `json:"Cmd" yaml:"Cmd"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
}
// Mount represents a mount point in the container.
//
// It has been added in the version 1.20 of the Docker API, available since
// Docker 1.8.
// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
type Mount struct {
	Name        string // volume name, if any — semantics per the Docker remote API (TODO confirm)
	Source      string // presumably the host-side path being mounted — verify against Docker API docs
	Destination string // presumably the path inside the container — verify against Docker API docs
	Driver      string // volume driver, if any (TODO confirm)
	Mode        string // mount mode string — semantics per the Docker remote API (TODO confirm)
	RW          bool   // NOTE(review): looks like read-write vs read-only flag — confirm
}
// Port represents the port number and the protocol, in the form
// <number>/<protocol>. For example: 80/tcp.
// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
type Port string
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) {
	// deepcopy-gen emitted only a shallow assignment here, i.e. it found
	// no reference-typed fields that need individual cloning.
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference.
// DeepCopy returns a new DockerImageReference that is a deep copy of the
// receiver, or nil when the receiver is nil.
func (in *DockerImageReference) DeepCopy() *DockerImageReference {
	if in == nil {
		return nil
	}
	clone := new(DockerImageReference)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata)
if in.DockerImageLayers != nil {
in, out := &in.DockerImageLayers, &out.DockerImageLayers
*out = make([]ImageLayer, len(*in))
copy(*out, *in)
}
if in.Signatures != nil {
in, out := &in.Signatures, &out.Signatures
*out = make([]ImageSignature, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DockerImageSignatures != nil {
in, out := &in.DockerImageSignatures, &out.DockerImageSignatures
*out = make([][]byte, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]byte, len(*in))
copy(*out, *in)
}
}
}
if in.DockerImageManifests != nil {
in, out := &in.DockerImageManifests, &out.DockerImageManifests
*out = make([]ImageManifest, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
if in == nil {
return nil
}
out := new(Image)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Image) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) {
	*out = *in
	// Each guarded branch preserves nil-ness: a nil slice/pointer stays
	// nil in the copy rather than becoming an empty allocation.
	if in.Layers != nil {
		in, out := &in.Layers, &out.Layers
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Config != nil {
		// Pointer field: allocate a fresh string and copy the value so
		// the clone does not alias the original.
		in, out := &in.Config, &out.Config
		*out = new(string)
		**out = **in
	}
	if in.Manifests != nil {
		in, out := &in.Manifests, &out.Manifests
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences.
func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences {
if in == nil {
return nil
}
out := new(ImageBlobReferences)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) {
*out = *in
out.From = in.From
if in.To != nil {
in, out := &in.To, &out.To
*out = new(corev1.LocalObjectReference)
**out = **in
}
out.ImportPolicy = in.ImportPolicy
out.ReferencePolicy = in.ReferencePolicy
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec.
func (in *ImageImportSpec) DeepCopy() *ImageImportSpec {
if in == nil {
return nil
}
out := new(ImageImportSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) {
*out = *in
in.Status.DeepCopyInto(&out.Status)
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(Image)
(*in).DeepCopyInto(*out)
}
if in.Manifests != nil {
in, out := &in.Manifests, &out.Manifests
*out = make([]Image, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus.
func (in *ImageImportStatus) DeepCopy() *ImageImportStatus {
if in == nil {
return nil
}
out := new(ImageImportStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageLayer) DeepCopyInto(out *ImageLayer) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer.
func (in *ImageLayer) DeepCopy() *ImageLayer {
if in == nil {
return nil
}
out := new(ImageLayer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) {
*out = *in
if in.LayerSize != nil {
in, out := &in.LayerSize, &out.LayerSize
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData.
func (in *ImageLayerData) DeepCopy() *ImageLayerData {
if in == nil {
return nil
}
out := new(ImageLayerData)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageList) DeepCopyInto(out *ImageList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Image, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
func (in *ImageList) DeepCopy() *ImageList {
if in == nil {
return nil
}
out := new(ImageList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy.
func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy {
if in == nil {
return nil
}
out := new(ImageLookupPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageManifest) DeepCopyInto(out *ImageManifest) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManifest.
func (in *ImageManifest) DeepCopy() *ImageManifest {
if in == nil {
return nil
}
out := new(ImageManifest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageSignature) DeepCopyInto(out *ImageSignature) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Content != nil {
in, out := &in.Content, &out.Content
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]SignatureCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SignedClaims != nil {
in, out := &in.SignedClaims, &out.SignedClaims
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Created != nil {
in, out := &in.Created, &out.Created
*out = (*in).DeepCopy()
}
if in.IssuedBy != nil {
in, out := &in.IssuedBy, &out.IssuedBy
*out = new(SignatureIssuer)
**out = **in
}
if in.IssuedTo != nil {
in, out := &in.IssuedTo, &out.IssuedTo
*out = new(SignatureSubject)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature.
func (in *ImageSignature) DeepCopy() *ImageSignature {
if in == nil {
return nil
}
out := new(ImageSignature)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageSignature) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStream) DeepCopyInto(out *ImageStream) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream.
func (in *ImageStream) DeepCopy() *ImageStream {
if in == nil {
return nil
}
out := new(ImageStream)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStream) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Image.DeepCopyInto(&out.Image)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage.
func (in *ImageStreamImage) DeepCopy() *ImageStreamImage {
if in == nil {
return nil
}
out := new(ImageStreamImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamImage) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport.
func (in *ImageStreamImport) DeepCopy() *ImageStreamImport {
if in == nil {
return nil
}
out := new(ImageStreamImport)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamImport) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) {
*out = *in
if in.Repository != nil {
in, out := &in.Repository, &out.Repository
*out = new(RepositoryImportSpec)
**out = **in
}
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]ImageImportSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec.
func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec {
if in == nil {
return nil
}
out := new(ImageStreamImportSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) {
*out = *in
if in.Import != nil {
in, out := &in.Import, &out.Import
*out = new(ImageStream)
(*in).DeepCopyInto(*out)
}
if in.Repository != nil {
in, out := &in.Repository, &out.Repository
*out = new(RepositoryImportStatus)
(*in).DeepCopyInto(*out)
}
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]ImageImportStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus.
func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus {
if in == nil {
return nil
}
out := new(ImageStreamImportStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Blobs != nil {
in, out := &in.Blobs, &out.Blobs
*out = make(map[string]ImageLayerData, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make(map[string]ImageBlobReferences, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers.
func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers {
if in == nil {
return nil
}
out := new(ImageStreamLayers)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamLayers) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImageStream, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList.
func (in *ImageStreamList) DeepCopy() *ImageStreamList {
if in == nil {
return nil
}
out := new(ImageStreamList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Image.DeepCopyInto(&out.Image)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping.
func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping {
if in == nil {
return nil
}
out := new(ImageStreamMapping)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamMapping) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) {
*out = *in
out.LookupPolicy = in.LookupPolicy
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]TagReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec.
func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec {
if in == nil {
return nil
}
out := new(ImageStreamSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]NamedTagEventList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus.
func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus {
if in == nil {
return nil
}
out := new(ImageStreamStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Tag != nil {
in, out := &in.Tag, &out.Tag
*out = new(TagReference)
(*in).DeepCopyInto(*out)
}
out.LookupPolicy = in.LookupPolicy
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]TagEventCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Image.DeepCopyInto(&out.Image)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag.
func (in *ImageStreamTag) DeepCopy() *ImageStreamTag {
if in == nil {
return nil
}
out := new(ImageStreamTag)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamTag) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImageStreamTag, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList.
func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList {
if in == nil {
return nil
}
out := new(ImageStreamTagList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageStreamTagList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageTag) DeepCopyInto(out *ImageTag) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(TagReference)
(*in).DeepCopyInto(*out)
}
if in.Status != nil {
in, out := &in.Status, &out.Status
*out = new(NamedTagEventList)
(*in).DeepCopyInto(*out)
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(Image)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag.
func (in *ImageTag) DeepCopy() *ImageTag {
if in == nil {
return nil
}
out := new(ImageTag)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageTag) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageTagList) DeepCopyInto(out *ImageTagList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImageTag, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList.
func (in *ImageTagList) DeepCopy() *ImageTagList {
if in == nil {
return nil
}
out := new(ImageTagList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageTagList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]TagEvent, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]TagEventCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList.
func (in *NamedTagEventList) DeepCopy() *NamedTagEventList {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(NamedTagEventList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) {
	*out = *in
	// These fields are copied by value; the generator treats them as
	// containing no pointers, so assignment is already a deep copy.
	out.From = in.From
	out.ImportPolicy = in.ImportPolicy
	out.ReferencePolicy = in.ReferencePolicy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec.
func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(RepositoryImportSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	if in.Images != nil {
		// Shadow in/out with the field addresses; the generator emits the
		// same slice-copy template for every field.
		in, out := &in.Images, &out.Images
		*out = make([]ImageImportStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AdditionalTags != nil {
		// []string has no pointer elements, so a plain copy suffices.
		in, out := &in.AdditionalTags, &out.AdditionalTags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus.
func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(RepositoryImportStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretList) DeepCopyInto(out *SecretList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Each corev1.Secret is deep-copied via its own generated method.
		in, out := &in.Items, &out.Items
		*out = make([]corev1.Secret, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
func (in *SecretList) DeepCopy() *SecretList {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(SecretList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SecretList) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil inside the interface) so
	// callers' `obj == nil` checks keep working.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) {
	*out = *in
	// metav1.Time fields are deep-copied explicitly after the shallow copy.
	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition.
func (in *SignatureCondition) DeepCopy() *SignatureCondition {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(SignatureCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) {
	// Value fields only (no pointers), so a shallow assignment is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity.
func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(SignatureGenericEntity)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) {
	*out = *in
	// The embedded entity holds value fields only; assignment deep-copies it.
	out.SignatureGenericEntity = in.SignatureGenericEntity
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer.
func (in *SignatureIssuer) DeepCopy() *SignatureIssuer {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(SignatureIssuer)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) {
	*out = *in
	// The embedded entity holds value fields only; assignment deep-copies it.
	out.SignatureGenericEntity = in.SignatureGenericEntity
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject.
func (in *SignatureSubject) DeepCopy() *SignatureSubject {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(SignatureSubject)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagEvent) DeepCopyInto(out *TagEvent) {
	*out = *in
	// The metav1.Time field is deep-copied explicitly after the shallow copy.
	in.Created.DeepCopyInto(&out.Created)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent.
func (in *TagEvent) DeepCopy() *TagEvent {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(TagEvent)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) {
	*out = *in
	// The metav1.Time field is deep-copied explicitly after the shallow copy.
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition.
func (in *TagEventCondition) DeepCopy() *TagEventCondition {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(TagEventCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) {
	// Value fields only (no pointers), so a shallow assignment is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy.
func (in *TagImportPolicy) DeepCopy() *TagImportPolicy {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(TagImportPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagReference) DeepCopyInto(out *TagReference) {
	*out = *in
	if in.Annotations != nil {
		// Rebuild the map so the copy does not alias the original's entries.
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.From != nil {
		// Allocate a fresh ObjectReference and copy its (pointer-free) value.
		in, out := &in.From, &out.From
		*out = new(corev1.ObjectReference)
		**out = **in
	}
	if in.Generation != nil {
		in, out := &in.Generation, &out.Generation
		*out = new(int64)
		**out = **in
	}
	out.ImportPolicy = in.ImportPolicy
	out.ReferencePolicy = in.ReferencePolicy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference.
func (in *TagReference) DeepCopy() *TagReference {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(TagReference)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) {
	// Value fields only (no pointers), so a shallow assignment is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy.
func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy {
	// A nil receiver copies to nil, so chained copies are always safe.
	if in == nil {
		return nil
	}
	out := new(TagReferencePolicy)
	in.DeepCopyInto(out)
	return out
}
// ---- vendor/github.com/openshift/api/image/v1/consts.go (Apache-2.0) ----
package v1
import corev1 "k8s.io/api/core/v1"
const (
	// ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry.
	ManagedByOpenShiftAnnotation = "openshift.io/image.managed"
	// DockerImageRepositoryCheckAnnotation indicates that OpenShift has
	// attempted to import tag and image information from an external Docker
	// image repository.
	DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck"
	// InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content.
	InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository"
	// ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets.
	ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret"
	// DockerImageLayersOrderAnnotation describes layers order in the docker image.
	DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder"
	// DockerImageLayersOrderAscending indicates that image layers are sorted in
	// the order of their addition (from oldest to latest).
	DockerImageLayersOrderAscending = "ascending"
	// DockerImageLayersOrderDescending indicates that layers are sorted in
	// reversed order of their addition (from newest to oldest).
	DockerImageLayersOrderDescending = "descending"
	// ImporterPreferArchAnnotation represents an architecture that should be
	// selected if an image uses a manifest list and it should be
	// downconverted.
	ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch"
	// ImporterPreferOSAnnotation represents an operating system that should
	// be selected if an image uses a manifest list and it should be
	// downconverted.
	ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os"
	// ImageManifestBlobStoredAnnotation indicates that manifest and config blobs of image are stored in on
	// storage of integrated Docker registry.
	ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored"
	// DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use.
	DefaultImageTag = "latest"
	// ResourceImageStreams represents a number of image streams in a project.
	ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams"
	// ResourceImageStreamImages represents a number of unique references to images in all image stream
	// statuses of a project.
	ResourceImageStreamImages corev1.ResourceName = "openshift.io/images"
	// ResourceImageStreamTags represents a number of unique references to images in all image stream specs
	// of a project.
	ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags"
	// LimitTypeImage is a limit that applies to images. Used with a max["storage"]
	// LimitRangeItem to set the maximum size of an image.
	LimitTypeImage corev1.LimitType = "openshift.io/Image"
	// LimitTypeImageStream is a limit that applies to image streams. Used with a max[resource]
	// LimitRangeItem to set the maximum number of resource. Where the resource is one of
	// "openshift.io/images" and "openshift.io/image-tags".
	LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream"
	// ImageSignatureTypeAtomicImageV1 is the supported type of image signature.
	ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1"
)
// ---- vendor/github.com/openshift/api/image/v1/types.go (Apache-2.0) ----
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageList is a list of Image objects.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of images.
	Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Image is an immutable representation of a container image and metadata at a point in time.
// Images are named by taking a hash of their contents (metadata and content) and any change
// in format, content, or metadata results in a new name. The images resource is primarily
// for use by cluster administrators and integrations like the cluster image registry - end
// users instead access images via the imagestreamtags or imagestreamimages resources. While
// image metadata is stored in the API, any integration that implements the container image
// registry API must provide its own storage for the raw manifest data, image config, and
// layer contents.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type Image struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// DockerImageReference is the string that can be used to pull this image.
	DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"`
	// DockerImageMetadata contains metadata about this image.
	// +patchStrategy=replace
	// +kubebuilder:pruning:PreserveUnknownFields
	DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"`
	// DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0".
	DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"`
	// DockerImageManifest is the raw JSON of the manifest.
	DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"`
	// DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.
	DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"`
	// Signatures holds all signatures of the image.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"`
	// DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
	DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"`
	// DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
	DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"`
	// DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
	// Will not be set when the image represents a manifest list.
	DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"`
	// DockerImageManifests holds information about sub-manifests when the image represents a manifest list.
	// When this field is present, no DockerImageLayers should be specified.
	DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"`
}
// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular
// Image object.
type ImageManifest struct {
	// Digest is the unique identifier for the manifest. It refers to an Image object.
	Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"`
	// MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
	// application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.
	MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
	// ManifestSize represents the size of the raw object contents, in bytes.
	ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"`
	// Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
	Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"`
	// OS specifies the operating system, for example `linux`.
	OS string `json:"os" protobuf:"bytes,5,opt,name=os"`
	// Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU
	// variant of the ARM CPU.
	Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"`
}
// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
type ImageLayer struct {
	// Name of the layer as defined by the underlying store.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// LayerSize is the size of the layer in bytes as defined by the underlying store
	// (serialized under the JSON key "size").
	LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"`
	// MediaType of the referenced object.
	MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"`
}
// +genclient
// +genclient:onlyVerbs=create,delete
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims
// as long as the signature is trusted. Based on this information it is possible to restrict runnable images
// to those matching cluster-wide policy.
// Mandatory fields should be parsed by clients doing image verification. The others are parsed from
// signature's content by the server. They serve just an informative purpose.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageSignature struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Required: Describes a type of stored blob.
	Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
	// Required: An opaque binary string which is an image's signature.
	Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"`
	// Conditions represent the latest available observations of a signature's current state.
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// The following metadata fields will be set by the server if the signature content is
	// successfully parsed and the information is available.
	// ImageIdentity is a human readable string representing image's identity. It could be a product name and version, or an
	// image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2").
	ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"`
	// SignedClaims contains claims from the signature.
	SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"`
	// Created, if specified, is the time of signature's creation.
	Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"`
	// IssuedBy, if specified, holds information about an issuer of signing certificate or key (a person or entity
	// who signed the signing certificate or key).
	IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"`
	// IssuedTo, if specified, holds information about a subject of signing certificate or key (a person or entity
	// who signed the image).
	IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"`
}
// SignatureConditionType is a type of image signature condition,
// such as Complete or Failed (see SignatureCondition.Type).
type SignatureConditionType string
// SignatureCondition describes an image signature condition of particular kind at particular probe time.
type SignatureCondition struct {
	// Type of signature condition, Complete or Failed.
	Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
	// LastProbeTime is the last time the condition was checked.
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// LastTransitionTime is the last time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Reason is a (brief) reason for the condition's last transition.
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Message is a human readable message indicating details about the last transition.
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject
// of signing certificate or key.
type SignatureGenericEntity struct {
	// Organization name.
	Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"`
	// Common name (e.g. openshift-signing-service).
	CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"`
}
// SignatureIssuer holds information about an issuer of signing certificate or key.
// It currently carries no fields beyond the embedded SignatureGenericEntity.
type SignatureIssuer struct {
	SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
}
// SignatureSubject holds information about a person or entity who created the signature.
type SignatureSubject struct {
	SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
	// PublicKeyID, if present, is a human readable key id of public key belonging to the subject used to verify image
	// signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g.
	// 0x685ebe62bf278440).
	PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageStreamList is a list of ImageStream objects.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageStreamList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of image streams.
	Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:method=Secrets,verb=get,subresource=secrets,result=github.com/openshift/api/image/v1.SecretList
// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// An ImageStream stores a mapping of tags to images, metadata overrides that are applied
// when images are tagged in a stream, and an optional reference to a container image
// repository on a registry. Users typically update the spec.tags field to point to external
// images which are imported from container registries using credentials in your namespace
// with the pull secret type, or to existing image stream tags and images which are
// immediately accessible for tagging or pulling. The history of images applied to a tag
// is visible in the status.tags field and any user who can view an image stream is allowed
// to tag that image into their own image streams. Access to pull images from the integrated
// registry is granted by having the "get imagestreams/layers" permission on a given image
// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both
// spec and status for that tag to be removed. Image stream history is retained until an
// administrator runs the prune operation, which removes references that are no longer in
// use. To preserve a historical image, ensure there is a tag in spec pointing to that image
// by its digest.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageStream struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec describes the desired state of this stream.
	// +optional
	Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current state of this stream.
	// +optional
	Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// ImageStreamSpec represents options for ImageStreams.
// Note: protobuf field numbers are out of declaration order (3, 1, 2) to
// preserve wire compatibility with earlier versions of this type.
type ImageStreamSpec struct {
	// lookupPolicy controls how other resources reference images within this namespace.
	LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"`
	// dockerImageRepository is optional, if specified this stream is backed by a container repository on this server.
	// Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
	// Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.
	DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"`
	// tags map arbitrary string values to specific image locators.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"`
}
// ImageLookupPolicy describes how an image stream can be used to override the image references
// used by pods, builds, and other resources in a namespace.
type ImageLookupPolicy struct {
	// local will change the docker short image references (like "mysql" or
	// "php:latest") on objects in this namespace to the image ID whenever they match
	// this image stream, instead of reaching out to a remote registry. The name will
	// be fully qualified to an image ID if found. The tag's referencePolicy is taken
	// into account on the replaced value. Only works within the current namespace.
	// (Protobuf field number 3 is kept for wire compatibility.)
	Local bool `json:"local" protobuf:"varint,3,opt,name=local"`
}
// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
type TagReference struct {
	// Name of the tag.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
	// +optional
	Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
	// Optional; if specified, a reference to another image that this tag should point to. Valid values
	// are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
	// can only reference a tag within this same ImageStream.
	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"`
	// Reference states if the tag will be imported. The default value is false, which means
	// the tag will be imported; when true the tag is treated as a plain reference and not imported.
	Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"`
	// Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
	// is changed the generation is set to match the current stream generation (which is incremented every
	// time spec is changed). Other processes in the system like the image importer observe that the
	// generation of spec tag is newer than the generation recorded in the status and use that as a trigger
	// to import the newest remote tag. To trigger a new import, clients may set this value to zero which
	// will reset the generation to the latest stream generation. Legacy clients will send this value as
	// nil which will be merged with the current tag generation.
	// +optional
	Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"`
	// ImportPolicy is information that controls how images may be imported by the server.
	ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"`
	// ReferencePolicy defines how other components should consume the image.
	ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"`
}
// TagImportPolicy controls how images related to this tag will be imported.
type TagImportPolicy struct {
	// Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
	Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"`
	// Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported.
	Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"`
	// ImportMode describes how to import an image manifest.
	ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"`
}
// ImportModeType describes how to import an image manifest.
// See ImportModeLegacy and ImportModePreserveOriginal for the valid values.
type ImportModeType string

const (
	// ImportModeLegacy indicates that the legacy behaviour should be used.
	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
	// sub-manifest. In this case, the platform is chosen in the following order of priority:
	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
	// This mode is the default.
	ImportModeLegacy ImportModeType = "Legacy"
	// ImportModePreserveOriginal indicates that the original manifest will be preserved.
	// For manifest lists, the manifest list and all its sub-manifests will be imported.
	ImportModePreserveOriginal ImportModeType = "PreserveOriginal"
)
// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when
// image change triggers are fired. Valid values are SourceTagReferencePolicy and LocalTagReferencePolicy.
type TagReferencePolicyType string

const (
	// SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag
	// is resolved into other resources (builds and deployment configurations).
	SourceTagReferencePolicy TagReferencePolicyType = "Source"
	// LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry,
	// falling back to the remote location if the integrated registry has not been configured. The reference will
	// use the internal DNS name or registry service IP.
	LocalTagReferencePolicy TagReferencePolicyType = "Local"
)
// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
// image change triggers in deployment configs or builds are resolved. This allows the image stream
// author to control how images are accessed.
type TagReferencePolicy struct {
	// Type determines how the image pull spec should be transformed when the image stream tag is used in
	// deployment config triggers or new builds. The default value is `Source`, indicating the original
	// location of the image should be used (if imported). The user may also specify `Local`, indicating
	// that the pull spec should point to the integrated container image registry and leverage the registry's
	// ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
	// image to be managed from the image stream's namespace, so others on the platform can access a remote
	// image but have no access to the remote secret. It also allows the image layers to be mirrored into
	// the local registry, so that images can still be pulled even if the upstream registry is unavailable.
	Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"`
}
// ImageStreamStatus contains information about the state of this image stream.
type ImageStreamStatus struct {
	// DockerImageRepository represents the effective location this stream may be accessed at.
	// May be empty until the server determines where the repository is located.
	DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
	// PublicDockerImageRepository represents the public location from where the image can
	// be pulled outside the cluster. This field may be empty if the administrator
	// has not exposed the integrated registry externally.
	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
	// Tags are a historical record of images associated with each tag. The first entry in the
	// TagEvent array is the currently tagged image.
	// +patchMergeKey=tag
	// +patchStrategy=merge
	Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
}
// NamedTagEventList relates a tag to its image history.
type NamedTagEventList struct {
	// Tag is the tag for which the history is recorded
	Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
	// Items is the historical record of tag events for this tag; the first entry is the
	// most recently tagged image (see ImageStreamStatus.Tags). The previous comment
	// "Standard object's metadata." was a copy/paste error.
	Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
	// Conditions is an array of conditions that apply to the tag event list.
	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
}
// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
type TagEvent struct {
	// Created holds the time the TagEvent was created.
	Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
	// DockerImageReference is the string that can be used to pull this image.
	DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
	// Image is the image this tag event refers to.
	Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
	// Generation is the spec tag generation that resulted in this tag being updated.
	Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
}
// TagEventConditionType identifies a kind of condition that may be recorded for a TagEvent.
type TagEventConditionType string
// These are valid conditions of TagEvents.
const (
	// ImportSuccess with status False means the import of the specific tag failed.
	ImportSuccess TagEventConditionType = "ImportSuccess"
)
// TagEventCondition contains condition information for a tag event.
type TagEventCondition struct {
	// Type of tag event condition, currently only ImportSuccess.
	Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
	// LastTransitionTime is the time the condition transitioned from one status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// Reason is a brief machine readable explanation for the condition's last transition.
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// Message is a human readable description of the details about last transition, complementing reason.
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
	// Generation is the spec tag generation that this status corresponds to.
	Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"`
}
// +genclient
// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageStreamMapping represents a mapping from a single image stream tag to a container
// image as well as the reference to the container image stream the image came from. This
// resource is used by privileged integrators to create an image resource and to associate
// it with an image stream in the status tags field. Creating an ImageStreamMapping will
// allow any user who can view the image stream to tag or pull that image, so only create
// mappings where the user has proven they have access to the image contents directly.
// The only operation supported for this resource is create and the metadata name and
// namespace should be set to the image stream containing the tag that should be updated.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageStreamMapping struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Image is a container image. Creating the mapping associates this image with the
	// stream's status tag named by Tag (see the type description above).
	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Tag is a string value this image can be located with inside the stream.
	Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"`
}
// +genclient
// +genclient:onlyVerbs=get,list,create,update,delete
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
// Use this resource to interact with the tags and images in an image stream by tag, or
// to see the image details for a particular tag. The image associated with this resource
// is the most recently successfully tagged, imported, or pushed image (as described in the
// image stream status.tags.items list for this tag). If an import is in progress or has
// failed the previous image will be shown. Deleting an image stream tag clears both the
// status and spec fields of an image stream. If no image can be retrieved for a given tag,
// a not found error will be returned.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageStreamTag struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// tag is the spec tag associated with this image stream tag, and it may be null
	// if only pushes have occurred to this image stream.
	Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"`
	// generation is the current generation of the tagged image - if tag is provided
	// and this value is not equal to the tag generation, a user has requested an
	// import that has not completed, or conditions will be filled out indicating any
	// error.
	Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"`
	// lookupPolicy indicates whether this tag will handle image references in this
	// namespace.
	// NOTE(review): the protobuf tag says "varint" although this is a message-typed
	// field; kept as-is to match the upstream generated code.
	LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"`
	// conditions is an array of conditions that apply to the image stream tag.
	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"`
	// image associated with the ImageStream and tag.
	Image Image `json:"image" protobuf:"bytes,5,opt,name=image"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageStreamTagList is a list of ImageStreamTag objects.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageStreamTagList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of image stream tags.
	Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:onlyVerbs=get,list,create,update,delete
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageTag represents a single tag within an image stream and includes the spec,
// the status history, and the currently referenced image (if any) of the provided
// tag. This type replaces the ImageStreamTag by providing a full view of the tag.
// ImageTags are returned for every spec or status tag present on the image stream.
// If no tag exists in either form a not found error will be returned by the API.
// A create operation will succeed if no spec tag has already been defined and the
// spec field is set. Delete will remove both spec and status elements from the
// image stream.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageTag struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// spec is the spec tag associated with this image stream tag, and it may be null
	// if only pushes have occurred to this image stream.
	Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"`
	// status is the status tag details associated with this image stream tag, and it
	// may be null if no push or import has been performed.
	Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"`
	// image is the details of the most recent image stream status tag, and it may be
	// null if import has not completed or an administrator has deleted the image
	// object. To verify this is the most recent image, you must verify the generation
	// of the most recent status.items entry matches the spec tag (if a spec tag is
	// set). This field will not be set when listing image tags.
	Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ImageTagList is a list of ImageTag objects. When listing image tags, the image
// field is not populated. Tags are returned in alphabetical order by image stream
// and then tag.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ImageTagList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/register.go | vendor/github.com/openshift/api/image/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/openshift/api/image/docker10"
"github.com/openshift/api/image/dockerpre012"
)
var (
	// GroupName is the name of the API group served by this package.
	GroupName = "image.openshift.io"
	// GroupVersion is the group/version of the types in this package.
	GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
	// schemeBuilder collects the registration functions for this package's types,
	// the docker image metadata types, and the core v1 types it depends on.
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme)
	// Install is a function which adds this version to a scheme
	Install = schemeBuilder.AddToScheme
	// SchemeGroupVersion generated code relies on this name
	// Deprecated
	SchemeGroupVersion = GroupVersion
	// AddToScheme exists solely to keep the old generators creating valid code
	// DEPRECATED
	AddToScheme = schemeBuilder.AddToScheme
)
// Resource takes an unqualified resource name and qualifies it with this package's group.
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
	return schema.GroupResource{Group: GroupName, Resource: resource}
}
// addKnownTypes adds the list of known types to api.Scheme.
// It registers every object kind served by image.openshift.io/v1 and, via
// AddToGroupVersion, the standard meta types (ListOptions etc.) for this group version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(GroupVersion,
		&Image{},
		&ImageList{},
		&ImageSignature{},
		&ImageStream{},
		&ImageStreamList{},
		&ImageStreamMapping{},
		&ImageStreamTag{},
		&ImageStreamTagList{},
		&ImageStreamImage{},
		&ImageStreamLayers{},
		&ImageStreamImport{},
		&ImageTag{},
		&ImageTagList{},
		&SecretList{},
	)
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
// map_DockerImageReference documents the DockerImageReference type's fields for swagger generation.
var map_DockerImageReference = map[string]string{
	"":          "DockerImageReference points to a container image.",
	"Registry":  "Registry is the registry that contains the container image",
	"Namespace": "Namespace is the namespace that contains the container image",
	"Name":      "Name is the name of the container image",
	"Tag":       "Tag is which tag of the container image is being referenced",
	"ID":        "ID is the identifier for the container image",
}

// SwaggerDoc returns the swagger field documentation for DockerImageReference.
func (DockerImageReference) SwaggerDoc() map[string]string {
	return map_DockerImageReference
}
// map_Image documents the Image type's fields for swagger generation.
var map_Image = map[string]string{
	"":                             "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata":                     "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"dockerImageReference":         "DockerImageReference is the string that can be used to pull this image.",
	"dockerImageMetadata":          "DockerImageMetadata contains metadata about this image",
	"dockerImageMetadataVersion":   "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"",
	"dockerImageManifest":          "DockerImageManifest is the raw JSON of the manifest",
	"dockerImageLayers":            "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.",
	"signatures":                   "Signatures holds all signatures of the image.",
	"dockerImageSignatures":        "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.",
	"dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.",
	"dockerImageConfig":            "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.",
	"dockerImageManifests":         "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.",
}

// SwaggerDoc returns the swagger field documentation for Image.
func (Image) SwaggerDoc() map[string]string {
	return map_Image
}
// map_ImageBlobReferences documents the ImageBlobReferences type's fields for swagger generation.
var map_ImageBlobReferences = map[string]string{
	"":             "ImageBlobReferences describes the blob references within an image.",
	"imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.",
	"layers":       "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.",
	"config":       "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.",
	"manifests":    "manifests is the list of other image names that this image points to. For a single architecture image, it is empty. For a multi-arch image, it consists of the digests of single architecture images, such images shouldn't have layers nor config.",
}

// SwaggerDoc returns the swagger field documentation for ImageBlobReferences.
func (ImageBlobReferences) SwaggerDoc() map[string]string {
	return map_ImageBlobReferences
}
// map_ImageImportSpec documents the ImageImportSpec type's fields for swagger generation.
var map_ImageImportSpec = map[string]string{
	"":                "ImageImportSpec describes a request to import a specific image.",
	"from":            "From is the source of an image to import; only kind DockerImage is allowed",
	"to":              "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used",
	"importPolicy":    "ImportPolicy is the policy controlling how the image is imported",
	"referencePolicy": "ReferencePolicy defines how other components should consume the image",
	"includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
}

// SwaggerDoc returns the swagger field documentation for ImageImportSpec.
func (ImageImportSpec) SwaggerDoc() map[string]string {
	return map_ImageImportSpec
}
// map_ImageImportStatus documents the ImageImportStatus type's fields for swagger generation.
var map_ImageImportStatus = map[string]string{
	"":          "ImageImportStatus describes the result of an image import.",
	"status":    "Status is the status of the image import, including errors encountered while retrieving the image",
	"image":     "Image is the metadata of that image, if the image was located",
	"tag":       "Tag is the tag this image was located under, if any",
	"manifests": "Manifests holds sub-manifests metadata when importing a manifest list",
}

// SwaggerDoc returns the swagger field documentation for ImageImportStatus.
func (ImageImportStatus) SwaggerDoc() map[string]string {
	return map_ImageImportStatus
}
// map_ImageLayer documents the ImageLayer type's fields for swagger generation.
var map_ImageLayer = map[string]string{
	"":          "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.",
	"name":      "Name of the layer as defined by the underlying store.",
	"size":      "Size of the layer in bytes as defined by the underlying store.",
	"mediaType": "MediaType of the referenced object.",
}

// SwaggerDoc returns the swagger field documentation for ImageLayer.
func (ImageLayer) SwaggerDoc() map[string]string {
	return map_ImageLayer
}
// map_ImageLayerData documents the ImageLayerData type's fields for swagger generation.
var map_ImageLayerData = map[string]string{
	"":          "ImageLayerData contains metadata about an image layer.",
	"size":      "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
	"mediaType": "MediaType of the referenced object.",
}

// SwaggerDoc returns the swagger field documentation for ImageLayerData.
func (ImageLayerData) SwaggerDoc() map[string]string {
	return map_ImageLayerData
}
// map_ImageList documents the ImageList type's fields for swagger generation.
var map_ImageList = map[string]string{
	"":         "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"items":    "Items is a list of images",
}

// SwaggerDoc returns the swagger field documentation for ImageList.
func (ImageList) SwaggerDoc() map[string]string {
	return map_ImageList
}
// map_ImageLookupPolicy documents the ImageLookupPolicy type's fields for swagger generation.
var map_ImageLookupPolicy = map[string]string{
	"":      "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.",
	"local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.",
}

// SwaggerDoc returns the swagger field documentation for ImageLookupPolicy.
func (ImageLookupPolicy) SwaggerDoc() map[string]string {
	return map_ImageLookupPolicy
}
// map_ImageManifest documents the ImageManifest type's fields for swagger generation.
var map_ImageManifest = map[string]string{
	"":             "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.",
	"digest":       "Digest is the unique identifier for the manifest. It refers to an Image object.",
	"mediaType":    "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
	"manifestSize": "ManifestSize represents the size of the raw object contents, in bytes.",
	"architecture": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
	"os":           "OS specifies the operating system, for example `linux`.",
	// Fixed typo in the emitted description: "repreenting" -> "representing".
	"variant": "Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
}
// SwaggerDoc returns the swagger field documentation for ImageManifest.
func (ImageManifest) SwaggerDoc() map[string]string {
	return map_ImageManifest
}
// map_ImageSignature documents the ImageSignature type's fields for swagger generation.
var map_ImageSignature = map[string]string{
	"":              "ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. They serve just an informative purpose.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata":      "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"type":          "Required: Describes a type of stored blob.",
	"content":       "Required: An opaque binary string which is an image's signature.",
	"conditions":    "Conditions represent the latest available observations of a signature's current state.",
	"imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").",
	"signedClaims":  "Contains claims from the signature.",
	"created":       "If specified, it is the time of signature's creation.",
	"issuedBy":      "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).",
	"issuedTo":      "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).",
}

// SwaggerDoc returns the swagger field documentation for ImageSignature.
func (ImageSignature) SwaggerDoc() map[string]string {
	return map_ImageSignature
}
// map_ImageStream documents the ImageStream type's fields for swagger generation.
var map_ImageStream = map[string]string{
	"":         "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"spec":     "Spec describes the desired state of this stream",
	"status":   "Status describes the current state of this stream",
}

// SwaggerDoc returns the swagger field documentation for ImageStream.
func (ImageStream) SwaggerDoc() map[string]string {
	return map_ImageStream
}
// map_ImageStreamImage documents the ImageStreamImage type's fields for swagger generation.
// Fixed typo in the emitted description: "content addressible" -> "content addressable".
var map_ImageStreamImage = map[string]string{
	"":         "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"<STREAM>@<DIGEST>\", where the digest is the content addressable identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operations supported on the imagestreamimage endpoint are retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"image":    "Image associated with the ImageStream and image name.",
}
// SwaggerDoc returns the swagger field documentation for ImageStreamImage.
func (ImageStreamImage) SwaggerDoc() map[string]string {
	return map_ImageStreamImage
}
// map_ImageStreamImport documents the ImageStreamImport type's fields for swagger generation.
var map_ImageStreamImport = map[string]string{
	"":         "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"spec":     "Spec is a description of the images that the user wishes to import",
	"status":   "Status is the result of importing the image",
}

// SwaggerDoc returns the swagger field documentation for ImageStreamImport.
func (ImageStreamImport) SwaggerDoc() map[string]string {
	return map_ImageStreamImport
}
// map_ImageStreamImportSpec documents the ImageStreamImportSpec type's fields for swagger generation.
var map_ImageStreamImportSpec = map[string]string{
	"":           "ImageStreamImportSpec defines what images should be imported.",
	"import":     "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
	"repository": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.",
	"images":     "Images are a list of individual images to import.",
}

// SwaggerDoc returns the swagger field documentation for ImageStreamImportSpec.
func (ImageStreamImportSpec) SwaggerDoc() map[string]string {
	return map_ImageStreamImportSpec
}
// map_ImageStreamImportStatus documents the ImageStreamImportStatus type's fields for swagger generation.
var map_ImageStreamImportStatus = map[string]string{
	"":           "ImageStreamImportStatus contains information about the status of an image stream import.",
	"import":     "Import is the image stream that was successfully updated or created when 'to' was set.",
	"repository": "Repository is set if spec.repository was set to the outcome of the import",
	"images":     "Images is set with the result of importing spec.images",
}

// SwaggerDoc returns the swagger field documentation for ImageStreamImportStatus.
func (ImageStreamImportStatus) SwaggerDoc() map[string]string {
	return map_ImageStreamImportStatus
}
// map_ImageStreamLayers documents the ImageStreamLayers type's fields for swagger generation.
var map_ImageStreamLayers = map[string]string{
	"":         "ImageStreamLayers describes information about the layers referenced by images in this image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"blobs":    "blobs is a map of blob name to metadata about the blob.",
	"images":   "images is a map between an image name and the names of the blobs and config that comprise the image.",
}

// SwaggerDoc returns the swagger field documentation for ImageStreamLayers.
func (ImageStreamLayers) SwaggerDoc() map[string]string {
	return map_ImageStreamLayers
}
// map_ImageStreamList documents the ImageStreamList type's fields for swagger generation.
var map_ImageStreamList = map[string]string{
	"":         "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"items":    "Items is a list of imageStreams",
}

// SwaggerDoc returns the swagger field documentation for ImageStreamList.
func (ImageStreamList) SwaggerDoc() map[string]string {
	return map_ImageStreamList
}
var map_ImageStreamMapping = map[string]string{
"": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"image": "Image is a container image.",
"tag": "Tag is a string value this image can be located with inside the stream.",
}
func (ImageStreamMapping) SwaggerDoc() map[string]string {
return map_ImageStreamMapping
}
var map_ImageStreamSpec = map[string]string{
"": "ImageStreamSpec represents options for ImageStreams.",
"lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.",
"dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.",
"tags": "tags map arbitrary string values to specific image locators",
}
func (ImageStreamSpec) SwaggerDoc() map[string]string {
return map_ImageStreamSpec
}
var map_ImageStreamStatus = map[string]string{
"": "ImageStreamStatus contains information about the state of this image stream.",
"dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located",
"publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.",
"tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.",
}
func (ImageStreamStatus) SwaggerDoc() map[string]string {
return map_ImageStreamStatus
}
var map_ImageStreamTag = map[string]string{
"": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.",
"generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.",
"lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.",
"conditions": "conditions is an array of conditions that apply to the image stream tag.",
"image": "image associated with the ImageStream and tag.",
}
func (ImageStreamTag) SwaggerDoc() map[string]string {
return map_ImageStreamTag
}
var map_ImageStreamTagList = map[string]string{
"": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of image stream tags",
}
func (ImageStreamTagList) SwaggerDoc() map[string]string {
return map_ImageStreamTagList
}
var map_ImageTag = map[string]string{
"": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.",
"status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.",
"image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). This field will not be set when listing image tags.",
}
func (ImageTag) SwaggerDoc() map[string]string {
return map_ImageTag
}
var map_ImageTagList = map[string]string{
"": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of image stream tags",
}
func (ImageTagList) SwaggerDoc() map[string]string {
return map_ImageTagList
}
var map_NamedTagEventList = map[string]string{
"": "NamedTagEventList relates a tag to its image history.",
"tag": "Tag is the tag for which the history is recorded",
"items": "Standard object's metadata.",
"conditions": "Conditions is an array of conditions that apply to the tag event list.",
}
func (NamedTagEventList) SwaggerDoc() map[string]string {
return map_NamedTagEventList
}
var map_RepositoryImportSpec = map[string]string{
"": "RepositoryImportSpec describes a request to import images from a container image repository.",
"from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed",
"importPolicy": "ImportPolicy is the policy controlling how the image is imported",
"referencePolicy": "ReferencePolicy defines how other components should consume the image",
"includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
}
func (RepositoryImportSpec) SwaggerDoc() map[string]string {
return map_RepositoryImportSpec
}
var map_RepositoryImportStatus = map[string]string{
"": "RepositoryImportStatus describes the result of an image repository import",
"status": "Status reflects whether any failure occurred during import",
"images": "Images is a list of images successfully retrieved by the import of the repository.",
"additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.",
}
func (RepositoryImportStatus) SwaggerDoc() map[string]string {
return map_RepositoryImportStatus
}
var map_SignatureCondition = map[string]string{
"": "SignatureCondition describes an image signature condition of particular kind at particular probe time.",
"type": "Type of signature condition, Complete or Failed.",
"status": "Status of the condition, one of True, False, Unknown.",
"lastProbeTime": "Last time the condition was checked.",
"lastTransitionTime": "Last time the condition transit from one status to another.",
"reason": "(brief) reason for the condition's last transition.",
"message": "Human readable message indicating details about last transition.",
}
func (SignatureCondition) SwaggerDoc() map[string]string {
return map_SignatureCondition
}
var map_SignatureGenericEntity = map[string]string{
"": "SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject of signing certificate or key.",
"organization": "Organization name.",
"commonName": "Common name (e.g. openshift-signing-service).",
}
func (SignatureGenericEntity) SwaggerDoc() map[string]string {
return map_SignatureGenericEntity
}
var map_SignatureIssuer = map[string]string{
"": "SignatureIssuer holds information about an issuer of signing certificate or key.",
}
func (SignatureIssuer) SwaggerDoc() map[string]string {
return map_SignatureIssuer
}
var map_SignatureSubject = map[string]string{
"": "SignatureSubject holds information about a person or entity who created the signature.",
"publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).",
}
func (SignatureSubject) SwaggerDoc() map[string]string {
return map_SignatureSubject
}
var map_TagEvent = map[string]string{
"": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.",
"created": "Created holds the time the TagEvent was created",
"dockerImageReference": "DockerImageReference is the string that can be used to pull this image",
"image": "Image is the image",
"generation": "Generation is the spec tag generation that resulted in this tag being updated",
}
func (TagEvent) SwaggerDoc() map[string]string {
return map_TagEvent
}
var map_TagEventCondition = map[string]string{
"": "TagEventCondition contains condition information for a tag event.",
"type": "Type of tag event condition, currently only ImportSuccess",
"status": "Status of the condition, one of True, False, Unknown.",
"lastTransitionTime": "LastTransitionTIme is the time the condition transitioned from one status to another.",
"reason": "Reason is a brief machine readable explanation for the condition's last transition.",
"message": "Message is a human readable description of the details about last transition, complementing reason.",
"generation": "Generation is the spec tag generation that this status corresponds to",
}
func (TagEventCondition) SwaggerDoc() map[string]string {
return map_TagEventCondition
}
var map_TagImportPolicy = map[string]string{
"": "TagImportPolicy controls how images related to this tag will be imported.",
"insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.",
"scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported",
"importMode": "ImportMode describes how to import an image manifest.",
}
func (TagImportPolicy) SwaggerDoc() map[string]string {
return map_TagImportPolicy
}
var map_TagReference = map[string]string{
"": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.",
"name": "Name of the tag",
"annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.",
"from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.",
"reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.",
"generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.",
"importPolicy": "ImportPolicy is information that controls how images may be imported by the server.",
"referencePolicy": "ReferencePolicy defines how other components should consume the image.",
}
func (TagReference) SwaggerDoc() map[string]string {
return map_TagReference
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/doc.go | vendor/github.com/openshift/api/image/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=image.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/generated.pb.go | vendor/github.com/openshift/api/image/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/image/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *DockerImageReference) Reset() { *m = DockerImageReference{} }
func (*DockerImageReference) ProtoMessage() {}
func (*DockerImageReference) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{0}
}
func (m *DockerImageReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *DockerImageReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_DockerImageReference.Merge(m, src)
}
func (m *DockerImageReference) XXX_Size() int {
return m.Size()
}
func (m *DockerImageReference) XXX_DiscardUnknown() {
xxx_messageInfo_DockerImageReference.DiscardUnknown(m)
}
var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo
func (m *Image) Reset() { *m = Image{} }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{1}
}
func (m *Image) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Image) XXX_Merge(src proto.Message) {
xxx_messageInfo_Image.Merge(m, src)
}
func (m *Image) XXX_Size() int {
return m.Size()
}
func (m *Image) XXX_DiscardUnknown() {
xxx_messageInfo_Image.DiscardUnknown(m)
}
var xxx_messageInfo_Image proto.InternalMessageInfo
func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} }
func (*ImageBlobReferences) ProtoMessage() {}
func (*ImageBlobReferences) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{2}
}
func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageBlobReferences) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageBlobReferences.Merge(m, src)
}
func (m *ImageBlobReferences) XXX_Size() int {
return m.Size()
}
func (m *ImageBlobReferences) XXX_DiscardUnknown() {
xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m)
}
var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo
func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} }
func (*ImageImportSpec) ProtoMessage() {}
func (*ImageImportSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{3}
}
func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageImportSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageImportSpec.Merge(m, src)
}
func (m *ImageImportSpec) XXX_Size() int {
return m.Size()
}
func (m *ImageImportSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ImageImportSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo
func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} }
func (*ImageImportStatus) ProtoMessage() {}
func (*ImageImportStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{4}
}
func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageImportStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageImportStatus.Merge(m, src)
}
func (m *ImageImportStatus) XXX_Size() int {
return m.Size()
}
func (m *ImageImportStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ImageImportStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo
func (m *ImageLayer) Reset() { *m = ImageLayer{} }
func (*ImageLayer) ProtoMessage() {}
func (*ImageLayer) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{5}
}
func (m *ImageLayer) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageLayer) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageLayer.Merge(m, src)
}
func (m *ImageLayer) XXX_Size() int {
return m.Size()
}
func (m *ImageLayer) XXX_DiscardUnknown() {
xxx_messageInfo_ImageLayer.DiscardUnknown(m)
}
var xxx_messageInfo_ImageLayer proto.InternalMessageInfo
func (m *ImageLayerData) Reset() { *m = ImageLayerData{} }
func (*ImageLayerData) ProtoMessage() {}
func (*ImageLayerData) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{6}
}
func (m *ImageLayerData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageLayerData) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageLayerData.Merge(m, src)
}
func (m *ImageLayerData) XXX_Size() int {
return m.Size()
}
func (m *ImageLayerData) XXX_DiscardUnknown() {
xxx_messageInfo_ImageLayerData.DiscardUnknown(m)
}
var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo
func (m *ImageList) Reset() { *m = ImageList{} }
func (*ImageList) ProtoMessage() {}
func (*ImageList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{7}
}
func (m *ImageList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageList.Merge(m, src)
}
func (m *ImageList) XXX_Size() int {
return m.Size()
}
func (m *ImageList) XXX_DiscardUnknown() {
xxx_messageInfo_ImageList.DiscardUnknown(m)
}
var xxx_messageInfo_ImageList proto.InternalMessageInfo
func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} }
func (*ImageLookupPolicy) ProtoMessage() {}
func (*ImageLookupPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{8}
}
func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageLookupPolicy.Merge(m, src)
}
func (m *ImageLookupPolicy) XXX_Size() int {
return m.Size()
}
func (m *ImageLookupPolicy) XXX_DiscardUnknown() {
xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m)
}
var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo
func (m *ImageManifest) Reset() { *m = ImageManifest{} }
func (*ImageManifest) ProtoMessage() {}
func (*ImageManifest) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{9}
}
func (m *ImageManifest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageManifest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageManifest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageManifest.Merge(m, src)
}
func (m *ImageManifest) XXX_Size() int {
return m.Size()
}
func (m *ImageManifest) XXX_DiscardUnknown() {
xxx_messageInfo_ImageManifest.DiscardUnknown(m)
}
var xxx_messageInfo_ImageManifest proto.InternalMessageInfo
func (m *ImageSignature) Reset() { *m = ImageSignature{} }
func (*ImageSignature) ProtoMessage() {}
func (*ImageSignature) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{10}
}
func (m *ImageSignature) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageSignature) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageSignature.Merge(m, src)
}
func (m *ImageSignature) XXX_Size() int {
return m.Size()
}
func (m *ImageSignature) XXX_DiscardUnknown() {
xxx_messageInfo_ImageSignature.DiscardUnknown(m)
}
var xxx_messageInfo_ImageSignature proto.InternalMessageInfo
func (m *ImageStream) Reset() { *m = ImageStream{} }
func (*ImageStream) ProtoMessage() {}
func (*ImageStream) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{11}
}
func (m *ImageStream) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStream) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStream.Merge(m, src)
}
func (m *ImageStream) XXX_Size() int {
return m.Size()
}
func (m *ImageStream) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStream.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStream proto.InternalMessageInfo
func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} }
func (*ImageStreamImage) ProtoMessage() {}
func (*ImageStreamImage) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{12}
}
func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamImage) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamImage.Merge(m, src)
}
func (m *ImageStreamImage) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamImage) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamImage.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo
func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} }
func (*ImageStreamImport) ProtoMessage() {}
func (*ImageStreamImport) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{13}
}
func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamImport) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamImport.Merge(m, src)
}
func (m *ImageStreamImport) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamImport) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamImport.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo
func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} }
func (*ImageStreamImportSpec) ProtoMessage() {}
func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{14}
}
func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamImportSpec.Merge(m, src)
}
func (m *ImageStreamImportSpec) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamImportSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo
func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} }
func (*ImageStreamImportStatus) ProtoMessage() {}
func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{15}
}
func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamImportStatus.Merge(m, src)
}
func (m *ImageStreamImportStatus) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamImportStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo
func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} }
func (*ImageStreamLayers) ProtoMessage() {}
func (*ImageStreamLayers) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{16}
}
func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamLayers) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamLayers.Merge(m, src)
}
func (m *ImageStreamLayers) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamLayers) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo
func (m *ImageStreamList) Reset() { *m = ImageStreamList{} }
func (*ImageStreamList) ProtoMessage() {}
func (*ImageStreamList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{17}
}
func (m *ImageStreamList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamList.Merge(m, src)
}
func (m *ImageStreamList) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamList) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamList.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo
func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} }
func (*ImageStreamMapping) ProtoMessage() {}
func (*ImageStreamMapping) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{18}
}
func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamMapping) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamMapping.Merge(m, src)
}
func (m *ImageStreamMapping) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamMapping) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo
func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} }
func (*ImageStreamSpec) ProtoMessage() {}
func (*ImageStreamSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{19}
}
func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamSpec.Merge(m, src)
}
func (m *ImageStreamSpec) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo
func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} }
func (*ImageStreamStatus) ProtoMessage() {}
func (*ImageStreamStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{20}
}
func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamStatus.Merge(m, src)
}
func (m *ImageStreamStatus) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo
func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} }
func (*ImageStreamTag) ProtoMessage() {}
func (*ImageStreamTag) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{21}
}
func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamTag) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamTag.Merge(m, src)
}
func (m *ImageStreamTag) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamTag) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamTag.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo
func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} }
func (*ImageStreamTagList) ProtoMessage() {}
func (*ImageStreamTagList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{22}
}
func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageStreamTagList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageStreamTagList.Merge(m, src)
}
func (m *ImageStreamTagList) XXX_Size() int {
return m.Size()
}
func (m *ImageStreamTagList) XXX_DiscardUnknown() {
xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m)
}
var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo
func (m *ImageTag) Reset() { *m = ImageTag{} }
func (*ImageTag) ProtoMessage() {}
func (*ImageTag) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{23}
}
func (m *ImageTag) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageTag) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageTag.Merge(m, src)
}
func (m *ImageTag) XXX_Size() int {
return m.Size()
}
func (m *ImageTag) XXX_DiscardUnknown() {
xxx_messageInfo_ImageTag.DiscardUnknown(m)
}
var xxx_messageInfo_ImageTag proto.InternalMessageInfo
func (m *ImageTagList) Reset() { *m = ImageTagList{} }
func (*ImageTagList) ProtoMessage() {}
func (*ImageTagList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{24}
}
func (m *ImageTagList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ImageTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ImageTagList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImageTagList.Merge(m, src)
}
func (m *ImageTagList) XXX_Size() int {
return m.Size()
}
func (m *ImageTagList) XXX_DiscardUnknown() {
xxx_messageInfo_ImageTagList.DiscardUnknown(m)
}
var xxx_messageInfo_ImageTagList proto.InternalMessageInfo
func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} }
func (*NamedTagEventList) ProtoMessage() {}
func (*NamedTagEventList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{25}
}
func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NamedTagEventList) XXX_Merge(src proto.Message) {
xxx_messageInfo_NamedTagEventList.Merge(m, src)
}
func (m *NamedTagEventList) XXX_Size() int {
return m.Size()
}
func (m *NamedTagEventList) XXX_DiscardUnknown() {
xxx_messageInfo_NamedTagEventList.DiscardUnknown(m)
}
var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo
func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} }
func (*RepositoryImportSpec) ProtoMessage() {}
func (*RepositoryImportSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{26}
}
func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepositoryImportSpec.Merge(m, src)
}
func (m *RepositoryImportSpec) XXX_Size() int {
return m.Size()
}
func (m *RepositoryImportSpec) XXX_DiscardUnknown() {
xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m)
}
var xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo
func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} }
func (*RepositoryImportStatus) ProtoMessage() {}
func (*RepositoryImportStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{27}
}
func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepositoryImportStatus.Merge(m, src)
}
func (m *RepositoryImportStatus) XXX_Size() int {
return m.Size()
}
func (m *RepositoryImportStatus) XXX_DiscardUnknown() {
xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m)
}
var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{28}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SecretList) XXX_Merge(src proto.Message) {
xxx_messageInfo_SecretList.Merge(m, src)
}
func (m *SecretList) XXX_Size() int {
return m.Size()
}
func (m *SecretList) XXX_DiscardUnknown() {
xxx_messageInfo_SecretList.DiscardUnknown(m)
}
var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *SignatureCondition) Reset() { *m = SignatureCondition{} }
func (*SignatureCondition) ProtoMessage() {}
func (*SignatureCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{29}
}
func (m *SignatureCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SignatureCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_SignatureCondition.Merge(m, src)
}
func (m *SignatureCondition) XXX_Size() int {
return m.Size()
}
func (m *SignatureCondition) XXX_DiscardUnknown() {
xxx_messageInfo_SignatureCondition.DiscardUnknown(m)
}
var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo
func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} }
func (*SignatureGenericEntity) ProtoMessage() {}
func (*SignatureGenericEntity) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{30}
}
func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) {
xxx_messageInfo_SignatureGenericEntity.Merge(m, src)
}
func (m *SignatureGenericEntity) XXX_Size() int {
return m.Size()
}
func (m *SignatureGenericEntity) XXX_DiscardUnknown() {
xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m)
}
var xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo
func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} }
func (*SignatureIssuer) ProtoMessage() {}
func (*SignatureIssuer) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{31}
}
func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SignatureIssuer) XXX_Merge(src proto.Message) {
xxx_messageInfo_SignatureIssuer.Merge(m, src)
}
func (m *SignatureIssuer) XXX_Size() int {
return m.Size()
}
func (m *SignatureIssuer) XXX_DiscardUnknown() {
xxx_messageInfo_SignatureIssuer.DiscardUnknown(m)
}
var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo
func (m *SignatureSubject) Reset() { *m = SignatureSubject{} }
func (*SignatureSubject) ProtoMessage() {}
func (*SignatureSubject) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{32}
}
func (m *SignatureSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SignatureSubject) XXX_Merge(src proto.Message) {
xxx_messageInfo_SignatureSubject.Merge(m, src)
}
func (m *SignatureSubject) XXX_Size() int {
return m.Size()
}
func (m *SignatureSubject) XXX_DiscardUnknown() {
xxx_messageInfo_SignatureSubject.DiscardUnknown(m)
}
var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo
func (m *TagEvent) Reset() { *m = TagEvent{} }
func (*TagEvent) ProtoMessage() {}
func (*TagEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{33}
}
func (m *TagEvent) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagEvent) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagEvent.Merge(m, src)
}
func (m *TagEvent) XXX_Size() int {
return m.Size()
}
func (m *TagEvent) XXX_DiscardUnknown() {
xxx_messageInfo_TagEvent.DiscardUnknown(m)
}
var xxx_messageInfo_TagEvent proto.InternalMessageInfo
func (m *TagEventCondition) Reset() { *m = TagEventCondition{} }
func (*TagEventCondition) ProtoMessage() {}
func (*TagEventCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{34}
}
func (m *TagEventCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagEventCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagEventCondition.Merge(m, src)
}
func (m *TagEventCondition) XXX_Size() int {
return m.Size()
}
func (m *TagEventCondition) XXX_DiscardUnknown() {
xxx_messageInfo_TagEventCondition.DiscardUnknown(m)
}
var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo
func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} }
func (*TagImportPolicy) ProtoMessage() {}
func (*TagImportPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{35}
}
func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagImportPolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagImportPolicy.Merge(m, src)
}
func (m *TagImportPolicy) XXX_Size() int {
return m.Size()
}
func (m *TagImportPolicy) XXX_DiscardUnknown() {
xxx_messageInfo_TagImportPolicy.DiscardUnknown(m)
}
var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo
func (m *TagReference) Reset() { *m = TagReference{} }
func (*TagReference) ProtoMessage() {}
func (*TagReference) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{36}
}
func (m *TagReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagReference.Merge(m, src)
}
func (m *TagReference) XXX_Size() int {
return m.Size()
}
func (m *TagReference) XXX_DiscardUnknown() {
xxx_messageInfo_TagReference.DiscardUnknown(m)
}
var xxx_messageInfo_TagReference proto.InternalMessageInfo
func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} }
func (*TagReferencePolicy) ProtoMessage() {}
func (*TagReferencePolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_650a0b34f65fde60, []int{37}
}
func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TagReferencePolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_TagReferencePolicy.Merge(m, src)
}
func (m *TagReferencePolicy) XXX_Size() int {
return m.Size()
}
func (m *TagReferencePolicy) XXX_DiscardUnknown() {
xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m)
}
var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo
func init() {
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/v1/legacy.go | vendor/github.com/openshift/api/image/v1/legacy.go | package v1
import (
"github.com/openshift/api/image/docker10"
"github.com/openshift/api/image/dockerpre012"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&Image{},
&ImageList{},
&ImageSignature{},
&ImageStream{},
&ImageStreamList{},
&ImageStreamMapping{},
&ImageStreamTag{},
&ImageStreamTagList{},
&ImageStreamImage{},
&ImageStreamImport{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go | vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package docker10
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
*out = *in
if in.PortSpecs != nil {
in, out := &in.PortSpecs, &out.PortSpecs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExposedPorts != nil {
in, out := &in.ExposedPorts, &out.ExposedPorts
*out = make(map[string]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Cmd != nil {
in, out := &in.Cmd, &out.Cmd
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DNS != nil {
in, out := &in.DNS, &out.DNS
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make(map[string]struct{}, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Entrypoint != nil {
in, out := &in.Entrypoint, &out.Entrypoint
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecurityOpts != nil {
in, out := &in.SecurityOpts, &out.SecurityOpts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.OnBuild != nil {
in, out := &in.OnBuild, &out.OnBuild
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
func (in *DockerConfig) DeepCopy() *DockerConfig {
if in == nil {
return nil
}
out := new(DockerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerImage) DeepCopyInto(out *DockerImage) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Created.DeepCopyInto(&out.Created)
in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(DockerConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage.
func (in *DockerImage) DeepCopy() *DockerImage {
if in == nil {
return nil
}
out := new(DockerImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DockerImage) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/docker10/register.go | vendor/github.com/openshift/api/image/docker10/register.go | package docker10
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
GroupName = "image.openshift.io"
LegacyGroupName = ""
)
// SchemeGroupVersion is group version used to register these objects
var (
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"}
LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"}
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme
// Install is a function which adds this version to a scheme
Install = SchemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DockerImage{},
)
return nil
}
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(LegacySchemeGroupVersion,
&DockerImage{},
)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go | package docker10
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_DockerConfig = map[string]string{
"": "DockerConfig is the list of configuration options used when creating a container.",
}
func (DockerConfig) SwaggerDoc() map[string]string {
return map_DockerConfig
}
var map_DockerImage = map[string]string{
"": "DockerImage is the type representing a container image and its various properties when retrieved from the Docker client API.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
}
func (DockerImage) SwaggerDoc() map[string]string {
return map_DockerImage
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/docker10/doc.go | vendor/github.com/openshift/api/image/docker10/doc.go | // +k8s:deepcopy-gen=package,register
// Package docker10 is the docker10 version of the API.
package docker10
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/image/docker10/types_docker.go | vendor/github.com/openshift/api/image/docker10/types_docker.go | package docker10
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DockerImage is the type representing a container image and its various properties when
// retrieved from the Docker client API.
//
// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
// +openshift:compatibility-gen:level=4
// +openshift:compatibility-gen:internal
type DockerImage struct {
metav1.TypeMeta `json:",inline"`
ID string `json:"Id"`
Parent string `json:"Parent,omitempty"`
Comment string `json:"Comment,omitempty"`
Created metav1.Time `json:"Created,omitempty"`
Container string `json:"Container,omitempty"`
ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"`
DockerVersion string `json:"DockerVersion,omitempty"`
Author string `json:"Author,omitempty"`
Config *DockerConfig `json:"Config,omitempty"`
Architecture string `json:"Architecture,omitempty"`
Size int64 `json:"Size,omitempty"`
}
// DockerConfig is the list of configuration options used when creating a container.
type DockerConfig struct {
Hostname string `json:"Hostname,omitempty"`
Domainname string `json:"Domainname,omitempty"`
User string `json:"User,omitempty"`
Memory int64 `json:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty"`
AttachStdin bool `json:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty"`
AttachStderr bool `json:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty"`
ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
Tty bool `json:"Tty,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty"`
StdinOnce bool `json:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty"`
Cmd []string `json:"Cmd,omitempty"`
DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
Entrypoint []string `json:"Entrypoint,omitempty"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty"`
Labels map[string]string `json:"Labels,omitempty"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Group) DeepCopyInto(out *Group) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Users != nil {
in, out := &in.Users, &out.Users
*out = make(OptionalNames, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group.
func (in *Group) DeepCopy() *Group {
if in == nil {
return nil
}
out := new(Group)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Group) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupList) DeepCopyInto(out *GroupList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Group, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList.
func (in *GroupList) DeepCopy() *GroupList {
if in == nil {
return nil
}
out := new(GroupList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GroupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Identity) DeepCopyInto(out *Identity) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.User = in.User
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identity.
func (in *Identity) DeepCopy() *Identity {
if in == nil {
return nil
}
out := new(Identity)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Identity) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityList) DeepCopyInto(out *IdentityList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Identity, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityList.
func (in *IdentityList) DeepCopy() *IdentityList {
if in == nil {
return nil
}
out := new(IdentityList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IdentityList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in OptionalNames) DeepCopyInto(out *OptionalNames) {
{
in := &in
*out = make(OptionalNames, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames.
func (in OptionalNames) DeepCopy() OptionalNames {
if in == nil {
return nil
}
out := new(OptionalNames)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *User) DeepCopyInto(out *User) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Identities != nil {
in, out := &in.Identities, &out.Identities
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.
func (in *User) DeepCopy() *User {
if in == nil {
return nil
}
out := new(User)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *User) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserIdentityMapping) DeepCopyInto(out *UserIdentityMapping) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Identity = in.Identity
out.User = in.User
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityMapping.
func (in *UserIdentityMapping) DeepCopy() *UserIdentityMapping {
if in == nil {
return nil
}
out := new(UserIdentityMapping)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UserIdentityMapping) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserList) DeepCopyInto(out *UserList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]User, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList.
func (in *UserList) DeepCopy() *UserList {
if in == nil {
return nil
}
out := new(UserList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UserList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/types.go | vendor/github.com/openshift/api/user/v1/types.go | package v1
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Upon log in, every user of the system receives a User and Identity resource. Administrators
// may directly manipulate the attributes of the users for their own tracking, or set groups
// via the API. The user name is unique and is chosen based on the value provided by the
// identity provider - if a user already exists with the incoming name, the user name may have
// a number appended to it depending on the configuration of the system.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type User struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// FullName is the full name of user
FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"`
// Identities are the identities associated with this user
// +optional
Identities []string `json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"`
// Groups specifies group names this user is a member of.
// This field is deprecated and will be removed in a future release.
// Instead, create a Group object containing the name of this User.
Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// UserList is a collection of Users
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type UserList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of users
Items []User `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Identity records a successful authentication of a user with an identity provider. The
// information about the source of authentication is stored on the identity, and the identity
// is then associated with a single user object. Multiple identities can reference a single
// user. Information retrieved from the authentication provider is stored in the extra field
// using a schema determined by the provider.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type Identity struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// ProviderName is the source of identity information
ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"`
// ProviderUserName uniquely represents this identity in the scope of the provider
ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"`
// User is a reference to the user this identity is associated with
// Both Name and UID must be set
User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"`
// Extra holds extra information about this identity
Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IdentityList is a collection of Identities
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type IdentityList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of identities
Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=get,create,update,delete
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// UserIdentityMapping maps a user to an identity
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type UserIdentityMapping struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Identity is a reference to an identity
Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"`
// User is a reference to a user
User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
}
// OptionalNames is an array that may also be left nil to distinguish between set and unset.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type OptionalNames []string
func (t OptionalNames) String() string {
return fmt.Sprintf("%v", []string(t))
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Group represents a referenceable set of Users
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type Group struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Users is the list of users in this group.
Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// GroupList is a collection of Groups
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type GroupList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of groups
Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/register.go | vendor/github.com/openshift/api/user/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "user.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&User{},
&UserList{},
&Identity{},
&IdentityList{},
&UserIdentityMapping{},
&Group{},
&GroupList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_Group = map[string]string{
"": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"users": "Users is the list of users in this group.",
}
func (Group) SwaggerDoc() map[string]string {
return map_Group
}
var map_GroupList = map[string]string{
"": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of groups",
}
func (GroupList) SwaggerDoc() map[string]string {
return map_GroupList
}
var map_Identity = map[string]string{
"": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"providerName": "ProviderName is the source of identity information",
"providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider",
"user": "User is a reference to the user this identity is associated with Both Name and UID must be set",
"extra": "Extra holds extra information about this identity",
}
func (Identity) SwaggerDoc() map[string]string {
return map_Identity
}
var map_IdentityList = map[string]string{
"": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of identities",
}
func (IdentityList) SwaggerDoc() map[string]string {
return map_IdentityList
}
var map_User = map[string]string{
"": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"fullName": "FullName is the full name of user",
"identities": "Identities are the identities associated with this user",
"groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.",
}
func (User) SwaggerDoc() map[string]string {
return map_User
}
var map_UserIdentityMapping = map[string]string{
"": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"identity": "Identity is a reference to an identity",
"user": "User is a reference to a user",
}
func (UserIdentityMapping) SwaggerDoc() map[string]string {
return map_UserIdentityMapping
}
var map_UserList = map[string]string{
"": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of users",
}
func (UserList) SwaggerDoc() map[string]string {
return map_UserList
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/doc.go | vendor/github.com/openshift/api/user/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/user/apis/user
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=user.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/generated.pb.go | vendor/github.com/openshift/api/user/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/user/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *Group) Reset() { *m = Group{} }
func (*Group) ProtoMessage() {}
func (*Group) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{0}
}
func (m *Group) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Group) XXX_Merge(src proto.Message) {
xxx_messageInfo_Group.Merge(m, src)
}
func (m *Group) XXX_Size() int {
return m.Size()
}
func (m *Group) XXX_DiscardUnknown() {
xxx_messageInfo_Group.DiscardUnknown(m)
}
var xxx_messageInfo_Group proto.InternalMessageInfo
func (m *GroupList) Reset() { *m = GroupList{} }
func (*GroupList) ProtoMessage() {}
func (*GroupList) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{1}
}
func (m *GroupList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *GroupList) XXX_Merge(src proto.Message) {
xxx_messageInfo_GroupList.Merge(m, src)
}
func (m *GroupList) XXX_Size() int {
return m.Size()
}
func (m *GroupList) XXX_DiscardUnknown() {
xxx_messageInfo_GroupList.DiscardUnknown(m)
}
var xxx_messageInfo_GroupList proto.InternalMessageInfo
func (m *Identity) Reset() { *m = Identity{} }
func (*Identity) ProtoMessage() {}
func (*Identity) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{2}
}
func (m *Identity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Identity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Identity.Merge(m, src)
}
func (m *Identity) XXX_Size() int {
return m.Size()
}
func (m *Identity) XXX_DiscardUnknown() {
xxx_messageInfo_Identity.DiscardUnknown(m)
}
var xxx_messageInfo_Identity proto.InternalMessageInfo
func (m *IdentityList) Reset() { *m = IdentityList{} }
func (*IdentityList) ProtoMessage() {}
func (*IdentityList) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{3}
}
func (m *IdentityList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *IdentityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *IdentityList) XXX_Merge(src proto.Message) {
xxx_messageInfo_IdentityList.Merge(m, src)
}
func (m *IdentityList) XXX_Size() int {
return m.Size()
}
func (m *IdentityList) XXX_DiscardUnknown() {
xxx_messageInfo_IdentityList.DiscardUnknown(m)
}
var xxx_messageInfo_IdentityList proto.InternalMessageInfo
func (m *OptionalNames) Reset() { *m = OptionalNames{} }
func (*OptionalNames) ProtoMessage() {}
func (*OptionalNames) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{4}
}
func (m *OptionalNames) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *OptionalNames) XXX_Merge(src proto.Message) {
xxx_messageInfo_OptionalNames.Merge(m, src)
}
func (m *OptionalNames) XXX_Size() int {
return m.Size()
}
func (m *OptionalNames) XXX_DiscardUnknown() {
xxx_messageInfo_OptionalNames.DiscardUnknown(m)
}
var xxx_messageInfo_OptionalNames proto.InternalMessageInfo
func (m *User) Reset() { *m = User{} }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{5}
}
func (m *User) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *User) XXX_Merge(src proto.Message) {
xxx_messageInfo_User.Merge(m, src)
}
func (m *User) XXX_Size() int {
return m.Size()
}
func (m *User) XXX_DiscardUnknown() {
xxx_messageInfo_User.DiscardUnknown(m)
}
var xxx_messageInfo_User proto.InternalMessageInfo
func (m *UserIdentityMapping) Reset() { *m = UserIdentityMapping{} }
func (*UserIdentityMapping) ProtoMessage() {}
func (*UserIdentityMapping) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{6}
}
func (m *UserIdentityMapping) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *UserIdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *UserIdentityMapping) XXX_Merge(src proto.Message) {
xxx_messageInfo_UserIdentityMapping.Merge(m, src)
}
func (m *UserIdentityMapping) XXX_Size() int {
return m.Size()
}
func (m *UserIdentityMapping) XXX_DiscardUnknown() {
xxx_messageInfo_UserIdentityMapping.DiscardUnknown(m)
}
var xxx_messageInfo_UserIdentityMapping proto.InternalMessageInfo
func (m *UserList) Reset() { *m = UserList{} }
func (*UserList) ProtoMessage() {}
func (*UserList) Descriptor() ([]byte, []int) {
return fileDescriptor_ea159b02d89a1362, []int{7}
}
func (m *UserList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *UserList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *UserList) XXX_Merge(src proto.Message) {
xxx_messageInfo_UserList.Merge(m, src)
}
func (m *UserList) XXX_Size() int {
return m.Size()
}
func (m *UserList) XXX_DiscardUnknown() {
xxx_messageInfo_UserList.DiscardUnknown(m)
}
var xxx_messageInfo_UserList proto.InternalMessageInfo
func init() {
proto.RegisterType((*Group)(nil), "github.com.openshift.api.user.v1.Group")
proto.RegisterType((*GroupList)(nil), "github.com.openshift.api.user.v1.GroupList")
proto.RegisterType((*Identity)(nil), "github.com.openshift.api.user.v1.Identity")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.user.v1.Identity.ExtraEntry")
proto.RegisterType((*IdentityList)(nil), "github.com.openshift.api.user.v1.IdentityList")
proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.user.v1.OptionalNames")
proto.RegisterType((*User)(nil), "github.com.openshift.api.user.v1.User")
proto.RegisterType((*UserIdentityMapping)(nil), "github.com.openshift.api.user.v1.UserIdentityMapping")
proto.RegisterType((*UserList)(nil), "github.com.openshift.api.user.v1.UserList")
}
func init() {
proto.RegisterFile("github.com/openshift/api/user/v1/generated.proto", fileDescriptor_ea159b02d89a1362)
}
var fileDescriptor_ea159b02d89a1362 = []byte{
// 726 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x13, 0x4b,
0x14, 0xf5, 0xc4, 0xde, 0xc8, 0x9e, 0x38, 0x4f, 0xd6, 0xbe, 0x14, 0x2b, 0x17, 0x6b, 0x6b, 0x9f,
0xf4, 0x88, 0x10, 0xcc, 0x26, 0x11, 0x20, 0x2b, 0xa5, 0x45, 0x82, 0x22, 0x12, 0x12, 0x46, 0xa2,
0x89, 0x28, 0x98, 0xd8, 0xe3, 0xf5, 0x60, 0xef, 0x87, 0x76, 0x67, 0x2d, 0xdc, 0xe5, 0x27, 0x40,
0x47, 0xc9, 0x9f, 0x40, 0x14, 0x88, 0x3e, 0x74, 0x29, 0x53, 0x20, 0x8b, 0x2c, 0x1d, 0xbf, 0x02,
0xcd, 0xec, 0x87, 0xd7, 0xf9, 0x90, 0x23, 0x21, 0xb9, 0xdb, 0xb9, 0x73, 0xcf, 0x99, 0x73, 0xcf,
0xbd, 0xd7, 0x32, 0xdc, 0xb0, 0x18, 0xef, 0x87, 0x27, 0xa8, 0xe3, 0xda, 0xa6, 0xeb, 0x51, 0x27,
0xe8, 0xb3, 0x1e, 0x37, 0x89, 0xc7, 0xcc, 0x30, 0xa0, 0xbe, 0x39, 0xda, 0x34, 0x2d, 0xea, 0x50,
0x9f, 0x70, 0xda, 0x45, 0x9e, 0xef, 0x72, 0x57, 0x6d, 0x4e, 0x11, 0x28, 0x43, 0x20, 0xe2, 0x31,
0x24, 0x10, 0x68, 0xb4, 0x59, 0x7f, 0x98, 0xe3, 0xb4, 0x5c, 0xcb, 0x35, 0x25, 0xf0, 0x24, 0xec,
0xc9, 0x93, 0x3c, 0xc8, 0xaf, 0x98, 0xb0, 0x6e, 0x0c, 0x5a, 0x01, 0x62, 0xae, 0x7c, 0xb4, 0xe3,
0xfa, 0xf4, 0x86, 0x47, 0xeb, 0x8f, 0xa6, 0x39, 0x36, 0xe9, 0xf4, 0x99, 0x43, 0xfd, 0xb1, 0xe9,
0x0d, 0x2c, 0x11, 0x08, 0x4c, 0x9b, 0x72, 0x72, 0x13, 0xea, 0xc9, 0x6d, 0x28, 0x3f, 0x74, 0x38,
0xb3, 0xa9, 0x19, 0x74, 0xfa, 0xd4, 0x26, 0x57, 0x71, 0xc6, 0x57, 0x00, 0x95, 0x67, 0xbe, 0x1b,
0x7a, 0xea, 0x1b, 0x58, 0x16, 0xe4, 0x5d, 0xc2, 0x89, 0x06, 0x9a, 0x60, 0x7d, 0x65, 0x6b, 0x03,
0xc5, 0xa4, 0x28, 0x4f, 0x8a, 0xbc, 0x81, 0x25, 0x02, 0x01, 0x12, 0xd9, 0x68, 0xb4, 0x89, 0x0e,
0x4f, 0xde, 0xd2, 0x0e, 0x3f, 0xa0, 0x9c, 0xb4, 0xd5, 0xb3, 0x49, 0xa3, 0x10, 0x4d, 0x1a, 0x70,
0x1a, 0xc3, 0x19, 0xab, 0x7a, 0x04, 0x15, 0xe1, 0x5b, 0xa0, 0x2d, 0x49, 0x7a, 0x13, 0xcd, 0xb3,
0x17, 0x1d, 0x7a, 0x9c, 0xb9, 0x0e, 0x19, 0xbe, 0x20, 0x36, 0x0d, 0xda, 0x95, 0x68, 0xd2, 0x50,
0x5e, 0x09, 0x06, 0x1c, 0x13, 0x19, 0x5f, 0x00, 0xac, 0x48, 0xf5, 0xfb, 0x2c, 0xe0, 0xea, 0xeb,
0x6b, 0x15, 0xa0, 0xbb, 0x55, 0x20, 0xd0, 0x52, 0x7f, 0x2d, 0xd1, 0x5f, 0x4e, 0x23, 0x39, 0xf5,
0xfb, 0x50, 0x61, 0x9c, 0xda, 0x42, 0x7d, 0x71, 0x7d, 0x65, 0xeb, 0xde, 0x7c, 0xf5, 0x52, 0x59,
0x7b, 0x35, 0xe1, 0x54, 0xf6, 0x04, 0x1a, 0xc7, 0x24, 0xc6, 0xf7, 0x22, 0x2c, 0xef, 0x75, 0xa9,
0xc3, 0x19, 0x1f, 0x2f, 0xc0, 0xfa, 0x16, 0xac, 0x7a, 0xbe, 0x3b, 0x62, 0x5d, 0xea, 0x0b, 0x2f,
0x65, 0x07, 0x2a, 0xed, 0xb5, 0x04, 0x53, 0x3d, 0xca, 0xdd, 0xe1, 0x99, 0x4c, 0xf5, 0x29, 0xac,
0xa5, 0x67, 0x61, 0xbd, 0x44, 0x17, 0x25, 0x5a, 0x4b, 0xd0, 0xb5, 0xa3, 0x2b, 0xf7, 0xf8, 0x1a,
0x42, 0xdd, 0x81, 0x25, 0xe1, 0x8a, 0x56, 0x92, 0xd5, 0xfd, 0x97, 0xab, 0x0e, 0x89, 0x3d, 0x98,
0xd6, 0x82, 0x69, 0x8f, 0xfa, 0xd4, 0xe9, 0xd0, 0x76, 0x35, 0xa1, 0x2f, 0x09, 0x12, 0x2c, 0xe1,
0xea, 0x31, 0x54, 0xe8, 0x3b, 0xee, 0x13, 0x4d, 0x91, 0x3d, 0x78, 0x3c, 0xbf, 0x07, 0xa9, 0xc7,
0x68, 0x47, 0xe0, 0x76, 0x1c, 0xee, 0x8f, 0xa7, 0x1d, 0x91, 0x31, 0x1c, 0x53, 0xd6, 0x5b, 0x10,
0x4e, 0x73, 0xd4, 0x1a, 0x2c, 0x0e, 0xe8, 0x58, 0x76, 0xa3, 0x82, 0xc5, 0xa7, 0xba, 0x06, 0x95,
0x11, 0x19, 0x86, 0x89, 0x77, 0x38, 0x3e, 0x6c, 0x2f, 0xb5, 0x80, 0xf1, 0x0d, 0xc0, 0x6a, 0xfa,
0xce, 0x02, 0x06, 0xf1, 0x70, 0x76, 0x10, 0xef, 0xdf, 0xdd, 0x84, 0x5b, 0x66, 0x71, 0x1b, 0xae,
0xce, 0x2c, 0x9a, 0xda, 0x48, 0x5f, 0x00, 0xcd, 0xe2, 0x7a, 0x25, 0xde, 0xbb, 0x3c, 0x62, 0xbb,
0xfc, 0xf1, 0x53, 0xa3, 0x70, 0xfa, 0xa3, 0x59, 0x30, 0x7e, 0x03, 0x28, 0x1b, 0xb4, 0x80, 0x19,
0x7e, 0x00, 0xcb, 0xbd, 0x70, 0x38, 0xcc, 0xcd, 0x6f, 0xe6, 0xd2, 0x6e, 0x12, 0xc7, 0x59, 0x86,
0x8a, 0x20, 0x64, 0x71, 0xd9, 0x8c, 0x06, 0x5a, 0x51, 0x16, 0xf2, 0x8f, 0xe0, 0xde, 0xcb, 0xa2,
0x38, 0x97, 0xa1, 0x1a, 0x70, 0xd9, 0x12, 0xfb, 0x1a, 0x68, 0x25, 0x99, 0x0b, 0xa3, 0x49, 0x63,
0x59, 0x6e, 0x70, 0x80, 0x93, 0x1b, 0xe3, 0xc3, 0x12, 0xfc, 0x57, 0x14, 0x9b, 0xfa, 0x79, 0x40,
0x3c, 0x8f, 0x39, 0xd6, 0x02, 0x6a, 0x7f, 0x09, 0xcb, 0x89, 0xd6, 0x71, 0xf2, 0xeb, 0x79, 0xa7,
0x1d, 0xca, 0x0c, 0x4a, 0x15, 0xe3, 0x8c, 0x26, 0x5b, 0xc9, 0xe2, 0x5f, 0xad, 0xa4, 0xf1, 0x19,
0xc0, 0xb2, 0x38, 0x2e, 0x60, 0xf0, 0x9f, 0xcf, 0x0e, 0xfe, 0xff, 0xf3, 0x07, 0x5f, 0x08, 0xbb,
0x79, 0xe8, 0xdb, 0xbb, 0x67, 0x97, 0x7a, 0xe1, 0xfc, 0x52, 0x2f, 0x5c, 0x5c, 0xea, 0x85, 0xd3,
0x48, 0x07, 0x67, 0x91, 0x0e, 0xce, 0x23, 0x1d, 0x5c, 0x44, 0x3a, 0xf8, 0x19, 0xe9, 0xe0, 0xfd,
0x2f, 0xbd, 0x70, 0xdc, 0x9c, 0xf7, 0x9f, 0xe1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x85,
0x81, 0x86, 0x56, 0x08, 0x00, 0x00,
}
func (m *Group) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Group) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Users != nil {
{
size, err := m.Users.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *GroupList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Identity) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Identity) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Identity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Extra) > 0 {
keysForExtra := make([]string, 0, len(m.Extra))
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(keysForExtra[iNdEx])
copy(dAtA[i:], keysForExtra[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx])))
i--
dAtA[i] = 0xa
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x2a
}
}
{
size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
i -= len(m.ProviderUserName)
copy(dAtA[i:], m.ProviderUserName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderUserName)))
i--
dAtA[i] = 0x1a
i -= len(m.ProviderName)
copy(dAtA[i:], m.ProviderName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderName)))
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *IdentityList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *IdentityList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *IdentityList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m OptionalNames) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m) > 0 {
for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m[iNdEx])
copy(dAtA[i:], m[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *User) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *User) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Groups) > 0 {
for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Groups[iNdEx])
copy(dAtA[i:], m.Groups[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
i--
dAtA[i] = 0x22
}
}
if len(m.Identities) > 0 {
for iNdEx := len(m.Identities) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Identities[iNdEx])
copy(dAtA[i:], m.Identities[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identities[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
i -= len(m.FullName)
copy(dAtA[i:], m.FullName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullName)))
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *UserIdentityMapping) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UserIdentityMapping) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *UserIdentityMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
{
size, err := m.Identity.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *UserList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UserList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *UserList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Group) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if m.Users != nil {
l = m.Users.Size()
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
func (m *GroupList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *Identity) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = len(m.ProviderName)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.ProviderUserName)
n += 1 + l + sovGenerated(uint64(l))
l = m.User.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Extra) > 0 {
for k, v := range m.Extra {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
return n
}
func (m *IdentityList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m OptionalNames) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m) > 0 {
for _, s := range m {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *User) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = len(m.FullName)
n += 1 + l + sovGenerated(uint64(l))
if len(m.Identities) > 0 {
for _, s := range m.Identities {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
if len(m.Groups) > 0 {
for _, s := range m.Groups {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *UserIdentityMapping) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Identity.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.User.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *UserList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Group) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Group{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Users:` + strings.Replace(fmt.Sprintf("%v", this.Users), "OptionalNames", "OptionalNames", 1) + `,`,
`}`,
}, "")
return s
}
func (this *GroupList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]Group{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Group", "Group", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&GroupList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *Identity) String() string {
if this == nil {
return "nil"
}
keysForExtra := make([]string, 0, len(this.Extra))
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
mapStringForExtra := "map[string]string{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
}
mapStringForExtra += "}"
s := strings.Join([]string{`&Identity{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`ProviderName:` + fmt.Sprintf("%v", this.ProviderName) + `,`,
`ProviderUserName:` + fmt.Sprintf("%v", this.ProviderUserName) + `,`,
`User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
`Extra:` + mapStringForExtra + `,`,
`}`,
}, "")
return s
}
func (this *IdentityList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]Identity{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Identity", "Identity", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&IdentityList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *User) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&User{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`FullName:` + fmt.Sprintf("%v", this.FullName) + `,`,
`Identities:` + fmt.Sprintf("%v", this.Identities) + `,`,
`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
`}`,
}, "")
return s
}
func (this *UserIdentityMapping) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&UserIdentityMapping{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Identity:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Identity), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
`User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *UserList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]User{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "User", "User", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&UserList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Group) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Group: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Users == nil {
m.Users = OptionalNames{}
}
if err := m.Users.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GroupList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/user/v1/legacy.go | vendor/github.com/openshift/api/user/v1/legacy.go | package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&User{},
&UserList{},
&Identity{},
&IdentityList{},
&UserIdentityMapping{},
&Group{},
&GroupList{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppliedClusterResourceQuota) DeepCopyInto(out *AppliedClusterResourceQuota) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuota.
func (in *AppliedClusterResourceQuota) DeepCopy() *AppliedClusterResourceQuota {
if in == nil {
return nil
}
out := new(AppliedClusterResourceQuota)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AppliedClusterResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppliedClusterResourceQuotaList) DeepCopyInto(out *AppliedClusterResourceQuotaList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AppliedClusterResourceQuota, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuotaList.
func (in *AppliedClusterResourceQuotaList) DeepCopy() *AppliedClusterResourceQuotaList {
if in == nil {
return nil
}
out := new(AppliedClusterResourceQuotaList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AppliedClusterResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterResourceQuota) DeepCopyInto(out *ClusterResourceQuota) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuota.
func (in *ClusterResourceQuota) DeepCopy() *ClusterResourceQuota {
if in == nil {
return nil
}
out := new(ClusterResourceQuota)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterResourceQuotaList) DeepCopyInto(out *ClusterResourceQuotaList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterResourceQuota, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaList.
func (in *ClusterResourceQuotaList) DeepCopy() *ClusterResourceQuotaList {
if in == nil {
return nil
}
out := new(ClusterResourceQuotaList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterResourceQuotaSelector) DeepCopyInto(out *ClusterResourceQuotaSelector) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.AnnotationSelector != nil {
in, out := &in.AnnotationSelector, &out.AnnotationSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSelector.
func (in *ClusterResourceQuotaSelector) DeepCopy() *ClusterResourceQuotaSelector {
if in == nil {
return nil
}
out := new(ClusterResourceQuotaSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterResourceQuotaSpec) DeepCopyInto(out *ClusterResourceQuotaSpec) {
*out = *in
in.Selector.DeepCopyInto(&out.Selector)
in.Quota.DeepCopyInto(&out.Quota)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSpec.
func (in *ClusterResourceQuotaSpec) DeepCopy() *ClusterResourceQuotaSpec {
if in == nil {
return nil
}
out := new(ClusterResourceQuotaSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterResourceQuotaStatus) DeepCopyInto(out *ClusterResourceQuotaStatus) {
*out = *in
in.Total.DeepCopyInto(&out.Total)
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make(ResourceQuotasStatusByNamespace, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaStatus.
func (in *ClusterResourceQuotaStatus) DeepCopy() *ClusterResourceQuotaStatus {
if in == nil {
return nil
}
out := new(ClusterResourceQuotaStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) {
*out = *in
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace.
func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace {
if in == nil {
return nil
}
out := new(ResourceQuotaStatusByNamespace)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) {
{
in := &in
*out = make(ResourceQuotasStatusByNamespace, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace.
func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace {
if in == nil {
return nil
}
out := new(ResourceQuotasStatusByNamespace)
in.DeepCopyInto(out)
return *out
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/types.go | vendor/github.com/openshift/api/quota/v1/types.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to
// synthetic ResourceQuota object to allow quota evaluation re-use.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterResourceQuota struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired quota
Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Status defines the actual enforced quota and its current usage
Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// ClusterResourceQuotaSpec defines the desired quota restrictions
type ClusterResourceQuotaSpec struct {
// Selector is the selector used to match projects.
// It should only select active projects on the scale of dozens (though it can select
// many more less active projects). These projects will contend on object creation through
// this resource.
Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
// Quota defines the desired quota
Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"`
}
// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector
// must present. If only one is present, it is the only selection criteria. If both are specified,
// the project must match both restrictions.
type ClusterResourceQuotaSelector struct {
// LabelSelector is used to select projects by label.
// +optional
// +nullable
LabelSelector *metav1.LabelSelector `json:"labels" protobuf:"bytes,1,opt,name=labels"`
// AnnotationSelector is used to select projects by annotation.
// +optional
// +nullable
AnnotationSelector map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
}
// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage
type ClusterResourceQuotaStatus struct {
// Total defines the actual enforced quota and its current usage across all projects
Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"`
// Namespaces slices the usage by project. This division allows for quick resolution of
// deletion reconciliation inside of a single project without requiring a recalculation
// across all projects. This can be used to pull the deltas for a given project.
// +optional
// +nullable
Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceQuotaList is a collection of ClusterResourceQuotas
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterResourceQuotaList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of ClusterResourceQuotas
Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace
type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace
// ResourceQuotaStatusByNamespace gives status for a particular project
type ResourceQuotaStatusByNamespace struct {
// Namespace the project this status applies to
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
// Status indicates how many resources have been consumed by this project
Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +genclient:onlyVerbs=get,list
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection
// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to
// his project and their associated usage.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type AppliedClusterResourceQuota struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired quota
Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Status defines the actual enforced quota and its current usage
Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type AppliedClusterResourceQuotaList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of AppliedClusterResourceQuota
Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/register.go | vendor/github.com/openshift/api/quota/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "quota.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&ClusterResourceQuota{},
&ClusterResourceQuotaList{},
&AppliedClusterResourceQuota{},
&AppliedClusterResourceQuotaList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_AppliedClusterResourceQuota = map[string]string{
"": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Spec defines the desired quota",
"status": "Status defines the actual enforced quota and its current usage",
}
func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string {
return map_AppliedClusterResourceQuota
}
var map_AppliedClusterResourceQuotaList = map[string]string{
"": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of AppliedClusterResourceQuota",
}
func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string {
return map_AppliedClusterResourceQuotaList
}
var map_ClusterResourceQuota = map[string]string{
"": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Spec defines the desired quota",
"status": "Status defines the actual enforced quota and its current usage",
}
func (ClusterResourceQuota) SwaggerDoc() map[string]string {
return map_ClusterResourceQuota
}
var map_ClusterResourceQuotaList = map[string]string{
"": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of ClusterResourceQuotas",
}
func (ClusterResourceQuotaList) SwaggerDoc() map[string]string {
return map_ClusterResourceQuotaList
}
var map_ClusterResourceQuotaSelector = map[string]string{
"": "ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector must present. If only one is present, it is the only selection criteria. If both are specified, the project must match both restrictions.",
"labels": "LabelSelector is used to select projects by label.",
"annotations": "AnnotationSelector is used to select projects by annotation.",
}
func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string {
return map_ClusterResourceQuotaSelector
}
var map_ClusterResourceQuotaSpec = map[string]string{
"": "ClusterResourceQuotaSpec defines the desired quota restrictions",
"selector": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.",
"quota": "Quota defines the desired quota",
}
func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string {
return map_ClusterResourceQuotaSpec
}
var map_ClusterResourceQuotaStatus = map[string]string{
"": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage",
"total": "Total defines the actual enforced quota and its current usage across all projects",
"namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.",
}
func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string {
return map_ClusterResourceQuotaStatus
}
var map_ResourceQuotaStatusByNamespace = map[string]string{
"": "ResourceQuotaStatusByNamespace gives status for a particular project",
"namespace": "Namespace the project this status applies to",
"status": "Status indicates how many resources have been consumed by this project",
}
func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string {
return map_ResourceQuotaStatusByNamespace
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/doc.go | vendor/github.com/openshift/api/quota/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/quota/apis/quota
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=quota.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/generated.pb.go | vendor/github.com/openshift/api/quota/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/quota/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *AppliedClusterResourceQuota) Reset() { *m = AppliedClusterResourceQuota{} }
func (*AppliedClusterResourceQuota) ProtoMessage() {}
func (*AppliedClusterResourceQuota) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{0}
}
func (m *AppliedClusterResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AppliedClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *AppliedClusterResourceQuota) XXX_Merge(src proto.Message) {
xxx_messageInfo_AppliedClusterResourceQuota.Merge(m, src)
}
func (m *AppliedClusterResourceQuota) XXX_Size() int {
return m.Size()
}
func (m *AppliedClusterResourceQuota) XXX_DiscardUnknown() {
xxx_messageInfo_AppliedClusterResourceQuota.DiscardUnknown(m)
}
var xxx_messageInfo_AppliedClusterResourceQuota proto.InternalMessageInfo
func (m *AppliedClusterResourceQuotaList) Reset() { *m = AppliedClusterResourceQuotaList{} }
func (*AppliedClusterResourceQuotaList) ProtoMessage() {}
func (*AppliedClusterResourceQuotaList) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{1}
}
func (m *AppliedClusterResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AppliedClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *AppliedClusterResourceQuotaList) XXX_Merge(src proto.Message) {
xxx_messageInfo_AppliedClusterResourceQuotaList.Merge(m, src)
}
func (m *AppliedClusterResourceQuotaList) XXX_Size() int {
return m.Size()
}
func (m *AppliedClusterResourceQuotaList) XXX_DiscardUnknown() {
xxx_messageInfo_AppliedClusterResourceQuotaList.DiscardUnknown(m)
}
var xxx_messageInfo_AppliedClusterResourceQuotaList proto.InternalMessageInfo
func (m *ClusterResourceQuota) Reset() { *m = ClusterResourceQuota{} }
func (*ClusterResourceQuota) ProtoMessage() {}
func (*ClusterResourceQuota) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{2}
}
func (m *ClusterResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ClusterResourceQuota) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResourceQuota.Merge(m, src)
}
func (m *ClusterResourceQuota) XXX_Size() int {
return m.Size()
}
func (m *ClusterResourceQuota) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResourceQuota.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResourceQuota proto.InternalMessageInfo
func (m *ClusterResourceQuotaList) Reset() { *m = ClusterResourceQuotaList{} }
func (*ClusterResourceQuotaList) ProtoMessage() {}
func (*ClusterResourceQuotaList) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{3}
}
func (m *ClusterResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ClusterResourceQuotaList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResourceQuotaList.Merge(m, src)
}
func (m *ClusterResourceQuotaList) XXX_Size() int {
return m.Size()
}
func (m *ClusterResourceQuotaList) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResourceQuotaList.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResourceQuotaList proto.InternalMessageInfo
func (m *ClusterResourceQuotaSelector) Reset() { *m = ClusterResourceQuotaSelector{} }
func (*ClusterResourceQuotaSelector) ProtoMessage() {}
func (*ClusterResourceQuotaSelector) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{4}
}
func (m *ClusterResourceQuotaSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResourceQuotaSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ClusterResourceQuotaSelector) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResourceQuotaSelector.Merge(m, src)
}
func (m *ClusterResourceQuotaSelector) XXX_Size() int {
return m.Size()
}
func (m *ClusterResourceQuotaSelector) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResourceQuotaSelector.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResourceQuotaSelector proto.InternalMessageInfo
func (m *ClusterResourceQuotaSpec) Reset() { *m = ClusterResourceQuotaSpec{} }
func (*ClusterResourceQuotaSpec) ProtoMessage() {}
func (*ClusterResourceQuotaSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{5}
}
func (m *ClusterResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ClusterResourceQuotaSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResourceQuotaSpec.Merge(m, src)
}
func (m *ClusterResourceQuotaSpec) XXX_Size() int {
return m.Size()
}
func (m *ClusterResourceQuotaSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResourceQuotaSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResourceQuotaSpec proto.InternalMessageInfo
func (m *ClusterResourceQuotaStatus) Reset() { *m = ClusterResourceQuotaStatus{} }
func (*ClusterResourceQuotaStatus) ProtoMessage() {}
func (*ClusterResourceQuotaStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{6}
}
func (m *ClusterResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ClusterResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ClusterResourceQuotaStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClusterResourceQuotaStatus.Merge(m, src)
}
func (m *ClusterResourceQuotaStatus) XXX_Size() int {
return m.Size()
}
func (m *ClusterResourceQuotaStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ClusterResourceQuotaStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterResourceQuotaStatus proto.InternalMessageInfo
func (m *ResourceQuotaStatusByNamespace) Reset() { *m = ResourceQuotaStatusByNamespace{} }
func (*ResourceQuotaStatusByNamespace) ProtoMessage() {}
func (*ResourceQuotaStatusByNamespace) Descriptor() ([]byte, []int) {
return fileDescriptor_f605e5b8440aecb8, []int{7}
}
func (m *ResourceQuotaStatusByNamespace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResourceQuotaStatusByNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ResourceQuotaStatusByNamespace) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceQuotaStatusByNamespace.Merge(m, src)
}
func (m *ResourceQuotaStatusByNamespace) XXX_Size() int {
return m.Size()
}
func (m *ResourceQuotaStatusByNamespace) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceQuotaStatusByNamespace.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceQuotaStatusByNamespace proto.InternalMessageInfo
func init() {
proto.RegisterType((*AppliedClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuota")
proto.RegisterType((*AppliedClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuotaList")
proto.RegisterType((*ClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuota")
proto.RegisterType((*ClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaList")
proto.RegisterType((*ClusterResourceQuotaSelector)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector")
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector.AnnotationsEntry")
proto.RegisterType((*ClusterResourceQuotaSpec)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSpec")
proto.RegisterType((*ClusterResourceQuotaStatus)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaStatus")
proto.RegisterType((*ResourceQuotaStatusByNamespace)(nil), "github.com.openshift.api.quota.v1.ResourceQuotaStatusByNamespace")
}
func init() {
proto.RegisterFile("github.com/openshift/api/quota/v1/generated.proto", fileDescriptor_f605e5b8440aecb8)
}
var fileDescriptor_f605e5b8440aecb8 = []byte{
// 716 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6f, 0xd3, 0x3e,
0x1c, 0x6d, 0xba, 0x75, 0x5a, 0xbd, 0xff, 0xfe, 0xda, 0xac, 0x1d, 0xaa, 0x82, 0xd2, 0x2d, 0x12,
0x62, 0x17, 0x1c, 0x3a, 0x10, 0x4c, 0x20, 0x86, 0x16, 0x84, 0x10, 0x68, 0x30, 0x08, 0x9c, 0xd0,
0x40, 0xb8, 0x99, 0xd7, 0x86, 0x26, 0x71, 0x88, 0x9d, 0x4a, 0xbd, 0xf1, 0x09, 0x10, 0x9f, 0x81,
0x0f, 0xc2, 0x0d, 0x69, 0x37, 0x76, 0x01, 0xed, 0x34, 0xd1, 0xc0, 0x07, 0x41, 0x76, 0xdc, 0xa4,
0xdb, 0xda, 0xad, 0x6c, 0x07, 0x2e, 0xdc, 0xe2, 0x5f, 0xfd, 0xde, 0xfb, 0xfd, 0x5e, 0x9e, 0xdd,
0x80, 0x7a, 0xd3, 0xe5, 0xad, 0xb8, 0x81, 0x1c, 0xea, 0x9b, 0x34, 0x24, 0x01, 0x6b, 0xb9, 0x3b,
0xdc, 0xc4, 0xa1, 0x6b, 0xbe, 0x8b, 0x29, 0xc7, 0x66, 0xa7, 0x6e, 0x36, 0x49, 0x40, 0x22, 0xcc,
0xc9, 0x36, 0x0a, 0x23, 0xca, 0x29, 0x5c, 0xca, 0x21, 0x28, 0x83, 0x20, 0x1c, 0xba, 0x48, 0x42,
0x50, 0xa7, 0x5e, 0xbd, 0x32, 0xc0, 0xda, 0xa4, 0x4d, 0x6a, 0x4a, 0x64, 0x23, 0xde, 0x91, 0x2b,
0xb9, 0x90, 0x4f, 0x29, 0x63, 0xd5, 0x68, 0xaf, 0x32, 0xe4, 0x52, 0x29, 0xeb, 0xd0, 0x88, 0x0c,
0x51, 0xad, 0x5e, 0xcf, 0xf7, 0xf8, 0xd8, 0x69, 0xb9, 0x01, 0x89, 0xba, 0x66, 0xd8, 0x6e, 0x8a,
0x02, 0x33, 0x7d, 0x32, 0xb4, 0xd7, 0xea, 0x8d, 0x51, 0xa8, 0x28, 0x0e, 0xb8, 0xeb, 0x13, 0x93,
0x39, 0x2d, 0xe2, 0xe3, 0xa3, 0x38, 0xe3, 0x4b, 0x11, 0x5c, 0x58, 0x0f, 0x43, 0xcf, 0x25, 0xdb,
0xf7, 0xbc, 0x98, 0x71, 0x12, 0xd9, 0x84, 0xd1, 0x38, 0x72, 0xc8, 0x33, 0x31, 0x23, 0x7c, 0x03,
0xa6, 0x85, 0xe4, 0x36, 0xe6, 0xb8, 0xa2, 0x2d, 0x6a, 0xcb, 0x33, 0x2b, 0x57, 0x51, 0x2a, 0x85,
0x06, 0xa5, 0x50, 0xd8, 0x6e, 0x8a, 0x02, 0x43, 0x62, 0x37, 0xea, 0xd4, 0xd1, 0x66, 0xe3, 0x2d,
0x71, 0xf8, 0x63, 0xc2, 0xb1, 0x05, 0x77, 0x0f, 0x6a, 0x85, 0xe4, 0xa0, 0x06, 0xf2, 0x9a, 0x9d,
0xb1, 0xc2, 0x57, 0x60, 0x92, 0x85, 0xc4, 0xa9, 0x14, 0x25, 0xfb, 0x6d, 0x74, 0xaa, 0xe9, 0x68,
0x58, 0xa3, 0xcf, 0x43, 0xe2, 0x58, 0xff, 0x29, 0xa1, 0x49, 0xb1, 0xb2, 0x25, 0x2d, 0x24, 0x60,
0x8a, 0x71, 0xcc, 0x63, 0x56, 0x99, 0x90, 0x02, 0x77, 0xce, 0x2a, 0x20, 0x49, 0xac, 0xff, 0x95,
0xc4, 0x54, 0xba, 0xb6, 0x15, 0xb9, 0xf1, 0x4b, 0x03, 0xb5, 0x13, 0x7c, 0xdc, 0x70, 0x19, 0x87,
0x5b, 0xc7, 0xbc, 0x44, 0xe3, 0x79, 0x29, 0xd0, 0xd2, 0xc9, 0x39, 0xa5, 0x3e, 0xdd, 0xaf, 0x0c,
0xf8, 0xe8, 0x80, 0x92, 0xcb, 0x89, 0xcf, 0x2a, 0xc5, 0xc5, 0x89, 0xe5, 0x99, 0x95, 0xb5, 0x31,
0xe6, 0x3c, 0xa1, 0x61, 0x6b, 0x56, 0x49, 0x95, 0x1e, 0x0a, 0x52, 0x3b, 0xe5, 0x36, 0x3e, 0x17,
0xc1, 0xc2, 0xbf, 0x9c, 0x9c, 0x23, 0x27, 0xdf, 0x35, 0x50, 0xf9, 0x4b, 0x01, 0xd9, 0x3a, 0x1c,
0x90, 0x9b, 0x67, 0x1c, 0x70, 0x44, 0x32, 0xbe, 0x16, 0xc1, 0xc5, 0xa1, 0x7e, 0x10, 0x8f, 0x38,
0x9c, 0x46, 0xf0, 0x35, 0x98, 0xf2, 0x70, 0x83, 0x78, 0x4c, 0x8d, 0x76, 0x6d, 0xcc, 0xd1, 0x04,
0xa6, 0x4f, 0x62, 0xcd, 0x27, 0x07, 0xb5, 0xd9, 0x43, 0x25, 0x5b, 0xb1, 0xc2, 0x0f, 0x1a, 0x98,
0xc1, 0x41, 0x40, 0x39, 0xe6, 0x2e, 0x0d, 0xfa, 0x53, 0x3e, 0x3d, 0xeb, 0x6b, 0x54, 0xf4, 0x68,
0x3d, 0xa7, 0xbc, 0x1f, 0xf0, 0xa8, 0x6b, 0x55, 0xd5, 0xf8, 0x30, 0xff, 0x25, 0xeb, 0x65, 0xb0,
0x81, 0xea, 0x1a, 0x98, 0x3b, 0x0a, 0x86, 0x73, 0x60, 0xa2, 0x4d, 0xba, 0xd2, 0x81, 0xb2, 0x2d,
0x1e, 0xe1, 0x02, 0x28, 0x75, 0xb0, 0x17, 0x13, 0x99, 0xeb, 0xb2, 0x9d, 0x2e, 0x6e, 0x15, 0x57,
0x35, 0xe3, 0xdb, 0x88, 0xa8, 0x88, 0xd0, 0x42, 0x1f, 0x4c, 0x33, 0xa5, 0xaa, 0xfc, 0xbc, 0x7b,
0xce, 0x49, 0xf3, 0xec, 0x64, 0xe3, 0x64, 0x12, 0xf0, 0x11, 0x28, 0x49, 0x12, 0x75, 0xfa, 0x2e,
0x0d, 0xbc, 0x3b, 0x24, 0xfe, 0xc8, 0x04, 0xf9, 0xf1, 0x73, 0x96, 0x25, 0x45, 0x96, 0xec, 0x94,
0xc2, 0xe8, 0x69, 0xa0, 0x3a, 0xfa, 0xe4, 0xc0, 0x0d, 0x50, 0xe2, 0x94, 0x63, 0x4f, 0x8d, 0x75,
0xf9, 0x74, 0xa9, 0xf4, 0xc4, 0x65, 0x62, 0x2f, 0x04, 0xda, 0x4e, 0x49, 0x60, 0x0c, 0x40, 0x80,
0x7d, 0xc2, 0x42, 0xec, 0x90, 0x7e, 0x26, 0xd6, 0xc7, 0x70, 0x6a, 0x98, 0x42, 0xf7, 0x49, 0x9f,
0x29, 0xbf, 0xaa, 0xb2, 0x12, 0xb3, 0x07, 0x84, 0x8c, 0x4f, 0x1a, 0xd0, 0x4f, 0xa6, 0x80, 0x26,
0x28, 0x67, 0x80, 0x34, 0x10, 0xd6, 0xbc, 0x62, 0x2d, 0x67, 0xbb, 0xec, 0x7c, 0x0f, 0xdc, 0xcc,
0x6e, 0xa8, 0xe2, 0x9f, 0x39, 0x33, 0xe2, 0x2e, 0xb2, 0x1e, 0xec, 0xf6, 0xf4, 0xc2, 0x5e, 0x4f,
0x2f, 0xec, 0xf7, 0xf4, 0xc2, 0xfb, 0x44, 0xd7, 0x76, 0x13, 0x5d, 0xdb, 0x4b, 0x74, 0x6d, 0x3f,
0xd1, 0xb5, 0x1f, 0x89, 0xae, 0x7d, 0xfc, 0xa9, 0x17, 0x5e, 0x2e, 0x9d, 0xfa, 0xe1, 0xf4, 0x3b,
0x00, 0x00, 0xff, 0xff, 0xda, 0x49, 0x50, 0x7b, 0x5c, 0x09, 0x00, 0x00,
}
func (m *AppliedClusterResourceQuota) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AppliedClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AppliedClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *AppliedClusterResourceQuotaList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AppliedClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AppliedClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ClusterResourceQuota) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ClusterResourceQuotaList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ClusterResourceQuotaSelector) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterResourceQuotaSelector) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ClusterResourceQuotaSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.AnnotationSelector) > 0 {
keysForAnnotationSelector := make([]string, 0, len(m.AnnotationSelector))
for k := range m.AnnotationSelector {
keysForAnnotationSelector = append(keysForAnnotationSelector, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector)
for iNdEx := len(keysForAnnotationSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.AnnotationSelector[string(keysForAnnotationSelector[iNdEx])]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(keysForAnnotationSelector[iNdEx])
copy(dAtA[i:], keysForAnnotationSelector[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotationSelector[iNdEx])))
i--
dAtA[i] = 0xa
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x12
}
}
if m.LabelSelector != nil {
{
size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *ClusterResourceQuotaSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ClusterResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ClusterResourceQuotaStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ClusterResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ClusterResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Namespaces) > 0 {
for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Total.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ResourceQuotaStatusByNamespace) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResourceQuotaStatusByNamespace) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ResourceQuotaStatusByNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *AppliedClusterResourceQuota) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *AppliedClusterResourceQuotaList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *ClusterResourceQuota) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *ClusterResourceQuotaList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *ClusterResourceQuotaSelector) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.LabelSelector != nil {
l = m.LabelSelector.Size()
n += 1 + l + sovGenerated(uint64(l))
}
if len(m.AnnotationSelector) > 0 {
for k, v := range m.AnnotationSelector {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
return n
}
func (m *ClusterResourceQuotaSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Selector.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Quota.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *ClusterResourceQuotaStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Total.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Namespaces) > 0 {
for _, e := range m.Namespaces {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *ResourceQuotaStatusByNamespace) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Namespace)
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *AppliedClusterResourceQuota) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&AppliedClusterResourceQuota{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *AppliedClusterResourceQuotaList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]AppliedClusterResourceQuota{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppliedClusterResourceQuota", "AppliedClusterResourceQuota", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&AppliedClusterResourceQuotaList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *ClusterResourceQuota) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ClusterResourceQuota{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *ClusterResourceQuotaList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]ClusterResourceQuota{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterResourceQuota", "ClusterResourceQuota", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&ClusterResourceQuotaList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *ClusterResourceQuotaSelector) String() string {
if this == nil {
return "nil"
}
keysForAnnotationSelector := make([]string, 0, len(this.AnnotationSelector))
for k := range this.AnnotationSelector {
keysForAnnotationSelector = append(keysForAnnotationSelector, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector)
mapStringForAnnotationSelector := "map[string]string{"
for _, k := range keysForAnnotationSelector {
mapStringForAnnotationSelector += fmt.Sprintf("%v: %v,", k, this.AnnotationSelector[k])
}
mapStringForAnnotationSelector += "}"
s := strings.Join([]string{`&ClusterResourceQuotaSelector{`,
`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
`AnnotationSelector:` + mapStringForAnnotationSelector + `,`,
`}`,
}, "")
return s
}
func (this *ClusterResourceQuotaSpec) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ClusterResourceQuotaSpec{`,
`Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "ClusterResourceQuotaSelector", "ClusterResourceQuotaSelector", 1), `&`, ``, 1) + `,`,
`Quota:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Quota), "ResourceQuotaSpec", "v11.ResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *ClusterResourceQuotaStatus) String() string {
if this == nil {
return "nil"
}
repeatedStringForNamespaces := "[]ResourceQuotaStatusByNamespace{"
for _, f := range this.Namespaces {
repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "ResourceQuotaStatusByNamespace", "ResourceQuotaStatusByNamespace", 1), `&`, ``, 1) + ","
}
repeatedStringForNamespaces += "}"
s := strings.Join([]string{`&ClusterResourceQuotaStatus{`,
`Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
`Namespaces:` + repeatedStringForNamespaces + `,`,
`}`,
}, "")
return s
}
func (this *ResourceQuotaStatusByNamespace) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ResourceQuotaStatusByNamespace{`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *AppliedClusterResourceQuota) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AppliedClusterResourceQuota: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AppliedClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/quota/v1/legacy.go | vendor/github.com/openshift/api/quota/v1/legacy.go | package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&ClusterResourceQuota{},
&ClusterResourceQuotaList{},
&AppliedClusterResourceQuota{},
&AppliedClusterResourceQuotaList{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Action) DeepCopyInto(out *Action) {
*out = *in
in.Content.DeepCopyInto(&out.Content)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action.
func (in *Action) DeepCopy() *Action {
if in == nil {
return nil
}
out := new(Action)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AggregationRule != nil {
in, out := &in.AggregationRule, &out.AggregationRule
*out = new(rbacv1.AggregationRule)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
func (in *ClusterRole) DeepCopy() *ClusterRole {
if in == nil {
return nil
}
out := new(ClusterRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.UserNames != nil {
in, out := &in.UserNames, &out.UserNames
*out = make(OptionalNames, len(*in))
copy(*out, *in)
}
if in.GroupNames != nil {
in, out := &in.GroupNames, &out.GroupNames
*out = make(OptionalNames, len(*in))
copy(*out, *in)
}
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]corev1.ObjectReference, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
if in == nil {
return nil
}
out := new(ClusterRoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
if in == nil {
return nil
}
out := new(ClusterRoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRole, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
if in == nil {
return nil
}
out := new(ClusterRoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupRestriction) DeepCopyInto(out *GroupRestriction) {
*out = *in
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]metav1.LabelSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupRestriction.
func (in *GroupRestriction) DeepCopy() *GroupRestriction {
if in == nil {
return nil
}
out := new(GroupRestriction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IsPersonalSubjectAccessReview) DeepCopyInto(out *IsPersonalSubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsPersonalSubjectAccessReview.
func (in *IsPersonalSubjectAccessReview) DeepCopy() *IsPersonalSubjectAccessReview {
if in == nil {
return nil
}
out := new(IsPersonalSubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IsPersonalSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalResourceAccessReview) DeepCopyInto(out *LocalResourceAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Action.DeepCopyInto(&out.Action)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResourceAccessReview.
func (in *LocalResourceAccessReview) DeepCopy() *LocalResourceAccessReview {
if in == nil {
return nil
}
out := new(LocalResourceAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LocalResourceAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Action.DeepCopyInto(&out.Action)
if in.GroupsSlice != nil {
in, out := &in.GroupsSlice, &out.GroupsSlice
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Scopes != nil {
in, out := &in.Scopes, &out.Scopes
*out = make(OptionalScopes, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview.
func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
if in == nil {
return nil
}
out := new(LocalSubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedClusterRole) DeepCopyInto(out *NamedClusterRole) {
*out = *in
in.Role.DeepCopyInto(&out.Role)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRole.
func (in *NamedClusterRole) DeepCopy() *NamedClusterRole {
if in == nil {
return nil
}
out := new(NamedClusterRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedClusterRoleBinding) DeepCopyInto(out *NamedClusterRoleBinding) {
*out = *in
in.RoleBinding.DeepCopyInto(&out.RoleBinding)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRoleBinding.
func (in *NamedClusterRoleBinding) DeepCopy() *NamedClusterRoleBinding {
if in == nil {
return nil
}
out := new(NamedClusterRoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRole) DeepCopyInto(out *NamedRole) {
*out = *in
in.Role.DeepCopyInto(&out.Role)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRole.
func (in *NamedRole) DeepCopy() *NamedRole {
if in == nil {
return nil
}
out := new(NamedRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRoleBinding) DeepCopyInto(out *NamedRoleBinding) {
*out = *in
in.RoleBinding.DeepCopyInto(&out.RoleBinding)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRoleBinding.
func (in *NamedRoleBinding) DeepCopy() *NamedRoleBinding {
if in == nil {
return nil
}
out := new(NamedRoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in OptionalNames) DeepCopyInto(out *OptionalNames) {
{
in := &in
*out = make(OptionalNames, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames.
func (in OptionalNames) DeepCopy() OptionalNames {
if in == nil {
return nil
}
out := new(OptionalNames)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in OptionalScopes) DeepCopyInto(out *OptionalScopes) {
{
in := &in
*out = make(OptionalScopes, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalScopes.
func (in OptionalScopes) DeepCopy() OptionalScopes {
if in == nil {
return nil
}
out := new(OptionalScopes)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
in.AttributeRestrictions.DeepCopyInto(&out.AttributeRestrictions)
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLsSlice != nil {
in, out := &in.NonResourceURLsSlice, &out.NonResourceURLsSlice
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
func (in *PolicyRule) DeepCopy() *PolicyRule {
if in == nil {
return nil
}
out := new(PolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceAccessReview) DeepCopyInto(out *ResourceAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Action.DeepCopyInto(&out.Action)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReview.
func (in *ResourceAccessReview) DeepCopy() *ResourceAccessReview {
if in == nil {
return nil
}
out := new(ResourceAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceAccessReviewResponse) DeepCopyInto(out *ResourceAccessReviewResponse) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UsersSlice != nil {
in, out := &in.UsersSlice, &out.UsersSlice
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.GroupsSlice != nil {
in, out := &in.GroupsSlice, &out.GroupsSlice
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReviewResponse.
func (in *ResourceAccessReviewResponse) DeepCopy() *ResourceAccessReviewResponse {
if in == nil {
return nil
}
out := new(ResourceAccessReviewResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceAccessReviewResponse) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Role) DeepCopyInto(out *Role) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
func (in *Role) DeepCopy() *Role {
if in == nil {
return nil
}
out := new(Role)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.UserNames != nil {
in, out := &in.UserNames, &out.UserNames
*out = make(OptionalNames, len(*in))
copy(*out, *in)
}
if in.GroupNames != nil {
in, out := &in.GroupNames, &out.GroupNames
*out = make(OptionalNames, len(*in))
copy(*out, *in)
}
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]corev1.ObjectReference, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
func (in *RoleBinding) DeepCopy() *RoleBinding {
if in == nil {
return nil
}
out := new(RoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
func (in *RoleBindingList) DeepCopy() *RoleBindingList {
if in == nil {
return nil
}
out := new(RoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingRestriction) DeepCopyInto(out *RoleBindingRestriction) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestriction.
func (in *RoleBindingRestriction) DeepCopy() *RoleBindingRestriction {
if in == nil {
return nil
}
out := new(RoleBindingRestriction)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBindingRestriction) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingRestrictionList) DeepCopyInto(out *RoleBindingRestrictionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RoleBindingRestriction, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionList.
func (in *RoleBindingRestrictionList) DeepCopy() *RoleBindingRestrictionList {
if in == nil {
return nil
}
out := new(RoleBindingRestrictionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBindingRestrictionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingRestrictionSpec) DeepCopyInto(out *RoleBindingRestrictionSpec) {
*out = *in
if in.UserRestriction != nil {
in, out := &in.UserRestriction, &out.UserRestriction
*out = new(UserRestriction)
(*in).DeepCopyInto(*out)
}
if in.GroupRestriction != nil {
in, out := &in.GroupRestriction, &out.GroupRestriction
*out = new(GroupRestriction)
(*in).DeepCopyInto(*out)
}
if in.ServiceAccountRestriction != nil {
in, out := &in.ServiceAccountRestriction, &out.ServiceAccountRestriction
*out = new(ServiceAccountRestriction)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionSpec.
func (in *RoleBindingRestrictionSpec) DeepCopy() *RoleBindingRestrictionSpec {
if in == nil {
return nil
}
out := new(RoleBindingRestrictionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleList) DeepCopyInto(out *RoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Role, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
func (in *RoleList) DeepCopy() *RoleList {
if in == nil {
return nil
}
out := new(RoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview.
func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
if in == nil {
return nil
}
out := new(SelfSubjectRulesReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) {
*out = *in
if in.Scopes != nil {
in, out := &in.Scopes, &out.Scopes
*out = make(OptionalScopes, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec.
func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec {
if in == nil {
return nil
}
out := new(SelfSubjectRulesReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountReference) DeepCopyInto(out *ServiceAccountReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountReference.
func (in *ServiceAccountReference) DeepCopy() *ServiceAccountReference {
if in == nil {
return nil
}
out := new(ServiceAccountReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountRestriction) DeepCopyInto(out *ServiceAccountRestriction) {
*out = *in
if in.ServiceAccounts != nil {
in, out := &in.ServiceAccounts, &out.ServiceAccounts
*out = make([]ServiceAccountReference, len(*in))
copy(*out, *in)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRestriction.
func (in *ServiceAccountRestriction) DeepCopy() *ServiceAccountRestriction {
if in == nil {
return nil
}
out := new(ServiceAccountRestriction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Action.DeepCopyInto(&out.Action)
if in.GroupsSlice != nil {
in, out := &in.GroupsSlice, &out.GroupsSlice
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Scopes != nil {
in, out := &in.Scopes, &out.Scopes
*out = make(OptionalScopes, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview.
func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
if in == nil {
return nil
}
out := new(SubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectAccessReviewResponse) DeepCopyInto(out *SubjectAccessReviewResponse) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewResponse.
func (in *SubjectAccessReviewResponse) DeepCopy() *SubjectAccessReviewResponse {
if in == nil {
return nil
}
out := new(SubjectAccessReviewResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SubjectAccessReviewResponse) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectRulesReview) DeepCopyInto(out *SubjectRulesReview) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// Spec and Status contain slice fields, so their own DeepCopyInto is
	// delegated to rather than relying on the shallow struct assignment above.
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReview.
func (in *SubjectRulesReview) DeepCopy() *SubjectRulesReview {
	if in == nil {
		return nil
	}
	out := new(SubjectRulesReview)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SubjectRulesReview) DeepCopyObject() runtime.Object {
	// Nil check prevents a non-nil interface wrapping a typed nil pointer.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectRulesReviewSpec) DeepCopyInto(out *SubjectRulesReviewSpec) {
	*out = *in
	// Nil slices stay nil (preserving the set-vs-unset distinction OptionalScopes
	// exists for); non-nil slices are re-allocated so out does not alias in.
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Scopes != nil {
		in, out := &in.Scopes, &out.Scopes
		*out = make(OptionalScopes, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewSpec.
func (in *SubjectRulesReviewSpec) DeepCopy() *SubjectRulesReviewSpec {
	if in == nil {
		return nil
	}
	out := new(SubjectRulesReviewSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) {
	*out = *in
	if in.Rules != nil {
		in, out := &in.Rules, &out.Rules
		*out = make([]PolicyRule, len(*in))
		// PolicyRule has reference-typed fields, so each element gets its own
		// DeepCopyInto instead of a plain copy().
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus.
func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus {
	if in == nil {
		return nil
	}
	out := new(SubjectRulesReviewStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserRestriction) DeepCopyInto(out *UserRestriction) {
	*out = *in
	// String slices are copied with copy(); the LabelSelector slice needs
	// element-wise DeepCopyInto because LabelSelector itself holds maps/slices.
	if in.Users != nil {
		in, out := &in.Users, &out.Users
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Selectors != nil {
		in, out := &in.Selectors, &out.Selectors
		*out = make([]metav1.LabelSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRestriction.
func (in *UserRestriction) DeepCopy() *UserRestriction {
	if in == nil {
		return nil
	}
	out := new(UserRestriction)
	in.DeepCopyInto(out)
	return out
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/types.go | vendor/github.com/openshift/api/authorization/v1/types.go | package v1
import (
"fmt"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
)
// Authorization is calculated against
// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match
// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match
// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match
// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match
// 5. deny by default
const (
	// GroupKind is string representation of kind used in role binding subjects that represents the "group".
	GroupKind = "Group"
	// UserKind is string representation of kind used in role binding subjects that represents the "user".
	UserKind = "User"

	// ScopesKey is the well-known key ("scopes.authorization.openshift.io")
	// under which scope lists are carried — presumably as an impersonation
	// header/annotation key; confirm against callers.
	ScopesKey = "scopes.authorization.openshift.io"
)
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
	// AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
	// If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
	// The PreserveUnknownFields marker keeps arbitrary nested content from being
	// pruned by CRD schema validation.
	// +kubebuilder:pruning:PreserveUnknownFields
	AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"`
	// APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
	// That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
	// will be allowed
	// +optional
	// +nullable
	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"`
	// Resources is a list of resources this rule applies to. ResourceAll represents all resources.
	Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"`
	// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
	// NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
	// This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
	NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type IsPersonalSubjectAccessReview struct {
	// Carries only TypeMeta: the type's presence (not its content) is the signal.
	metav1.TypeMeta `json:",inline"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type Role struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Rules holds all the PolicyRules for this Role
	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
}
// OptionalNames is an array that may also be left nil to distinguish between set and unset.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type OptionalNames []string
func (t OptionalNames) String() string {
return fmt.Sprintf("%v", []string(t))
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace.
// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RoleBinding struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// UserNames holds all the usernames directly bound to the role.
	// This field should only be specified when supporting legacy clients and servers.
	// See Subjects for further details.
	// OptionalNames (nilable) so set-but-empty can be told apart from unset.
	// +k8s:conversion-gen=false
	// +optional
	UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
	// GroupNames holds all the groups directly bound to the role.
	// This field should only be specified when supporting legacy clients and servers.
	// See Subjects for further details.
	// +k8s:conversion-gen=false
	// +optional
	GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
	// Subjects hold object references to authorize with this rule.
	// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
	// Thus newer clients that do not need to support backwards compatibility should send
	// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
	// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
	Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`

	// RoleRef can only reference the current namespace and the global namespace.
	// If the RoleRef cannot be resolved, the Authorizer must return an error.
	// Since Policy is a singleton, this is sufficient knowledge to locate a role.
	RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
}
// NamedRole relates a Role with a name
type NamedRole struct {
	// Name is the name of the role
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Role is the role being named
	Role Role `json:"role" protobuf:"bytes,2,opt,name=role"`
}

// NamedRoleBinding relates a role binding with a name
type NamedRoleBinding struct {
	// Name is the name of the role binding
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// RoleBinding is the role binding being named
	RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
}
// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SelfSubjectRulesReview struct {
	metav1.TypeMeta `json:",inline"`

	// Spec adds information about how to conduct the check
	Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`

	// Status is completed by the server to tell which permissions you have
	Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}

// SelfSubjectRulesReviewSpec adds information about how to conduct the check
type SelfSubjectRulesReviewSpec struct {
	// Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
	// Nil means "use the scopes on this request".
	// +k8s:conversion-gen=false
	Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"`
}
// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SubjectRulesReview struct {
	metav1.TypeMeta `json:",inline"`

	// Spec adds information about how to conduct the check
	Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`

	// Status is completed by the server to tell which permissions you have
	Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}

// SubjectRulesReviewSpec adds information about how to conduct the check
type SubjectRulesReviewSpec struct {
	// User is optional. At least one of User and Groups must be specified.
	User string `json:"user" protobuf:"bytes,1,opt,name=user"`
	// Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
	Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
	// Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
	Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"`
}
// SubjectRulesReviewStatus is contains the result of a rules check
type SubjectRulesReviewStatus struct {
	// Rules is the list of rules (no particular sort) that are allowed for the subject
	Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"`
	// EvaluationError can appear in combination with Rules. It means some error happened during evaluation
	// that may have prevented additional rules from being populated.
	// Rules may therefore be partial when this is non-empty.
	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceAccessReviewResponse describes who can perform the action
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ResourceAccessReviewResponse struct {
	metav1.TypeMeta `json:",inline"`

	// Namespace is the namespace used for the access review
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
	// UsersSlice is the list of users who can perform the action
	// +k8s:conversion-gen=false
	UsersSlice []string `json:"users" protobuf:"bytes,2,rep,name=users"`
	// GroupsSlice is the list of groups who can perform the action
	// +k8s:conversion-gen=false
	GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`

	// EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned.
	// It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is
	// most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
	// NOTE(review): the json/protobuf tag misspells "evaluationError" as
	// "evalutionError"; presumably frozen for wire compatibility — do not
	// "fix" the tag without an API migration.
	EvaluationError string `json:"evalutionError" protobuf:"bytes,4,opt,name=evalutionError"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the
// action specified by spec
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ResourceAccessReview struct {
	metav1.TypeMeta `json:",inline"`

	// Action describes the action being tested.
	// Embedded (inline) so Action's fields serialize at the top level.
	Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SubjectAccessReviewResponse describes whether or not a user or group can perform an action
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SubjectAccessReviewResponse struct {
	metav1.TypeMeta `json:",inline"`

	// Namespace is the namespace used for the access review
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`

	// Allowed is required. True if the action would be allowed, false otherwise.
	Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`

	// Reason is optional. It indicates why a request was allowed or denied.
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`

	// EvaluationError is an indication that some error occurred during the authorization check.
	// It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is
	// most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
}
// OptionalScopes is an array that may also be left nil to distinguish between set and unset.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type OptionalScopes []string
func (t OptionalScopes) String() string {
return fmt.Sprintf("%v", []string(t))
}
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SubjectAccessReview struct {
	metav1.TypeMeta `json:",inline"`

	// Action describes the action being tested.
	// Embedded (inline) so Action's fields serialize at the top level.
	Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`

	// User is optional. If both User and Groups are empty, the current authenticated user is used.
	User string `json:"user" protobuf:"bytes,2,opt,name=user"`

	// GroupsSlice is optional. Groups is the list of groups to which the User belongs.
	// +k8s:conversion-gen=false
	GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`

	// Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
	// Nil for a self-SAR, means "use the scopes on this request".
	// Nil for a regular SAR, means the same as empty.
	// +k8s:conversion-gen=false
	Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"`
}
// +genclient
// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type LocalResourceAccessReview struct {
	metav1.TypeMeta `json:",inline"`

	// Action describes the action being tested. The Namespace element is FORCED to the current namespace.
	Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
}
// +genclient
// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type LocalSubjectAccessReview struct {
	metav1.TypeMeta `json:",inline"`

	// Action describes the action being tested. The Namespace element is FORCED to the current namespace.
	Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`

	// User is optional. If both User and Groups are empty, the current authenticated user is used.
	User string `json:"user" protobuf:"bytes,2,opt,name=user"`

	// Groups is optional. Groups is the list of groups to which the User belongs.
	// (Field is named GroupsSlice; serialized as "groups".)
	// +k8s:conversion-gen=false
	GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`

	// Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
	// Nil for a self-SAR, means "use the scopes on this request".
	// Nil for a regular SAR, means the same as empty.
	// +k8s:conversion-gen=false
	Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"`
}
// Action describes a request to the API server
type Action struct {
	// Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
	// Verb is one of: get, list, watch, create, update, delete
	Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"`
	// Group is the API group of the resource
	// Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined
	Group string `json:"resourceAPIGroup" protobuf:"bytes,3,opt,name=resourceAPIGroup"`
	// Version is the API version of the resource
	// Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
	Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"`
	// Resource is one of the existing resource types
	Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"`
	// ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
	ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"`
	// Path is the path of a non resource URL
	Path string `json:"path" protobuf:"bytes,8,opt,name=path"`
	// IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
	IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"`
	// Content is the actual content of the request for create and update
	// NOTE: declared last but carries protobuf field number 7 (Path/IsNonResourceURL
	// use 8/9) — field numbers, not declaration order, define the wire format.
	// +kubebuilder:pruning:PreserveUnknownFields
	Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RoleBindingList is a collection of RoleBindings
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RoleBindingList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of RoleBindings
	Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RoleList is a collection of Roles
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RoleList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of Roles
	Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterRole struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Rules holds all the PolicyRules for this ClusterRole
	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`

	// AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
	// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
	// stomped by the controller.
	// Reuses the upstream rbac/v1 type rather than redefining it here.
	AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace.
// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterRoleBinding struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// UserNames holds all the usernames directly bound to the role.
	// This field should only be specified when supporting legacy clients and servers.
	// See Subjects for further details.
	// OptionalNames (nilable) so set-but-empty can be told apart from unset.
	// +k8s:conversion-gen=false
	// +optional
	UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
	// GroupNames holds all the groups directly bound to the role.
	// This field should only be specified when supporting legacy clients and servers.
	// See Subjects for further details.
	// +k8s:conversion-gen=false
	// +optional
	GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
	// Subjects hold object references to authorize with this rule.
	// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
	// Thus newer clients that do not need to support backwards compatibility should send
	// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
	// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
	Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`

	// RoleRef can only reference the current namespace and the global namespace.
	// If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
	// Since Policy is a singleton, this is sufficient knowledge to locate a role.
	RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
}
// NamedClusterRole relates a name with a cluster role
type NamedClusterRole struct {
	// Name is the name of the cluster role
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Role is the cluster role being named
	Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"`
}

// NamedClusterRoleBinding relates a name with a cluster role binding
type NamedClusterRoleBinding struct {
	// Name is the name of the cluster role binding
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// RoleBinding is the cluster role binding being named
	RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterRoleBindingList is a collection of ClusterRoleBindings
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterRoleBindingList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of ClusterRoleBindings
	Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterRoleList is a collection of ClusterRoles
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterRoleList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of ClusterRoles
	Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RoleBindingRestriction is an object that can be matched against a subject
// (user, group, or service account) to determine whether rolebindings on that
// subject are allowed in the namespace to which the RoleBindingRestriction
// belongs. If any one of those RoleBindingRestriction objects matches
// a subject, rolebindings on that subject in the namespace are allowed.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RoleBindingRestriction struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`

	// Spec defines the matcher.
	Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one
// field must be non-nil.
// The three fields form a union; pointers (+nullable) make "unset" representable.
type RoleBindingRestrictionSpec struct {
	// UserRestriction matches against user subjects.
	// +nullable
	UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"`

	// GroupRestriction matches against group subjects.
	// +nullable
	GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"`

	// ServiceAccountRestriction matches against service-account subjects.
	// +nullable
	ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RoleBindingRestrictionList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of RoleBindingRestriction objects.
	Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// UserRestriction matches a user either by a string match on the user name,
// a string match on the name of a group to which the user belongs, or a label
// selector applied to the user labels.
type UserRestriction struct {
// Users specifies a list of literal user names.
Users []string `json:"users" protobuf:"bytes,1,rep,name=users"`
// Groups specifies a list of literal group names.
// +nullable
Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
// Selectors specifies a list of label selectors over user labels.
// +nullable
Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"`
}
// GroupRestriction matches a group either by a string match on the group name
// or a label selector applied to group labels.
type GroupRestriction struct {
// Groups is a list of groups used to match against an individual user's
// groups. If the user is a member of one of the whitelisted groups, the user
// is allowed to be bound to a role.
// +nullable
Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"`
// Selectors specifies a list of label selectors over group labels.
// +nullable
Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"`
}
// ServiceAccountRestriction matches a service account by a string match on
// either the service-account name or the name of the service account's
// namespace.
type ServiceAccountRestriction struct {
// ServiceAccounts specifies a list of literal service-account names.
ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"`
// Namespaces specifies a list of literal namespace names.
Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
}
// ServiceAccountReference specifies a service account and namespace by their
// names.
type ServiceAccountReference struct {
// Name is the name of the service account.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Namespace is the namespace of the service account. Service accounts from
// inside the whitelisted namespaces are allowed to be bound to roles. If
// Namespace is empty, then the namespace of the RoleBindingRestriction in
// which the ServiceAccountReference is embedded is used.
Namespace string `json:"namespace" protobuf:"bytes,2,opt,name=namespace"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/register.go | vendor/github.com/openshift/api/authorization/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "authorization.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&Role{},
&RoleBinding{},
&RoleBindingList{},
&RoleList{},
&SelfSubjectRulesReview{},
&SubjectRulesReview{},
&ResourceAccessReview{},
&SubjectAccessReview{},
&LocalResourceAccessReview{},
&LocalSubjectAccessReview{},
&ResourceAccessReviewResponse{},
&SubjectAccessReviewResponse{},
&IsPersonalSubjectAccessReview{},
&ClusterRole{},
&ClusterRoleBinding{},
&ClusterRoleBindingList{},
&ClusterRoleList{},
&RoleBindingRestriction{},
&RoleBindingRestrictionList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_Action = map[string]string{
"": "Action describes a request to the API server",
"namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
"verb": "Verb is one of: get, list, watch, create, update, delete",
"resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined",
"resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined",
"resource": "Resource is one of the existing resource types",
"resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
"path": "Path is the path of a non resource URL",
"isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)",
"content": "Content is the actual content of the request for create and update",
}
func (Action) SwaggerDoc() map[string]string {
return map_Action
}
var map_ClusterRole = map[string]string{
"": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"rules": "Rules holds all the PolicyRules for this ClusterRole",
"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
}
func (ClusterRole) SwaggerDoc() map[string]string {
return map_ClusterRole
}
var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
"groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
"subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
"roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
}
func (ClusterRoleBinding) SwaggerDoc() map[string]string {
return map_ClusterRoleBinding
}
var map_ClusterRoleBindingList = map[string]string{
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of ClusterRoleBindings",
}
func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
return map_ClusterRoleBindingList
}
var map_ClusterRoleList = map[string]string{
"": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of ClusterRoles",
}
func (ClusterRoleList) SwaggerDoc() map[string]string {
return map_ClusterRoleList
}
var map_GroupRestriction = map[string]string{
"": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.",
"groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.",
"labels": "Selectors specifies a list of label selectors over group labels.",
}
func (GroupRestriction) SwaggerDoc() map[string]string {
return map_GroupRestriction
}
var map_IsPersonalSubjectAccessReview = map[string]string{
"": "IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
}
func (IsPersonalSubjectAccessReview) SwaggerDoc() map[string]string {
return map_IsPersonalSubjectAccessReview
}
var map_LocalResourceAccessReview = map[string]string{
"": "LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
}
func (LocalResourceAccessReview) SwaggerDoc() map[string]string {
return map_LocalResourceAccessReview
}
var map_LocalSubjectAccessReview = map[string]string{
"": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
"groups": "Groups is optional. Groups is the list of groups to which the User belongs.",
"scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
}
func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
return map_LocalSubjectAccessReview
}
var map_NamedClusterRole = map[string]string{
"": "NamedClusterRole relates a name with a cluster role",
"name": "Name is the name of the cluster role",
"role": "Role is the cluster role being named",
}
func (NamedClusterRole) SwaggerDoc() map[string]string {
return map_NamedClusterRole
}
var map_NamedClusterRoleBinding = map[string]string{
"": "NamedClusterRoleBinding relates a name with a cluster role binding",
"name": "Name is the name of the cluster role binding",
"roleBinding": "RoleBinding is the cluster role binding being named",
}
func (NamedClusterRoleBinding) SwaggerDoc() map[string]string {
return map_NamedClusterRoleBinding
}
var map_NamedRole = map[string]string{
"": "NamedRole relates a Role with a name",
"name": "Name is the name of the role",
"role": "Role is the role being named",
}
func (NamedRole) SwaggerDoc() map[string]string {
return map_NamedRole
}
var map_NamedRoleBinding = map[string]string{
"": "NamedRoleBinding relates a role binding with a name",
"name": "Name is the name of the role binding",
"roleBinding": "RoleBinding is the role binding being named",
}
func (NamedRoleBinding) SwaggerDoc() map[string]string {
return map_NamedRoleBinding
}
var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
"verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
"attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed",
"resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
"nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.",
}
func (PolicyRule) SwaggerDoc() map[string]string {
return map_PolicyRule
}
var map_ResourceAccessReview = map[string]string{
"": "ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
}
func (ResourceAccessReview) SwaggerDoc() map[string]string {
return map_ResourceAccessReview
}
var map_ResourceAccessReviewResponse = map[string]string{
"": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"namespace": "Namespace is the namespace used for the access review",
"users": "UsersSlice is the list of users who can perform the action",
"groups": "GroupsSlice is the list of groups who can perform the action",
"evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
}
func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string {
return map_ResourceAccessReviewResponse
}
var map_Role = map[string]string{
"": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"rules": "Rules holds all the PolicyRules for this Role",
}
func (Role) SwaggerDoc() map[string]string {
return map_Role
}
var map_RoleBinding = map[string]string{
"": "RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
"groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
"subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
"roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
}
func (RoleBinding) SwaggerDoc() map[string]string {
return map_RoleBinding
}
var map_RoleBindingList = map[string]string{
"": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of RoleBindings",
}
func (RoleBindingList) SwaggerDoc() map[string]string {
return map_RoleBindingList
}
var map_RoleBindingRestriction = map[string]string{
"": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Spec defines the matcher.",
}
func (RoleBindingRestriction) SwaggerDoc() map[string]string {
return map_RoleBindingRestriction
}
var map_RoleBindingRestrictionList = map[string]string{
"": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of RoleBindingRestriction objects.",
}
func (RoleBindingRestrictionList) SwaggerDoc() map[string]string {
return map_RoleBindingRestrictionList
}
var map_RoleBindingRestrictionSpec = map[string]string{
"": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.",
"userrestriction": "UserRestriction matches against user subjects.",
"grouprestriction": "GroupRestriction matches against group subjects.",
"serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.",
}
func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string {
return map_RoleBindingRestrictionSpec
}
var map_RoleList = map[string]string{
"": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is a list of Roles",
}
func (RoleList) SwaggerDoc() map[string]string {
return map_RoleList
}
var map_SelfSubjectRulesReview = map[string]string{
"": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"spec": "Spec adds information about how to conduct the check",
"status": "Status is completed by the server to tell which permissions you have",
}
func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
return map_SelfSubjectRulesReview
}
var map_SelfSubjectRulesReviewSpec = map[string]string{
"": "SelfSubjectRulesReviewSpec adds information about how to conduct the check",
"scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".",
}
func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
return map_SelfSubjectRulesReviewSpec
}
var map_ServiceAccountReference = map[string]string{
"": "ServiceAccountReference specifies a service account and namespace by their names.",
"name": "Name is the name of the service account.",
"namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.",
}
func (ServiceAccountReference) SwaggerDoc() map[string]string {
return map_ServiceAccountReference
}
var map_ServiceAccountRestriction = map[string]string{
"": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.",
"serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.",
"namespaces": "Namespaces specifies a list of literal namespace names.",
}
func (ServiceAccountRestriction) SwaggerDoc() map[string]string {
return map_ServiceAccountRestriction
}
var map_SubjectAccessReview = map[string]string{
"": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
"groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.",
"scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
}
func (SubjectAccessReview) SwaggerDoc() map[string]string {
return map_SubjectAccessReview
}
var map_SubjectAccessReviewResponse = map[string]string{
"": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"namespace": "Namespace is the namespace used for the access review",
"allowed": "Allowed is required. True if the action would be allowed, false otherwise.",
"reason": "Reason is optional. It indicates why a request was allowed or denied.",
"evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
}
func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string {
return map_SubjectAccessReviewResponse
}
var map_SubjectRulesReview = map[string]string{
"": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"spec": "Spec adds information about how to conduct the check",
"status": "Status is completed by the server to tell which permissions you have",
}
func (SubjectRulesReview) SwaggerDoc() map[string]string {
return map_SubjectRulesReview
}
var map_SubjectRulesReviewSpec = map[string]string{
"": "SubjectRulesReviewSpec adds information about how to conduct the check",
"user": "User is optional. At least one of User and Groups must be specified.",
"groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.",
"scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".",
}
func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string {
return map_SubjectRulesReviewSpec
}
var map_SubjectRulesReviewStatus = map[string]string{
"": "SubjectRulesReviewStatus is contains the result of a rules check",
"rules": "Rules is the list of rules (no particular sort) that are allowed for the subject",
"evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.",
}
func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
return map_SubjectRulesReviewStatus
}
var map_UserRestriction = map[string]string{
"": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.",
"users": "Users specifies a list of literal user names.",
"groups": "Groups specifies a list of literal group names.",
"labels": "Selectors specifies a list of label selectors over user labels.",
}
func (UserRestriction) SwaggerDoc() map[string]string {
return map_UserRestriction
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/doc.go | vendor/github.com/openshift/api/authorization/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/authorization/apis/authorization
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +kubebuilder:validation:Optional
// +groupName=authorization.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/generated.pb.go | vendor/github.com/openshift/api/authorization/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/authorization/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
v12 "k8s.io/api/core/v1"
v11 "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// The stanzas below are protoc-gen-gogo generated boilerplate, one per message
// type. Each stanza supplies: Reset (zero the message), ProtoMessage (marker),
// Descriptor (offset into the compressed fileDescriptor_39b89822f939ca46), the
// XXX_* hooks the proto runtime calls for unmarshal/marshal/merge/size/
// discard-unknown, and the xxx_messageInfo_* cache backing Merge and
// DiscardUnknown. Do not hand-edit: this file is regenerated from the .proto.
func (m *Action) Reset() { *m = Action{} }
func (*Action) ProtoMessage() {}
func (*Action) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{0}
}
func (m *Action) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *Action) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Action.Merge(m, src)
}
func (m *Action) XXX_Size() int {
	return m.Size()
}
func (m *Action) XXX_DiscardUnknown() {
	xxx_messageInfo_Action.DiscardUnknown(m)
}
var xxx_messageInfo_Action proto.InternalMessageInfo
func (m *ClusterRole) Reset() { *m = ClusterRole{} }
func (*ClusterRole) ProtoMessage() {}
func (*ClusterRole) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{1}
}
func (m *ClusterRole) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ClusterRole) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterRole.Merge(m, src)
}
func (m *ClusterRole) XXX_Size() int {
	return m.Size()
}
func (m *ClusterRole) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterRole.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterRole proto.InternalMessageInfo
func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
func (*ClusterRoleBinding) ProtoMessage() {}
func (*ClusterRoleBinding) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{2}
}
func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterRoleBinding.Merge(m, src)
}
func (m *ClusterRoleBinding) XXX_Size() int {
	return m.Size()
}
func (m *ClusterRoleBinding) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo
func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
func (*ClusterRoleBindingList) ProtoMessage() {}
func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{3}
}
func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterRoleBindingList.Merge(m, src)
}
func (m *ClusterRoleBindingList) XXX_Size() int {
	return m.Size()
}
func (m *ClusterRoleBindingList) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo
func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
func (*ClusterRoleList) ProtoMessage() {}
func (*ClusterRoleList) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{4}
}
func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ClusterRoleList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterRoleList.Merge(m, src)
}
func (m *ClusterRoleList) XXX_Size() int {
	return m.Size()
}
func (m *ClusterRoleList) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterRoleList.DiscardUnknown(m)
}
var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo
func (m *GroupRestriction) Reset() { *m = GroupRestriction{} }
func (*GroupRestriction) ProtoMessage() {}
func (*GroupRestriction) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{5}
}
func (m *GroupRestriction) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GroupRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *GroupRestriction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GroupRestriction.Merge(m, src)
}
func (m *GroupRestriction) XXX_Size() int {
	return m.Size()
}
func (m *GroupRestriction) XXX_DiscardUnknown() {
	xxx_messageInfo_GroupRestriction.DiscardUnknown(m)
}
var xxx_messageInfo_GroupRestriction proto.InternalMessageInfo
func (m *IsPersonalSubjectAccessReview) Reset() { *m = IsPersonalSubjectAccessReview{} }
func (*IsPersonalSubjectAccessReview) ProtoMessage() {}
func (*IsPersonalSubjectAccessReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{6}
}
func (m *IsPersonalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *IsPersonalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *IsPersonalSubjectAccessReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IsPersonalSubjectAccessReview.Merge(m, src)
}
func (m *IsPersonalSubjectAccessReview) XXX_Size() int {
	return m.Size()
}
func (m *IsPersonalSubjectAccessReview) XXX_DiscardUnknown() {
	xxx_messageInfo_IsPersonalSubjectAccessReview.DiscardUnknown(m)
}
var xxx_messageInfo_IsPersonalSubjectAccessReview proto.InternalMessageInfo
func (m *LocalResourceAccessReview) Reset() { *m = LocalResourceAccessReview{} }
func (*LocalResourceAccessReview) ProtoMessage() {}
func (*LocalResourceAccessReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{7}
}
func (m *LocalResourceAccessReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *LocalResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *LocalResourceAccessReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LocalResourceAccessReview.Merge(m, src)
}
func (m *LocalResourceAccessReview) XXX_Size() int {
	return m.Size()
}
func (m *LocalResourceAccessReview) XXX_DiscardUnknown() {
	xxx_messageInfo_LocalResourceAccessReview.DiscardUnknown(m)
}
var xxx_messageInfo_LocalResourceAccessReview proto.InternalMessageInfo
func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
func (*LocalSubjectAccessReview) ProtoMessage() {}
func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{8}
}
func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src)
}
func (m *LocalSubjectAccessReview) XXX_Size() int {
	return m.Size()
}
func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() {
	xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m)
}
var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
func (m *NamedClusterRole) Reset() { *m = NamedClusterRole{} }
func (*NamedClusterRole) ProtoMessage() {}
func (*NamedClusterRole) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{9}
}
func (m *NamedClusterRole) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NamedClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NamedClusterRole) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NamedClusterRole.Merge(m, src)
}
func (m *NamedClusterRole) XXX_Size() int {
	return m.Size()
}
func (m *NamedClusterRole) XXX_DiscardUnknown() {
	xxx_messageInfo_NamedClusterRole.DiscardUnknown(m)
}
var xxx_messageInfo_NamedClusterRole proto.InternalMessageInfo
func (m *NamedClusterRoleBinding) Reset() { *m = NamedClusterRoleBinding{} }
func (*NamedClusterRoleBinding) ProtoMessage() {}
func (*NamedClusterRoleBinding) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{10}
}
func (m *NamedClusterRoleBinding) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NamedClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NamedClusterRoleBinding) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NamedClusterRoleBinding.Merge(m, src)
}
func (m *NamedClusterRoleBinding) XXX_Size() int {
	return m.Size()
}
func (m *NamedClusterRoleBinding) XXX_DiscardUnknown() {
	xxx_messageInfo_NamedClusterRoleBinding.DiscardUnknown(m)
}
var xxx_messageInfo_NamedClusterRoleBinding proto.InternalMessageInfo
func (m *NamedRole) Reset() { *m = NamedRole{} }
func (*NamedRole) ProtoMessage() {}
func (*NamedRole) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{11}
}
func (m *NamedRole) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NamedRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NamedRole) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NamedRole.Merge(m, src)
}
func (m *NamedRole) XXX_Size() int {
	return m.Size()
}
func (m *NamedRole) XXX_DiscardUnknown() {
	xxx_messageInfo_NamedRole.DiscardUnknown(m)
}
var xxx_messageInfo_NamedRole proto.InternalMessageInfo
func (m *NamedRoleBinding) Reset() { *m = NamedRoleBinding{} }
func (*NamedRoleBinding) ProtoMessage() {}
func (*NamedRoleBinding) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{12}
}
func (m *NamedRoleBinding) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NamedRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NamedRoleBinding) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NamedRoleBinding.Merge(m, src)
}
func (m *NamedRoleBinding) XXX_Size() int {
	return m.Size()
}
func (m *NamedRoleBinding) XXX_DiscardUnknown() {
	xxx_messageInfo_NamedRoleBinding.DiscardUnknown(m)
}
var xxx_messageInfo_NamedRoleBinding proto.InternalMessageInfo
func (m *OptionalNames) Reset() { *m = OptionalNames{} }
func (*OptionalNames) ProtoMessage() {}
func (*OptionalNames) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{13}
}
func (m *OptionalNames) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *OptionalNames) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OptionalNames.Merge(m, src)
}
func (m *OptionalNames) XXX_Size() int {
	return m.Size()
}
func (m *OptionalNames) XXX_DiscardUnknown() {
	xxx_messageInfo_OptionalNames.DiscardUnknown(m)
}
var xxx_messageInfo_OptionalNames proto.InternalMessageInfo
func (m *OptionalScopes) Reset() { *m = OptionalScopes{} }
func (*OptionalScopes) ProtoMessage() {}
func (*OptionalScopes) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{14}
}
func (m *OptionalScopes) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OptionalScopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *OptionalScopes) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OptionalScopes.Merge(m, src)
}
func (m *OptionalScopes) XXX_Size() int {
	return m.Size()
}
func (m *OptionalScopes) XXX_DiscardUnknown() {
	xxx_messageInfo_OptionalScopes.DiscardUnknown(m)
}
var xxx_messageInfo_OptionalScopes proto.InternalMessageInfo
func (m *PolicyRule) Reset() { *m = PolicyRule{} }
func (*PolicyRule) ProtoMessage() {}
func (*PolicyRule) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{15}
}
func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *PolicyRule) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PolicyRule.Merge(m, src)
}
func (m *PolicyRule) XXX_Size() int {
	return m.Size()
}
func (m *PolicyRule) XXX_DiscardUnknown() {
	xxx_messageInfo_PolicyRule.DiscardUnknown(m)
}
var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
func (m *ResourceAccessReview) Reset() { *m = ResourceAccessReview{} }
func (*ResourceAccessReview) ProtoMessage() {}
func (*ResourceAccessReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{16}
}
func (m *ResourceAccessReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ResourceAccessReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ResourceAccessReview.Merge(m, src)
}
func (m *ResourceAccessReview) XXX_Size() int {
	return m.Size()
}
func (m *ResourceAccessReview) XXX_DiscardUnknown() {
	xxx_messageInfo_ResourceAccessReview.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceAccessReview proto.InternalMessageInfo
func (m *ResourceAccessReviewResponse) Reset() { *m = ResourceAccessReviewResponse{} }
func (*ResourceAccessReviewResponse) ProtoMessage() {}
func (*ResourceAccessReviewResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{17}
}
func (m *ResourceAccessReviewResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ResourceAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ResourceAccessReviewResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ResourceAccessReviewResponse.Merge(m, src)
}
func (m *ResourceAccessReviewResponse) XXX_Size() int {
	return m.Size()
}
func (m *ResourceAccessReviewResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ResourceAccessReviewResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceAccessReviewResponse proto.InternalMessageInfo
func (m *Role) Reset() { *m = Role{} }
func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{18}
}
func (m *Role) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *Role) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Role.Merge(m, src)
}
func (m *Role) XXX_Size() int {
	return m.Size()
}
func (m *Role) XXX_DiscardUnknown() {
	xxx_messageInfo_Role.DiscardUnknown(m)
}
var xxx_messageInfo_Role proto.InternalMessageInfo
func (m *RoleBinding) Reset() { *m = RoleBinding{} }
func (*RoleBinding) ProtoMessage() {}
func (*RoleBinding) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{19}
}
func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleBinding) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleBinding.Merge(m, src)
}
func (m *RoleBinding) XXX_Size() int {
	return m.Size()
}
func (m *RoleBinding) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleBinding.DiscardUnknown(m)
}
var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
func (*RoleBindingList) ProtoMessage() {}
func (*RoleBindingList) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{20}
}
func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleBindingList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleBindingList.Merge(m, src)
}
func (m *RoleBindingList) XXX_Size() int {
	return m.Size()
}
func (m *RoleBindingList) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
}
var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
func (m *RoleBindingRestriction) Reset() { *m = RoleBindingRestriction{} }
func (*RoleBindingRestriction) ProtoMessage() {}
func (*RoleBindingRestriction) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{21}
}
func (m *RoleBindingRestriction) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleBindingRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleBindingRestriction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleBindingRestriction.Merge(m, src)
}
func (m *RoleBindingRestriction) XXX_Size() int {
	return m.Size()
}
func (m *RoleBindingRestriction) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleBindingRestriction.DiscardUnknown(m)
}
var xxx_messageInfo_RoleBindingRestriction proto.InternalMessageInfo
func (m *RoleBindingRestrictionList) Reset() { *m = RoleBindingRestrictionList{} }
func (*RoleBindingRestrictionList) ProtoMessage() {}
func (*RoleBindingRestrictionList) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{22}
}
func (m *RoleBindingRestrictionList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleBindingRestrictionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleBindingRestrictionList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleBindingRestrictionList.Merge(m, src)
}
func (m *RoleBindingRestrictionList) XXX_Size() int {
	return m.Size()
}
func (m *RoleBindingRestrictionList) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleBindingRestrictionList.DiscardUnknown(m)
}
var xxx_messageInfo_RoleBindingRestrictionList proto.InternalMessageInfo
func (m *RoleBindingRestrictionSpec) Reset() { *m = RoleBindingRestrictionSpec{} }
func (*RoleBindingRestrictionSpec) ProtoMessage() {}
func (*RoleBindingRestrictionSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{23}
}
func (m *RoleBindingRestrictionSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleBindingRestrictionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleBindingRestrictionSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleBindingRestrictionSpec.Merge(m, src)
}
func (m *RoleBindingRestrictionSpec) XXX_Size() int {
	return m.Size()
}
func (m *RoleBindingRestrictionSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleBindingRestrictionSpec.DiscardUnknown(m)
}
var xxx_messageInfo_RoleBindingRestrictionSpec proto.InternalMessageInfo
func (m *RoleList) Reset() { *m = RoleList{} }
func (*RoleList) ProtoMessage() {}
func (*RoleList) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{24}
}
func (m *RoleList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *RoleList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RoleList.Merge(m, src)
}
func (m *RoleList) XXX_Size() int {
	return m.Size()
}
func (m *RoleList) XXX_DiscardUnknown() {
	xxx_messageInfo_RoleList.DiscardUnknown(m)
}
var xxx_messageInfo_RoleList proto.InternalMessageInfo
func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
func (*SelfSubjectRulesReview) ProtoMessage() {}
func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{25}
}
func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src)
}
func (m *SelfSubjectRulesReview) XXX_Size() int {
	return m.Size()
}
func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() {
	xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m)
}
var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{26}
}
func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src)
}
func (m *SelfSubjectRulesReviewSpec) XXX_Size() int {
	return m.Size()
}
func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m)
}
var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
func (m *ServiceAccountReference) Reset() { *m = ServiceAccountReference{} }
func (*ServiceAccountReference) ProtoMessage() {}
func (*ServiceAccountReference) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{27}
}
func (m *ServiceAccountReference) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ServiceAccountReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ServiceAccountReference) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServiceAccountReference.Merge(m, src)
}
func (m *ServiceAccountReference) XXX_Size() int {
	return m.Size()
}
func (m *ServiceAccountReference) XXX_DiscardUnknown() {
	xxx_messageInfo_ServiceAccountReference.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceAccountReference proto.InternalMessageInfo
func (m *ServiceAccountRestriction) Reset() { *m = ServiceAccountRestriction{} }
func (*ServiceAccountRestriction) ProtoMessage() {}
func (*ServiceAccountRestriction) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{28}
}
func (m *ServiceAccountRestriction) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ServiceAccountRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ServiceAccountRestriction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServiceAccountRestriction.Merge(m, src)
}
func (m *ServiceAccountRestriction) XXX_Size() int {
	return m.Size()
}
func (m *ServiceAccountRestriction) XXX_DiscardUnknown() {
	xxx_messageInfo_ServiceAccountRestriction.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceAccountRestriction proto.InternalMessageInfo
func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
func (*SubjectAccessReview) ProtoMessage() {}
func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{29}
}
func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SubjectAccessReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubjectAccessReview.Merge(m, src)
}
func (m *SubjectAccessReview) XXX_Size() int {
	return m.Size()
}
func (m *SubjectAccessReview) XXX_DiscardUnknown() {
	xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m)
}
var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
func (m *SubjectAccessReviewResponse) Reset() { *m = SubjectAccessReviewResponse{} }
func (*SubjectAccessReviewResponse) ProtoMessage() {}
func (*SubjectAccessReviewResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{30}
}
func (m *SubjectAccessReviewResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SubjectAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SubjectAccessReviewResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubjectAccessReviewResponse.Merge(m, src)
}
func (m *SubjectAccessReviewResponse) XXX_Size() int {
	return m.Size()
}
func (m *SubjectAccessReviewResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SubjectAccessReviewResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SubjectAccessReviewResponse proto.InternalMessageInfo
func (m *SubjectRulesReview) Reset() { *m = SubjectRulesReview{} }
func (*SubjectRulesReview) ProtoMessage() {}
func (*SubjectRulesReview) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{31}
}
func (m *SubjectRulesReview) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SubjectRulesReview) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubjectRulesReview.Merge(m, src)
}
func (m *SubjectRulesReview) XXX_Size() int {
	return m.Size()
}
func (m *SubjectRulesReview) XXX_DiscardUnknown() {
	xxx_messageInfo_SubjectRulesReview.DiscardUnknown(m)
}
var xxx_messageInfo_SubjectRulesReview proto.InternalMessageInfo
func (m *SubjectRulesReviewSpec) Reset() { *m = SubjectRulesReviewSpec{} }
func (*SubjectRulesReviewSpec) ProtoMessage() {}
func (*SubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{32}
}
func (m *SubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubjectRulesReviewSpec.Merge(m, src)
}
func (m *SubjectRulesReviewSpec) XXX_Size() int {
	return m.Size()
}
func (m *SubjectRulesReviewSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_SubjectRulesReviewSpec.DiscardUnknown(m)
}
var xxx_messageInfo_SubjectRulesReviewSpec proto.InternalMessageInfo
func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
func (*SubjectRulesReviewStatus) ProtoMessage() {}
func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{33}
}
func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src)
}
func (m *SubjectRulesReviewStatus) XXX_Size() int {
	return m.Size()
}
func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m)
}
var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
func (m *UserRestriction) Reset() { *m = UserRestriction{} }
func (*UserRestriction) ProtoMessage() {}
func (*UserRestriction) Descriptor() ([]byte, []int) {
	return fileDescriptor_39b89822f939ca46, []int{34}
}
func (m *UserRestriction) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *UserRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *UserRestriction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UserRestriction.Merge(m, src)
}
func (m *UserRestriction) XXX_Size() int {
	return m.Size()
}
func (m *UserRestriction) XXX_DiscardUnknown() {
	xxx_messageInfo_UserRestriction.DiscardUnknown(m)
}
var xxx_messageInfo_UserRestriction proto.InternalMessageInfo
func init() {
proto.RegisterType((*Action)(nil), "github.com.openshift.api.authorization.v1.Action")
proto.RegisterType((*ClusterRole)(nil), "github.com.openshift.api.authorization.v1.ClusterRole")
proto.RegisterType((*ClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBinding")
proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBindingList")
proto.RegisterType((*ClusterRoleList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleList")
proto.RegisterType((*GroupRestriction)(nil), "github.com.openshift.api.authorization.v1.GroupRestriction")
proto.RegisterType((*IsPersonalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.IsPersonalSubjectAccessReview")
proto.RegisterType((*LocalResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalResourceAccessReview")
proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalSubjectAccessReview")
proto.RegisterType((*NamedClusterRole)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRole")
proto.RegisterType((*NamedClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRoleBinding")
proto.RegisterType((*NamedRole)(nil), "github.com.openshift.api.authorization.v1.NamedRole")
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/legacy.go | vendor/github.com/openshift/api/authorization/v1/legacy.go | package v1
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Legacy (pre-API-group) registration: these kinds were originally served
// under the group-less "v1" GroupVersion.
var (
	// legacyGroupVersion is the group-less "v1" API the legacy kinds live in.
	legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
	// legacySchemeBuilder registers this package's legacy kinds plus the
	// core and RBAC types they reference.
	legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme)
	// DeprecatedInstallWithoutGroup installs the legacy, group-less types
	// into a scheme. Kept only for backward compatibility.
	DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
// addLegacyKnownTypes registers the authorization kinds under the legacy,
// group-less "v1" GroupVersion so that pre-API-group clients keep working.
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(legacyGroupVersion,
		&Role{},
		&RoleBinding{},
		&RoleBindingList{},
		&RoleList{},
		&SelfSubjectRulesReview{},
		&SubjectRulesReview{},
		&ResourceAccessReview{},
		&SubjectAccessReview{},
		&LocalResourceAccessReview{},
		&LocalSubjectAccessReview{},
		&ResourceAccessReviewResponse{},
		&SubjectAccessReviewResponse{},
		&IsPersonalSubjectAccessReview{},
		&ClusterRole{},
		&ClusterRoleBinding{},
		&ClusterRoleBindingList{},
		&ClusterRoleList{},
		&RoleBindingRestriction{},
		&RoleBindingRestrictionList{},
	)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/authorization/v1/codec.go | vendor/github.com/openshift/api/authorization/v1/codec.go | package v1
import (
"github.com/openshift/api/pkg/serialization"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// Compile-time guards: PolicyRule must handle (de)serialization of its
// nested AttributeRestrictions RawExtension itself.
var _ runtime.NestedObjectDecoder = &PolicyRule{}
var _ runtime.NestedObjectEncoder = &PolicyRule{}
// DecodeNestedObjects decodes the rule's AttributeRestrictions RawExtension
// with the given decoder. Decode errors are swallowed deliberately: a failed
// payload is preserved as a runtime.Unknown object and handed to conversion.
func (c *PolicyRule) DecodeNestedObjects(d runtime.Decoder) error {
	// decoding failures result in a runtime.Unknown object being created in Object and passed
	// to conversion
	serialization.DecodeNestedRawExtensionOrUnknown(d, &c.AttributeRestrictions)
	return nil
}
// EncodeNestedObjects re-encodes the rule's AttributeRestrictions
// RawExtension with the given encoder, returning any encode error.
func (c *PolicyRule) EncodeNestedObjects(e runtime.Encoder) error {
	return serialization.EncodeNestedRawExtension(e, &c.AttributeRestrictions)
}
// Compile-time guards: SelfSubjectRulesReview (de)serializes the nested
// extensions of its status rules itself.
var _ runtime.NestedObjectDecoder = &SelfSubjectRulesReview{}
var _ runtime.NestedObjectEncoder = &SelfSubjectRulesReview{}
// DecodeNestedObjects decodes the nested AttributeRestrictions of every rule
// in the review status. Per-rule decode failures are intentionally ignored:
// each failed payload is kept as a runtime.Unknown object and passed on to
// conversion.
func (c *SelfSubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error {
	rules := c.Status.Rules
	for i := 0; i < len(rules); i++ {
		rules[i].DecodeNestedObjects(d)
	}
	return nil
}
func (c *SelfSubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Status.Rules {
if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
var _ runtime.NestedObjectDecoder = &SubjectRulesReview{}
var _ runtime.NestedObjectEncoder = &SubjectRulesReview{}
func (c *SubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error {
// decoding failures result in a runtime.Unknown object being created in Object and passed
// to conversion
for i := range c.Status.Rules {
c.Status.Rules[i].DecodeNestedObjects(d)
}
return nil
}
func (c *SubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Status.Rules {
if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
var _ runtime.NestedObjectDecoder = &ClusterRole{}
var _ runtime.NestedObjectEncoder = &ClusterRole{}
func (c *ClusterRole) DecodeNestedObjects(d runtime.Decoder) error {
// decoding failures result in a runtime.Unknown object being created in Object and passed
// to conversion
for i := range c.Rules {
c.Rules[i].DecodeNestedObjects(d)
}
return nil
}
func (c *ClusterRole) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Rules {
if err := c.Rules[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
var _ runtime.NestedObjectDecoder = &Role{}
var _ runtime.NestedObjectEncoder = &Role{}
func (c *Role) DecodeNestedObjects(d runtime.Decoder) error {
// decoding failures result in a runtime.Unknown object being created in Object and passed
// to conversion
for i := range c.Rules {
c.Rules[i].DecodeNestedObjects(d)
}
return nil
}
func (c *Role) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Rules {
if err := c.Rules[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
var _ runtime.NestedObjectDecoder = &ClusterRoleList{}
var _ runtime.NestedObjectEncoder = &ClusterRoleList{}
func (c *ClusterRoleList) DecodeNestedObjects(d runtime.Decoder) error {
// decoding failures result in a runtime.Unknown object being created in Object and passed
// to conversion
for i := range c.Items {
c.Items[i].DecodeNestedObjects(d)
}
return nil
}
func (c *ClusterRoleList) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Items {
if err := c.Items[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
var _ runtime.NestedObjectDecoder = &RoleList{}
var _ runtime.NestedObjectEncoder = &RoleList{}
func (c *RoleList) DecodeNestedObjects(d runtime.Decoder) error {
// decoding failures result in a runtime.Unknown object being created in Object and passed
// to conversion
for i := range c.Items {
c.Items[i].DecodeNestedObjects(d)
}
return nil
}
func (c *RoleList) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Items {
if err := c.Items[i].EncodeNestedObjects(e); err != nil {
return err
}
}
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go | vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Project) DeepCopyInto(out *Project) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
func (in *Project) DeepCopy() *Project {
if in == nil {
return nil
}
out := new(Project)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Project) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectList) DeepCopyInto(out *ProjectList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Project, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
func (in *ProjectList) DeepCopy() *ProjectList {
if in == nil {
return nil
}
out := new(ProjectList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest.
func (in *ProjectRequest) DeepCopy() *ProjectRequest {
if in == nil {
return nil
}
out := new(ProjectRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
*out = *in
if in.Finalizers != nil {
in, out := &in.Finalizers, &out.Finalizers
*out = make([]corev1.FinalizerName, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
func (in *ProjectSpec) DeepCopy() *ProjectSpec {
if in == nil {
return nil
}
out := new(ProjectSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]corev1.NamespaceCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
func (in *ProjectStatus) DeepCopy() *ProjectStatus {
if in == nil {
return nil
}
out := new(ProjectStatus)
in.DeepCopyInto(out)
return out
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/types.go | vendor/github.com/openshift/api/project/v1/types.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ProjectList is a list of Project objects.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ProjectList struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of projects
Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
}
const (
// These are internal finalizer values to Origin
FinalizerOrigin corev1.FinalizerName = "openshift.io/origin"
// ProjectNodeSelector is an annotation that holds the node selector;
// the node selector annotation determines which nodes will have pods from this project scheduled to them
ProjectNodeSelector = "openshift.io/node-selector"
// ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present,
// but it is set by the default project template.
ProjectRequesterAnnotation = "openshift.io/requester"
)
// ProjectSpec describes the attributes on a Project
type ProjectSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"`
}
// ProjectStatus is information about the current status of a Project
type ProjectStatus struct {
// Phase is the current lifecycle phase of the project
// +optional
Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"`
// Represents the latest available observations of the project current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
// a quota on the resources that the project may consume, and the security controls on the resources in
// the project. Within a project, members may have different roles - project administrators can set
// membership, editors can create and manage the resources, and viewers can see but not access running
// containers. In a normal cluster project administrators are not able to alter their quotas - that is
// restricted to cluster administrators.
//
// Listing or watching projects will return only projects the user has the reader role on.
//
// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed
// as editable to end users while namespaces are not. Direct creation of a project is typically restricted
// to administrators, while end users should use the requestproject resource.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type Project struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of the Namespace.
Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status describes the current status of a Namespace
// +optional
Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
// +genclient:method=Create,verb=create,result=Project
// ProjectRequest is the set of options necessary to fully qualify a project request
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ProjectRequest struct {
metav1.TypeMeta `json:",inline"`
// metadata is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// DisplayName is the display name to apply to a project
DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"`
// Description is the description to apply to a project
Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/register.go | vendor/github.com/openshift/api/project/v1/register.go | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "project.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&Project{},
&ProjectList{},
&ProjectRequest{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go | vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go | package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_Project = map[string]string{
"": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Spec defines the behavior of the Namespace.",
"status": "Status describes the current status of a Namespace",
}
func (Project) SwaggerDoc() map[string]string {
return map_Project
}
var map_ProjectList = map[string]string{
"": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "Items is the list of projects",
}
func (ProjectList) SwaggerDoc() map[string]string {
return map_ProjectList
}
var map_ProjectRequest = map[string]string{
"": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"displayName": "DisplayName is the display name to apply to a project",
"description": "Description is the description to apply to a project",
}
func (ProjectRequest) SwaggerDoc() map[string]string {
return map_ProjectRequest
}
var map_ProjectSpec = map[string]string{
"": "ProjectSpec describes the attributes on a Project",
"finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage",
}
func (ProjectSpec) SwaggerDoc() map[string]string {
return map_ProjectSpec
}
var map_ProjectStatus = map[string]string{
"": "ProjectStatus is information about the current status of a Project",
"phase": "Phase is the current lifecycle phase of the project",
"conditions": "Represents the latest available observations of the project current state.",
}
func (ProjectStatus) SwaggerDoc() map[string]string {
return map_ProjectStatus
}
// AUTO-GENERATED FUNCTIONS END HERE
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/doc.go | vendor/github.com/openshift/api/project/v1/doc.go | // +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// +groupName=project.openshift.io
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/generated.pb.go | vendor/github.com/openshift/api/project/v1/generated.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/openshift/api/project/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *Project) Reset() { *m = Project{} }
func (*Project) ProtoMessage() {}
func (*Project) Descriptor() ([]byte, []int) {
return fileDescriptor_fbf46eaac05029bf, []int{0}
}
func (m *Project) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Project) XXX_Merge(src proto.Message) {
xxx_messageInfo_Project.Merge(m, src)
}
func (m *Project) XXX_Size() int {
return m.Size()
}
func (m *Project) XXX_DiscardUnknown() {
xxx_messageInfo_Project.DiscardUnknown(m)
}
var xxx_messageInfo_Project proto.InternalMessageInfo
func (m *ProjectList) Reset() { *m = ProjectList{} }
func (*ProjectList) ProtoMessage() {}
func (*ProjectList) Descriptor() ([]byte, []int) {
return fileDescriptor_fbf46eaac05029bf, []int{1}
}
func (m *ProjectList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ProjectList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectList.Merge(m, src)
}
func (m *ProjectList) XXX_Size() int {
return m.Size()
}
func (m *ProjectList) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectList.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectList proto.InternalMessageInfo
func (m *ProjectRequest) Reset() { *m = ProjectRequest{} }
func (*ProjectRequest) ProtoMessage() {}
func (*ProjectRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_fbf46eaac05029bf, []int{2}
}
func (m *ProjectRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ProjectRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectRequest.Merge(m, src)
}
func (m *ProjectRequest) XXX_Size() int {
return m.Size()
}
func (m *ProjectRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo
func (m *ProjectSpec) Reset() { *m = ProjectSpec{} }
func (*ProjectSpec) ProtoMessage() {}
func (*ProjectSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_fbf46eaac05029bf, []int{3}
}
func (m *ProjectSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ProjectSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectSpec.Merge(m, src)
}
func (m *ProjectSpec) XXX_Size() int {
return m.Size()
}
func (m *ProjectSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo
func (m *ProjectStatus) Reset() { *m = ProjectStatus{} }
func (*ProjectStatus) ProtoMessage() {}
func (*ProjectStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_fbf46eaac05029bf, []int{4}
}
func (m *ProjectStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ProjectStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProjectStatus.Merge(m, src)
}
func (m *ProjectStatus) XXX_Size() int {
return m.Size()
}
func (m *ProjectStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ProjectStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo
func init() {
proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project")
proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList")
proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest")
proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec")
proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus")
}
func init() {
proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf)
}
var fileDescriptor_fbf46eaac05029bf = []byte{
// 573 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xd3, 0x30,
0x18, 0xc7, 0x9b, 0x6d, 0x1d, 0xab, 0xcb, 0x26, 0x14, 0x2e, 0x55, 0x0f, 0x69, 0xc9, 0x24, 0xd4,
0x03, 0x38, 0xb4, 0xbc, 0x88, 0x73, 0x40, 0x88, 0x49, 0xbc, 0x0c, 0x73, 0xab, 0x38, 0xe0, 0xa6,
0x6e, 0x6a, 0xba, 0xc4, 0x26, 0x76, 0x2b, 0x8d, 0x13, 0x1f, 0x81, 0x3b, 0x9f, 0x83, 0x2b, 0xe7,
0x1e, 0x77, 0xdc, 0xa9, 0x5a, 0xc3, 0xb7, 0xd8, 0x09, 0xd9, 0x71, 0x93, 0xc0, 0x8a, 0xd4, 0x5d,
0xb8, 0xd5, 0x4f, 0xfe, 0xbf, 0x9f, 0xed, 0xe7, 0x49, 0x03, 0x1e, 0x86, 0x54, 0x8e, 0xa7, 0x03,
0x18, 0xb0, 0xc8, 0x63, 0x9c, 0xc4, 0x62, 0x4c, 0x47, 0xd2, 0xc3, 0x9c, 0x7a, 0x3c, 0x61, 0x9f,
0x48, 0x20, 0xbd, 0x59, 0xd7, 0x0b, 0x49, 0x4c, 0x12, 0x2c, 0xc9, 0x10, 0xf2, 0x84, 0x49, 0x66,
0x1f, 0x16, 0x10, 0xcc, 0x21, 0x88, 0x39, 0x85, 0x06, 0x82, 0xb3, 0x6e, 0xf3, 0x7e, 0xc9, 0x1c,
0xb2, 0x90, 0x79, 0x9a, 0x1d, 0x4c, 0x47, 0x7a, 0xa5, 0x17, 0xfa, 0x57, 0xe6, 0x6c, 0xba, 0x93,
0xa7, 0x02, 0x52, 0xa6, 0xb7, 0x0e, 0x58, 0x42, 0xd6, 0xec, 0xdb, 0x7c, 0x54, 0x64, 0x22, 0x1c,
0x8c, 0x69, 0x4c, 0x92, 0x53, 0x8f, 0x4f, 0x42, 0x55, 0x10, 0x5e, 0x44, 0x24, 0x5e, 0x47, 0x3d,
0xf9, 0x17, 0x95, 0x4c, 0x63, 0x49, 0x23, 0xe2, 0x89, 0x60, 0x4c, 0x22, 0xfc, 0x37, 0xe7, 0x7e,
0xdf, 0x02, 0x37, 0x8e, 0xb3, 0xfb, 0xd8, 0x1f, 0xc1, 0x9e, 0xd2, 0x0f, 0xb1, 0xc4, 0x0d, 0xab,
0x6d, 0x75, 0xea, 0xbd, 0x07, 0x30, 0xd3, 0xc2, 0xb2, 0x16, 0xf2, 0x49, 0xa8, 0x0a, 0x02, 0xaa,
0x34, 0x9c, 0x75, 0xe1, 0xdb, 0x81, 0xe2, 0x5f, 0x13, 0x89, 0x7d, 0x7b, 0xbe, 0x68, 0x55, 0xd2,
0x45, 0x0b, 0x14, 0x35, 0x94, 0x5b, 0x6d, 0x04, 0x76, 0x04, 0x27, 0x41, 0x63, 0xcb, 0xd8, 0x37,
0x68, 0x31, 0x34, 0xa7, 0x7b, 0xcf, 0x49, 0xe0, 0xdf, 0x34, 0xf6, 0x1d, 0xb5, 0x42, 0xda, 0x65,
0xf7, 0xc1, 0xae, 0x90, 0x58, 0x4e, 0x45, 0x63, 0x5b, 0x5b, 0x7b, 0xd7, 0xb2, 0x6a, 0xd2, 0x3f,
0x30, 0xde, 0xdd, 0x6c, 0x8d, 0x8c, 0xd1, 0xfd, 0x69, 0x81, 0xba, 0x49, 0xbe, 0xa2, 0x42, 0xda,
0x1f, 0xae, 0x74, 0x08, 0x6e, 0xd6, 0x21, 0x45, 0xeb, 0xfe, 0xdc, 0x32, 0x3b, 0xed, 0xad, 0x2a,
0xa5, 0xee, 0xbc, 0x03, 0x55, 0x2a, 0x49, 0x24, 0x1a, 0x5b, 0xed, 0xed, 0x4e, 0xbd, 0x77, 0xef,
0x3a, 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0x7a, 0xa4, 0x14, 0x28, 0x33, 0xb9, 0x17, 0x16, 0x38, 0x30,
0x09, 0x44, 0x3e, 0x4f, 0x89, 0xf8, 0x1f, 0x53, 0x7e, 0x0c, 0xea, 0x43, 0x2a, 0xf8, 0x09, 0x3e,
0x7d, 0x83, 0x23, 0xa2, 0x87, 0x5d, 0xf3, 0x6f, 0x1b, 0xa4, 0xfe, 0xbc, 0x78, 0x84, 0xca, 0x39,
0x8d, 0x11, 0x11, 0x24, 0x94, 0x4b, 0xca, 0x62, 0x3d, 0xcd, 0x32, 0x56, 0x3c, 0x42, 0xe5, 0x9c,
0x8b, 0xf3, 0x11, 0xa9, 0x97, 0xc2, 0x46, 0x00, 0x8c, 0x68, 0x8c, 0x4f, 0xe8, 0x17, 0x92, 0x88,
0x86, 0xd5, 0xde, 0xee, 0xd4, 0xfc, 0x9e, 0x3a, 0xea, 0x8b, 0xbc, 0x7a, 0xb9, 0x68, 0xb5, 0xaf,
0xfe, 0x11, 0x61, 0x1e, 0xd0, 0x47, 0x2b, 0x59, 0xdc, 0x1f, 0x16, 0xd8, 0xff, 0xe3, 0x85, 0xb1,
0x5f, 0x82, 0x2a, 0x1f, 0x63, 0x41, 0x74, 0x07, 0x6b, 0x7e, 0x6f, 0xd5, 0xfc, 0x63, 0x55, 0xbc,
0x5c, 0xb4, 0xee, 0xac, 0xf1, 0x2b, 0xad, 0xe0, 0x38, 0x20, 0x3a, 0x84, 0x32, 0x81, 0xdd, 0x07,
0x20, 0x60, 0xf1, 0x90, 0xaa, 0xbb, 0xac, 0x26, 0x7f, 0xb7, 0x34, 0x10, 0xa8, 0x70, 0x58, 0xc6,
0x9f, 0xad, 0xe2, 0xc5, 0x18, 0xf2, 0x92, 0x40, 0x25, 0x9b, 0x7f, 0x34, 0x5f, 0x3a, 0x95, 0xb3,
0xa5, 0x53, 0x39, 0x5f, 0x3a, 0x95, 0xaf, 0xa9, 0x63, 0xcd, 0x53, 0xc7, 0x3a, 0x4b, 0x1d, 0xeb,
0x3c, 0x75, 0xac, 0x8b, 0xd4, 0xb1, 0xbe, 0xfd, 0x72, 0x2a, 0xfd, 0xc3, 0x0d, 0xbe, 0x8e, 0xbf,
0x03, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x9b, 0x1f, 0xba, 0x43, 0x05, 0x00, 0x00,
}
func (m *Project) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Project) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ProjectList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ProjectRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.Description)
copy(dAtA[i:], m.Description)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
i--
dAtA[i] = 0x1a
i -= len(m.DisplayName)
copy(dAtA[i:], m.DisplayName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName)))
i--
dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ProjectSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Finalizers) > 0 {
for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Finalizers[iNdEx])
copy(dAtA[i:], m.Finalizers[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx])))
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ProjectStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
i -= len(m.Phase)
copy(dAtA[i:], m.Phase)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Project) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *ProjectList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *ProjectRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = len(m.DisplayName)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Description)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *ProjectSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Finalizers) > 0 {
for _, s := range m.Finalizers {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *ProjectStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Phase)
n += 1 + l + sovGenerated(uint64(l))
if len(m.Conditions) > 0 {
for _, e := range m.Conditions {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Project) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Project{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *ProjectList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]Project{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&ProjectList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
func (this *ProjectRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ProjectRequest{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`,
`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
`}`,
}, "")
return s
}
func (this *ProjectSpec) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ProjectSpec{`,
`Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
`}`,
}, "")
return s
}
func (this *ProjectStatus) String() string {
if this == nil {
return "nil"
}
repeatedStringForConditions := "[]NamespaceCondition{"
for _, f := range this.Conditions {
repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
}
repeatedStringForConditions += "}"
s := strings.Join([]string{`&ProjectStatus{`,
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Project) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Project: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProjectList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProjectList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Project{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProjectRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DisplayName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProjectSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProjectStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Conditions = append(m.Conditions, v11.NamespaceCondition{})
if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenerated
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/project/v1/legacy.go | vendor/github.com/openshift/api/project/v1/legacy.go | package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&Project{},
&ProjectList{},
&ProjectRequest{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.