repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions. func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions { if in == nil { return nil } out := new(BinaryBuildRequestOptions) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource. func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource { if in == nil { return nil } out := new(BinaryBuildSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) { *out = *in in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause. 
func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause { if in == nil { return nil } out := new(BitbucketWebHookCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Build) DeepCopyInto(out *Build) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. func (in *Build) DeepCopy() *Build { if in == nil { return nil } out := new(Build) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *Build) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildCondition) DeepCopyInto(out *BuildCondition) { *out = *in in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition. func (in *BuildCondition) DeepCopy() *BuildCondition { if in == nil { return nil } out := new(BuildCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildConfig) DeepCopyInto(out *BuildConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig. 
func (in *BuildConfig) DeepCopy() *BuildConfig { if in == nil { return nil } out := new(BuildConfig) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]BuildConfig, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList. func (in *BuildConfigList) DeepCopy() *BuildConfigList { if in == nil { return nil } out := new(BuildConfigList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) { *out = *in if in.Triggers != nil { in, out := &in.Triggers, &out.Triggers *out = make([]BuildTriggerPolicy, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } in.CommonSpec.DeepCopyInto(&out.CommonSpec) if in.SuccessfulBuildsHistoryLimit != nil { in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit *out = new(int32) **out = **in } if in.FailedBuildsHistoryLimit != nil { in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit *out = new(int32) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec. func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec { if in == nil { return nil } out := new(BuildConfigSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) { *out = *in if in.ImageChangeTriggers != nil { in, out := &in.ImageChangeTriggers, &out.ImageChangeTriggers *out = make([]ImageChangeTriggerStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus. func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus { if in == nil { return nil } out := new(BuildConfigStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildList) DeepCopyInto(out *BuildList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Build, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. func (in *BuildList) DeepCopy() *BuildList { if in == nil { return nil } out := new(BuildList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildLog) DeepCopyInto(out *BuildLog) { *out = *in out.TypeMeta = in.TypeMeta return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog. func (in *BuildLog) DeepCopy() *BuildLog { if in == nil { return nil } out := new(BuildLog) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildLog) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) { *out = *in out.TypeMeta = in.TypeMeta if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds *out = new(int64) **out = **in } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime *out = (*in).DeepCopy() } if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines *out = new(int64) **out = **in } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes *out = new(int64) **out = **in } if in.Version != nil { in, out := &in.Version, &out.Version *out = new(int64) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions. func (in *BuildLogOptions) DeepCopy() *BuildLogOptions { if in == nil { return nil } out := new(BuildLogOptions) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildLogOptions) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildOutput) DeepCopyInto(out *BuildOutput) { *out = *in if in.To != nil { in, out := &in.To, &out.To *out = new(corev1.ObjectReference) **out = **in } if in.PushSecret != nil { in, out := &in.PushSecret, &out.PushSecret *out = new(corev1.LocalObjectReference) **out = **in } if in.ImageLabels != nil { in, out := &in.ImageLabels, &out.ImageLabels *out = make([]ImageLabel, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput. func (in *BuildOutput) DeepCopy() *BuildOutput { if in == nil { return nil } out := new(BuildOutput) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) { *out = *in if in.Command != nil { in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) } if in.Args != nil { in, out := &in.Args, &out.Args *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec. func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec { if in == nil { return nil } out := new(BuildPostCommitSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildRequest) DeepCopyInto(out *BuildRequest) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(SourceRevision) (*in).DeepCopyInto(*out) } if in.TriggeredByImage != nil { in, out := &in.TriggeredByImage, &out.TriggeredByImage *out = new(corev1.ObjectReference) **out = **in } if in.From != nil { in, out := &in.From, &out.From *out = new(corev1.ObjectReference) **out = **in } if in.Binary != nil { in, out := &in.Binary, &out.Binary *out = new(BinaryBuildSource) **out = **in } if in.LastVersion != nil { in, out := &in.LastVersion, &out.LastVersion *out = new(int64) **out = **in } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TriggeredBy != nil { in, out := &in.TriggeredBy, &out.TriggeredBy *out = make([]BuildTriggerCause, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.DockerStrategyOptions != nil { in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions *out = new(DockerStrategyOptions) (*in).DeepCopyInto(*out) } if in.SourceStrategyOptions != nil { in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions *out = 
new(SourceStrategyOptions) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest. func (in *BuildRequest) DeepCopy() *BuildRequest { if in == nil { return nil } out := new(BuildRequest) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BuildRequest) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildSource) DeepCopyInto(out *BuildSource) { *out = *in if in.Binary != nil { in, out := &in.Binary, &out.Binary *out = new(BinaryBuildSource) **out = **in } if in.Dockerfile != nil { in, out := &in.Dockerfile, &out.Dockerfile *out = new(string) **out = **in } if in.Git != nil { in, out := &in.Git, &out.Git *out = new(GitBuildSource) (*in).DeepCopyInto(*out) } if in.Images != nil { in, out := &in.Images, &out.Images *out = make([]ImageSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.SourceSecret != nil { in, out := &in.SourceSecret, &out.SourceSecret *out = new(corev1.LocalObjectReference) **out = **in } if in.Secrets != nil { in, out := &in.Secrets, &out.Secrets *out = make([]SecretBuildSource, len(*in)) copy(*out, *in) } if in.ConfigMaps != nil { in, out := &in.ConfigMaps, &out.ConfigMaps *out = make([]ConfigMapBuildSource, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource. func (in *BuildSource) DeepCopy() *BuildSource { if in == nil { return nil } out := new(BuildSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { *out = *in in.CommonSpec.DeepCopyInto(&out.CommonSpec) if in.TriggeredBy != nil { in, out := &in.TriggeredBy, &out.TriggeredBy *out = make([]BuildTriggerCause, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. func (in *BuildSpec) DeepCopy() *BuildSpec { if in == nil { return nil } out := new(BuildSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { *out = *in if in.StartTimestamp != nil { in, out := &in.StartTimestamp, &out.StartTimestamp *out = (*in).DeepCopy() } if in.CompletionTimestamp != nil { in, out := &in.CompletionTimestamp, &out.CompletionTimestamp *out = (*in).DeepCopy() } if in.Config != nil { in, out := &in.Config, &out.Config *out = new(corev1.ObjectReference) **out = **in } in.Output.DeepCopyInto(&out.Output) if in.Stages != nil { in, out := &in.Stages, &out.Stages *out = make([]StageInfo, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]BuildCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. func (in *BuildStatus) DeepCopy() *BuildStatus { if in == nil { return nil } out := new(BuildStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) { *out = *in if in.To != nil { in, out := &in.To, &out.To *out = new(BuildStatusOutputTo) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput. func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput { if in == nil { return nil } out := new(BuildStatusOutput) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo. func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo { if in == nil { return nil } out := new(BuildStatusOutputTo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) { *out = *in if in.DockerStrategy != nil { in, out := &in.DockerStrategy, &out.DockerStrategy *out = new(DockerBuildStrategy) (*in).DeepCopyInto(*out) } if in.SourceStrategy != nil { in, out := &in.SourceStrategy, &out.SourceStrategy *out = new(SourceBuildStrategy) (*in).DeepCopyInto(*out) } if in.CustomStrategy != nil { in, out := &in.CustomStrategy, &out.CustomStrategy *out = new(CustomBuildStrategy) (*in).DeepCopyInto(*out) } if in.JenkinsPipelineStrategy != nil { in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy *out = new(JenkinsPipelineBuildStrategy) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy. 
func (in *BuildStrategy) DeepCopy() *BuildStrategy { if in == nil { return nil } out := new(BuildStrategy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) { *out = *in if in.GenericWebHook != nil { in, out := &in.GenericWebHook, &out.GenericWebHook *out = new(GenericWebHookCause) (*in).DeepCopyInto(*out) } if in.GitHubWebHook != nil { in, out := &in.GitHubWebHook, &out.GitHubWebHook *out = new(GitHubWebHookCause) (*in).DeepCopyInto(*out) } if in.ImageChangeBuild != nil { in, out := &in.ImageChangeBuild, &out.ImageChangeBuild *out = new(ImageChangeCause) (*in).DeepCopyInto(*out) } if in.GitLabWebHook != nil { in, out := &in.GitLabWebHook, &out.GitLabWebHook *out = new(GitLabWebHookCause) (*in).DeepCopyInto(*out) } if in.BitbucketWebHook != nil { in, out := &in.BitbucketWebHook, &out.BitbucketWebHook *out = new(BitbucketWebHookCause) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause. func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause { if in == nil { return nil } out := new(BuildTriggerCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) { *out = *in if in.GitHubWebHook != nil { in, out := &in.GitHubWebHook, &out.GitHubWebHook *out = new(WebHookTrigger) (*in).DeepCopyInto(*out) } if in.GenericWebHook != nil { in, out := &in.GenericWebHook, &out.GenericWebHook *out = new(WebHookTrigger) (*in).DeepCopyInto(*out) } if in.ImageChange != nil { in, out := &in.ImageChange, &out.ImageChange *out = new(ImageChangeTrigger) (*in).DeepCopyInto(*out) } if in.GitLabWebHook != nil { in, out := &in.GitLabWebHook, &out.GitLabWebHook *out = new(WebHookTrigger) (*in).DeepCopyInto(*out) } if in.BitbucketWebHook != nil { in, out := &in.BitbucketWebHook, &out.BitbucketWebHook *out = new(WebHookTrigger) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy. func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy { if in == nil { return nil } out := new(BuildTriggerPolicy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildVolume) DeepCopyInto(out *BuildVolume) { *out = *in in.Source.DeepCopyInto(&out.Source) if in.Mounts != nil { in, out := &in.Mounts, &out.Mounts *out = make([]BuildVolumeMount, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolume. func (in *BuildVolume) DeepCopy() *BuildVolume { if in == nil { return nil } out := new(BuildVolume) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildVolumeMount) DeepCopyInto(out *BuildVolumeMount) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeMount. 
func (in *BuildVolumeMount) DeepCopy() *BuildVolumeMount { if in == nil { return nil } out := new(BuildVolumeMount) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildVolumeSource) DeepCopyInto(out *BuildVolumeSource) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret *out = new(corev1.SecretVolumeSource) (*in).DeepCopyInto(*out) } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap *out = new(corev1.ConfigMapVolumeSource) (*in).DeepCopyInto(*out) } if in.CSI != nil { in, out := &in.CSI, &out.CSI *out = new(corev1.CSIVolumeSource) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeSource. func (in *BuildVolumeSource) DeepCopy() *BuildVolumeSource { if in == nil { return nil } out := new(BuildVolumeSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { *out = *in in.Source.DeepCopyInto(&out.Source) if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(SourceRevision) (*in).DeepCopyInto(*out) } in.Strategy.DeepCopyInto(&out.Strategy) in.Output.DeepCopyInto(&out.Output) in.Resources.DeepCopyInto(&out.Resources) in.PostCommit.DeepCopyInto(&out.PostCommit) if in.CompletionDeadlineSeconds != nil { in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds *out = new(int64) **out = **in } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(OptionalNodeSelector, len(*in)) for key, val := range *in { (*out)[key] = val } } if in.MountTrustedCA != nil { in, out := &in.MountTrustedCA, &out.MountTrustedCA *out = new(bool) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. func (in *CommonSpec) DeepCopy() *CommonSpec { if in == nil { return nil } out := new(CommonSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) { *out = *in if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(SourceRevision) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause. func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause { if in == nil { return nil } out := new(CommonWebHookCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) { *out = *in out.ConfigMap = in.ConfigMap return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource. 
func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource { if in == nil { return nil } out := new(ConfigMapBuildSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) { *out = *in out.From = in.From if in.PullSecret != nil { in, out := &in.PullSecret, &out.PullSecret *out = new(corev1.LocalObjectReference) **out = **in } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Secrets != nil { in, out := &in.Secrets, &out.Secrets *out = make([]SecretSpec, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy. func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy { if in == nil { return nil } out := new(CustomBuildStrategy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) { *out = *in if in.From != nil { in, out := &in.From, &out.From *out = new(corev1.ObjectReference) **out = **in } if in.PullSecret != nil { in, out := &in.PullSecret, &out.PullSecret *out = new(corev1.LocalObjectReference) **out = **in } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.BuildArgs != nil { in, out := &in.BuildArgs, &out.BuildArgs *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.ImageOptimizationPolicy != nil { in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy *out = new(ImageOptimizationPolicy) **out = **in } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]BuildVolume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy. func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy { if in == nil { return nil } out := new(DockerBuildStrategy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) { *out = *in if in.BuildArgs != nil { in, out := &in.BuildArgs, &out.BuildArgs *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.NoCache != nil { in, out := &in.NoCache, &out.NoCache *out = new(bool) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions. 
func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions { if in == nil { return nil } out := new(DockerStrategyOptions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) { *out = *in if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(SourceRevision) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause. func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause { if in == nil { return nil } out := new(GenericWebHookCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) { *out = *in if in.Git != nil { in, out := &in.Git, &out.Git *out = new(GitInfo) (*in).DeepCopyInto(*out) } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.DockerStrategyOptions != nil { in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions *out = new(DockerStrategyOptions) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent. func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent { if in == nil { return nil } out := new(GenericWebHookEvent) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) { *out = *in in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource. 
func (in *GitBuildSource) DeepCopy() *GitBuildSource { if in == nil { return nil } out := new(GitBuildSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) { *out = *in if in.Revision != nil { in, out := &in.Revision, &out.Revision *out = new(SourceRevision) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause. func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause { if in == nil { return nil } out := new(GitHubWebHookCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitInfo) DeepCopyInto(out *GitInfo) { *out = *in in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) out.GitSourceRevision = in.GitSourceRevision if in.Refs != nil { in, out := &in.Refs, &out.Refs *out = make([]GitRefInfo, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo. func (in *GitInfo) DeepCopy() *GitInfo { if in == nil { return nil } out := new(GitInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) { *out = *in in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause. func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause { if in == nil { return nil } out := new(GitLabWebHookCause) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) { *out = *in in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) out.GitSourceRevision = in.GitSourceRevision return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo. func (in *GitRefInfo) DeepCopy() *GitRefInfo { if in == nil { return nil } out := new(GitRefInfo) in.DeepCopyInto(out) return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/consts.go
vendor/github.com/openshift/api/build/v1/consts.go
package v1 // annotations const ( // BuildAnnotation is an annotation that identifies a Pod as being for a Build BuildAnnotation = "openshift.io/build.name" // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from BuildConfigAnnotation = "openshift.io/build-config.name" // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from BuildCloneAnnotation = "openshift.io/build.clone-of" // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build BuildNumberAnnotation = "openshift.io/build.number" // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build BuildPodNameAnnotation = "openshift.io/build.pod-name" // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrappering) BuildJenkinsConsoleLogURLAnnotation = "openshift.io/jenkins-console-log-url" // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url" // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri" // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-" // BuildConfigPausedAnnotation is an annotation that marks a 
BuildConfig as paused. // New Builds cannot be instantiated from a paused BuildConfig. BuildConfigPausedAnnotation = "openshift.io/build-config.paused" ) // labels const ( // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig // on which the Build is based. NOTE: The value for this label may not contain the entire // BuildConfig name because it will be truncated to maximum label length. BuildConfigLabel = "openshift.io/build-config.name" // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. // NOTE: The value for this label may not contain the entire Build name because it will be // truncated to maximum label length. BuildLabel = "openshift.io/build.name" // BuildRunPolicyLabel represents the start policy used to start the build. BuildRunPolicyLabel = "openshift.io/build.start-policy" // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces. // We keep it for backward compatibility. BuildConfigLabelDeprecated = "buildconfig" ) const ( // StatusReasonError is a generic reason for a build error condition. StatusReasonError StatusReason = "Error" // StatusReasonCannotCreateBuildPodSpec is an error condition when the build // strategy cannot create a build pod spec. StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" // StatusReasonCannotCreateBuildPod is an error condition when a build pod // cannot be created. StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" // StatusReasonInvalidOutputReference is an error condition when the build // output is an invalid reference. StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" // StatusReasonInvalidImageReference is an error condition when the build // references an invalid image. StatusReasonInvalidImageReference StatusReason = "InvalidImageReference" // StatusReasonCancelBuildFailed is an error condition when cancelling a build // fails. 
StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" // StatusReasonBuildPodDeleted is an error condition when the build pod is // deleted before build completion. StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" // StatusReasonExceededRetryTimeout is an error condition when the build has // not completed and retrying the build times out. StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" // StatusReasonMissingPushSecret indicates that the build is missing required // secret for pushing the output image. // The build will stay in the pending state until the secret is created, or the build times out. StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" // StatusReasonPushImageToRegistryFailed indicates that an image failed to be // pushed to the registry. StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" // StatusReasonPullBuilderImageFailed indicates that we failed to pull the // builder image. StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" // StatusReasonFetchSourceFailed indicates that fetching the source of the // build has failed. StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" // StatusReasonFetchImageContentFailed indicates that the fetching of an image and extracting // its contents for inclusion in the build has failed. StatusReasonFetchImageContentFailed StatusReason = "FetchImageContentFailed" // StatusReasonManageDockerfileFailed indicates that the set up of the Dockerfile for the build // has failed. 
StatusReasonManageDockerfileFailed StatusReason = "ManageDockerfileFailed" // StatusReasonInvalidContextDirectory indicates that the supplied // contextDir does not exist StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory" // StatusReasonCancelledBuild indicates that the build was cancelled by the // user. StatusReasonCancelledBuild StatusReason = "CancelledBuild" // StatusReasonDockerBuildFailed indicates that the container image build strategy has // failed. StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" // StatusReasonBuildPodExists indicates that the build tried to create a // build pod but one was already present. StatusReasonBuildPodExists StatusReason = "BuildPodExists" // StatusReasonNoBuildContainerStatus indicates that the build failed because the // the build pod has no container statuses. StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" // StatusReasonFailedContainer indicates that the pod for the build has at least // one container with a non-zero exit status. StatusReasonFailedContainer StatusReason = "FailedContainer" // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing // the supplied options for environment variables in the build strategy environment StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable" // StatusReasonGenericBuildFailed is the reason associated with a broad // range of build failures. StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure // to look up the service account associated with the BuildConfig. 
StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted // from its node StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" ) // env vars // WhitelistEnvVarNames is a list of special env vars allows s2i containers var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "HTTP_PROXY", "HTTPS_PROXY", "LANG", "NO_PROXY"} // env vars const ( // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when // performing a custom build, if needed. CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in // Source builder images AllowedUIDs = "ALLOWED_UIDS" // DropCapabilities is an environment variable that contains a list of capabilities to drop when // executing a Source build DropCapabilities = "DROP_CAPS" ) // keys inside of secrets and configmaps const ( // WebHookSecretKey is the key used to identify the value containing the webhook invocation // secret within a secret referenced by a webhook trigger. WebHookSecretKey = "WebHookSecretKey" // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file. RegistryConfKey = "registries.conf" // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file. SignaturePolicyKey = "policy.json" // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods. ServiceCAKey = "service-ca.crt" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/types.go
vendor/github.com/openshift/api/build/v1/types.go
package v1 import ( "fmt" "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient // +genclient:method=UpdateDetails,verb=update,subresource=details // +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Build encapsulates the inputs needed to produce a new deployable image, as well as // the status of the execution and a reference to the Pod which executed the build. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Build struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is all the inputs used to execute the build. Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // status is the current status of the build. // +optional Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // BuildSpec has the information to represent a build and also additional // information about a build type BuildSpec struct { // CommonSpec is the information that represents a build CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` // triggeredBy describes which triggers started the most recent update to the // build configuration and contains information about those triggers. TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,2,rep,name=triggeredBy"` } // OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. 
// +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false type OptionalNodeSelector map[string]string func (t OptionalNodeSelector) String() string { return fmt.Sprintf("%v", map[string]string(t)) } // CommonSpec encapsulates all the inputs necessary to represent a build. type CommonSpec struct { // serviceAccount is the name of the ServiceAccount to use to run the pod // created by this build. // The pod will be allowed to use secrets referenced by the ServiceAccount ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"` // source describes the SCM in use. Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` // revision is the information from the source for a specific repo snapshot. // This is optional. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` // strategy defines how to perform a build. Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"` // output describes the container image the Strategy should produce. Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"` // resources computes resource requirements to execute the build. Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"` // postCommit is a build hook executed after the build output image is // committed, before it is pushed to a registry. 
PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"` // completionDeadlineSeconds is an optional duration in seconds, counted from // the time when a build pod gets scheduled in the system, that the build may // be active on a node before the system actively tries to terminate the // build; value must be positive integer CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"` // nodeSelector is a selector which must be true for the build pod to fit on a node // If nil, it can be overridden by default build nodeselector values for the cluster. // If set to an empty map or a map with any values, default build nodeselector values // are ignored. // +optional NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"` // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in // the cluster's proxy configuration, into the build. This lets processes within a build trust // components signed by custom PKI certificate authorities, such as private artifact // repositories and HTTPS proxies. // // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are // managed by the build container, and any changes to this directory or its subdirectories (for // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image. MountTrustedCA *bool `json:"mountTrustedCA,omitempty" protobuf:"varint,10,opt,name=mountTrustedCA"` } // BuildTriggerCause holds information about a triggered build. It is used for // displaying build trigger data for each build and build configuration in oc // describe. It is also used to describe which triggers led to the most recent // update in the build configuration. type BuildTriggerCause struct { // message is used to store a human readable message for why the build was // triggered. 
E.g.: "Manually triggered by user", "Configuration change",etc. Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` // genericWebHook holds data about a builds generic webhook trigger. GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"` // gitHubWebHook represents data for a GitHub webhook that fired a //specific build. GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"` // imageChangeBuild stores information about an imagechange event // that triggered a new build. ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"` // GitLabWebHook represents data for a GitLab webhook that fired a specific // build. GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"` // BitbucketWebHook represents data for a Bitbucket webhook that fired a // specific build. BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"` } // GenericWebHookCause holds information about a generic WebHook that // triggered a build. type GenericWebHookCause struct { // revision is an optional field that stores the git source revision // information of the generic webhook trigger when it is available. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` // secret is the obfuscated webhook secret that triggered a build. Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` } // GitHubWebHookCause has information about a GitHub webhook that triggered a // build. type GitHubWebHookCause struct { // revision is the git revision information of the trigger. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` // secret is the obfuscated webhook secret that triggered a build. 
Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` } // CommonWebHookCause factors out the identical format of these webhook // causes into struct so we can share it in the specific causes; it is too late for // GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. type CommonWebHookCause struct { // Revision is the git source revision information of the trigger. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` // Secret is the obfuscated webhook secret that triggered a build. Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` } // GitLabWebHookCause has information about a GitLab webhook that triggered a // build. type GitLabWebHookCause struct { CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` } // BitbucketWebHookCause has information about a Bitbucket webhook that triggered a // build. type BitbucketWebHookCause struct { CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` } // ImageChangeCause contains information about the image that triggered a // build type ImageChangeCause struct { // imageID is the ID of the image that triggered a new build. ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"` // fromRef contains detailed information about an image that triggered a // build. FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"` } // BuildStatus contains the status of a build type BuildStatus struct { // phase is the point in the build lifecycle. Possible values are // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` // cancelled describes if a cancel event was triggered for the build. 
Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` // message is a human-readable message indicating details about why the build has this status. Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // startTimestamp is a timestamp representing the server time when this Build started // running in a Pod. // It is represented in RFC3339 form and is in UTC. StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` // completionTimestamp is a timestamp representing the server time when this Build was // finished, whether that build failed or succeeded. It reflects the time at which // the Pod running the Build terminated. // It is represented in RFC3339 form and is in UTC. CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` // duration contains time.Duration object describing build time. Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` // outputDockerImageReference contains a reference to the container image that // will be built by this build. Its value is computed from // Build.Spec.Output.To, and should include the registry address, so that // it can be used to push and pull the image. OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` // config is an ObjectReference to the BuildConfig this Build is based on. Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` // output describes the container image the build has produced. 
Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"` // stages contains details about each stage that occurs during the build // including start time, duration (in milliseconds), and the steps that // occured within each stage. Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"` // logSnippet is the last few lines of the build log. This value is only set for builds that failed. LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` // Conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` } // StageInfo contains details about a build stage. type StageInfo struct { // name is a unique identifier for each build stage that occurs. Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // startTime is a timestamp representing the server time when this Stage started. // It is represented in RFC3339 form and is in UTC. StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // durationMilliseconds identifies how long the stage took // to complete in milliseconds. // Note: the duration of a stage can exceed the sum of the duration of the steps within // the stage as not all actions are accounted for in explicit build steps. DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"` // steps contains details about each step that occurs during a build stage // including start time and duration in milliseconds. Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"` } // StageName is the unique identifier for each build stage. 
type StageName string // Valid values for StageName const ( // StageFetchInputs fetches any inputs such as source code. StageFetchInputs StageName = "FetchInputs" // StagePullImages pulls any images that are needed such as // base images or input images. StagePullImages StageName = "PullImages" // StageBuild performs the steps necessary to build the image. StageBuild StageName = "Build" // StagePostCommit executes any post commit steps. StagePostCommit StageName = "PostCommit" // StagePushImage pushes the image to the node. StagePushImage StageName = "PushImage" ) // StepInfo contains details about a build step. type StepInfo struct { // name is a unique identifier for each build step. Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // startTime is a timestamp representing the server time when this Step started. // it is represented in RFC3339 form and is in UTC. StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // durationMilliseconds identifies how long the step took // to complete in milliseconds. DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"` } // StepName is a unique identifier for each build step. type StepName string // Valid values for StepName const ( // StepExecPostCommitHook executes the buildconfigs post commit hook. StepExecPostCommitHook StepName = "RunPostCommitHook" // StepFetchGitSource fetches source code for the build. StepFetchGitSource StepName = "FetchGitSource" // StepPullBaseImage pulls a base image for the build. StepPullBaseImage StepName = "PullBaseImage" // StepPullInputImage pulls an input image for the build. StepPullInputImage StepName = "PullInputImage" // StepPushImage pushes an image to the registry. StepPushImage StepName = "PushImage" // StepPushDockerImage pushes a container image to the registry. 
StepPushDockerImage StepName = "PushDockerImage" //StepDockerBuild performs the container image build StepDockerBuild StepName = "DockerBuild" ) // BuildPhase represents the status of a build at a point in time. type BuildPhase string // Valid values for BuildPhase. const ( // BuildPhaseNew is automatically assigned to a newly created build. BuildPhaseNew BuildPhase = "New" // BuildPhasePending indicates that a pod name has been assigned and a build is // about to start running. BuildPhasePending BuildPhase = "Pending" // BuildPhaseRunning indicates that a pod has been created and a build is running. BuildPhaseRunning BuildPhase = "Running" // BuildPhaseComplete indicates that a build has been successful. BuildPhaseComplete BuildPhase = "Complete" // BuildPhaseFailed indicates that a build has executed and failed. BuildPhaseFailed BuildPhase = "Failed" // BuildPhaseError indicates that an error prevented the build from executing. BuildPhaseError BuildPhase = "Error" // BuildPhaseCancelled indicates that a running/pending build was stopped from executing. BuildPhaseCancelled BuildPhase = "Cancelled" ) type BuildConditionType string // BuildCondition describes the state of a build at a certain point. type BuildCondition struct { // Type of build condition. Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"` // Status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time this condition was updated. LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` // The last time the condition transitioned from one status to another. LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` // The reason for the condition's last transition. 
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` // A human readable message indicating details about the transition. Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // StatusReason is a brief CamelCase string that describes a temporary or // permanent build error condition, meant for machine parsing and tidy display // in the CLI. type StatusReason string // BuildStatusOutput contains the status of the built image. type BuildStatusOutput struct { // to describes the status of the built image being pushed to a registry. To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"` } // BuildStatusOutputTo describes the status of the built image with regards to // image registry to which it was supposed to be pushed. type BuildStatusOutputTo struct { // imageDigest is the digest of the built container image. The digest uniquely // identifies the image in the registry to which it was pushed. // // Please note that this field may not always be set even if the push // completes successfully - e.g. when the registry returns no digest or // returns it in a format that the builder doesn't understand. ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"` } // BuildSourceType is the type of SCM used. type BuildSourceType string // Valid values for BuildSourceType. const ( //BuildSourceGit instructs a build to use a Git source control repository as the build input. BuildSourceGit BuildSourceType = "Git" // BuildSourceDockerfile uses a Dockerfile as the start of a build BuildSourceDockerfile BuildSourceType = "Dockerfile" // BuildSourceBinary indicates the build will accept a Binary file as input. 
BuildSourceBinary BuildSourceType = "Binary" // BuildSourceImage indicates the build will accept an image as input BuildSourceImage BuildSourceType = "Image" // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies) BuildSourceNone BuildSourceType = "None" ) // BuildSource is the SCM used for the build. type BuildSource struct { // type of build input to accept // +k8s:conversion-gen=false // +optional Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` // binary builds accept a binary as their input. The binary is generally assumed to be a tar, // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build // context and an optional Dockerfile may be specified to override any Dockerfile in the // build context. For Source builds, this is assumed to be an archive as described above. For // Source and container image builds, if binary.asFile is set the build will receive a directory with // a single file. contextDir may be used when an archive is provided. Custom builds will // receive this binary as input on STDIN. Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"` // dockerfile is the raw contents of a Dockerfile which should be built. When this option is // specified, the FROM may be modified based on your strategy base image and additional ENV // stanzas from your strategy environment will be added after the FROM, but before the rest // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like // git - in those cases the Git repo will have any innate Dockerfile replaced in the context // dir. 
Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"` // git contains optional information about git build source Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"` // images describes a set of images to be used to provide source for the build Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"` // contextDir specifies the sub-directory where the source code for the application exists. // This allows to have buildable sources in directory other than root of // repository. ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"` // sourceSecret is the name of a Secret that would be used for setting // up the authentication for cloning private repository. // The secret contains valid credentials for remote repository, where the // data's key represent the authentication method to be used and value is // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"` // secrets represents a list of secrets and their destinations that will // be used only for the build. Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"` // configMaps represents a list of configMaps and their destinations that will // be used for the build. ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"` } // ImageSource is used to describe build source that will be extracted from an image or used during a // multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. // A pull secret can be specified to pull the image from an external registry or override the default // service account secret if pulling from the internal registry. 
Image sources can either be used to // extract content from an image and place it into the build context along with the repository source, // or used directly during a multi-stage container image build to allow content to be copied without overwriting // the contents of the repository source (see the 'paths' and 'as' fields). type ImageSource struct { // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to // copy source from. From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` // A list of image names that this source will be used in place of during a multi-stage container image // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile // does not reference an image source it is ignored. This field and paths may both be set, in which case // the contents will be used twice. // +optional As []string `json:"as,omitempty" protobuf:"bytes,4,rep,name=as"` // paths is a list of source and destination paths to copy from the image. This content will be copied // into the build context prior to starting the build. If no paths are set, the build context will // not be altered. // +optional Paths []ImageSourcePath `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"` // pullSecret is a reference to a secret to be used to pull the image from a registry // If the image is pulled from the OpenShift registry, this field does not need to be set. PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"` } // ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. type ImageSourcePath struct { // sourcePath is the absolute path of the file or directory inside the image to // copy to the build directory. If the source path ends in /. 
then the content of // the directory will be copied, but the directory itself will not be created at the // destination. SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"` // destinationDir is the relative directory within the build directory // where files copied from the image are placed. DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"` } // SecretBuildSource describes a secret and its destination directory that will be // used only at the build time. The content of the secret referenced here will // be copied into the destination directory instead of mounting. type SecretBuildSource struct { // secret is a reference to an existing secret that you want to use in your // build. Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"` // destinationDir is the directory where the files from the secret should be // available for the build time. // For the Source build strategy, these will be injected into a container // where the assemble script runs. Later, when the script finishes, all files // injected will be truncated to zero length. // For the container image build strategy, these will be copied into the build // directory, where the Dockerfile is located, so users can ADD or COPY them // during container image build. DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` } // ConfigMapBuildSource describes a configmap and its destination directory that will be // used only at the build time. The content of the configmap referenced here will // be copied into the destination directory instead of mounting. type ConfigMapBuildSource struct { // configMap is a reference to an existing configmap that you want to use in your // build. 
ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"` // destinationDir is the directory where the files from the configmap should be // available for the build time. // For the Source build strategy, these will be injected into a container // where the assemble script runs. // For the container image build strategy, these will be copied into the build // directory, where the Dockerfile is located, so users can ADD or COPY them // during container image build. DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` } // BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, // where the file will be extracted and used as the build source. type BinaryBuildSource struct { // asFile indicates that the provided binary input should be considered a single file // within the build input. For example, specifying "webapp.war" would place the provided // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. // The custom strategy receives this binary as standard input. This filename may not // contain slashes or be '..' or '.'. 
AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"` } // SourceRevision is the revision or commit information from the source for the build type SourceRevision struct { // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' // +k8s:conversion-gen=false Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` // Git contains information about git-based build source Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` } // GitSourceRevision is the commit information from a git source for a build type GitSourceRevision struct { // commit is the commit hash identifying a specific commit Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"` // author is the author of a specific commit Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"` // committer is the committer of a specific commit Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"` // message is the description of a specific commit Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` } // ProxyConfig defines what proxies to use for an operation type ProxyConfig struct { // httpProxy is a proxy used to reach the git repository over http HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` // httpsProxy is a proxy used to reach the git repository over https HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` // noProxy is the list of domains for which the proxy should not be used NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"` } // GitBuildSource defines the parameters of a Git SCM type GitBuildSource struct { // uri points to the source that will be built. 
The structure of the source // will depend on the type of build to run URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"` // ref is the branch/tag/ref to build. Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` // proxyConfig defines the proxies to use for the git clone operation. Values // not set here are inherited from cluster-wide build git proxy settings. ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"` } // SourceControlUser defines the identity of a user of source control type SourceControlUser struct { // name of the source control user Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // email of the source control user Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"` } // BuildStrategy contains the details of how to perform a build. type BuildStrategy struct { // type is the kind of build strategy. // +k8s:conversion-gen=false // +optional Type BuildStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"` // dockerStrategy holds the parameters to the container image build strategy. DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"` // sourceStrategy holds the parameters to the Source build strategy. SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"` // customStrategy holds the parameters to the Custom build strategy CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"` // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. // Deprecated: use OpenShift Pipelines JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"` } // BuildStrategyType describes a particular way of performing a build. 
type BuildStrategyType string // Valid values for BuildStrategyType. const ( // DockerBuildStrategyType performs builds using a Dockerfile. DockerBuildStrategyType BuildStrategyType = "Docker" // SourceBuildStrategyType performs builds build using Source To Images with a Git repository // and a builder image. SourceBuildStrategyType BuildStrategyType = "Source" // CustomBuildStrategyType performs builds using custom builder container image. CustomBuildStrategyType BuildStrategyType = "Custom" // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkine Pipeline. JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline" ) // CustomBuildStrategy defines input parameters specific to Custom build. type CustomBuildStrategy struct { // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which // the container image should be pulled From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` // pullSecret is the name of a Secret that would be used for setting up // the authentication for pulling the container images from the private Docker // registries PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/register.go
vendor/github.com/openshift/api/build/v1/register.go
package v1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( GroupName = "build.openshift.io" GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme // SchemeGroupVersion generated code relies on this name // Deprecated SchemeGroupVersion = GroupVersion // AddToScheme exists solely to keep the old generators creating valid code // DEPRECATED AddToScheme = schemeBuilder.AddToScheme ) // Resource generated code relies on this being here, but it logically belongs to the group // DEPRECATED func Resource(resource string) schema.GroupResource { return schema.GroupResource{Group: GroupName, Resource: resource} } // addKnownTypes adds types to API group func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &Build{}, &BuildList{}, &BuildConfig{}, &BuildConfigList{}, &BuildLog{}, &BuildRequest{}, &BuildLogOptions{}, &BinaryBuildRequestOptions{}, // This is needed for webhooks &corev1.PodProxyOptions{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_BinaryBuildRequestOptions = map[string]string{ "": "BinaryBuildRequestOptions are the options required to fully speficy a binary build request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive", "revision.commit": "revision.commit is the value identifying a specific commit", "revision.message": "revision.message is the description of a specific commit", "revision.authorName": "revision.authorName of the source control user", "revision.authorEmail": "revision.authorEmail of the source control user", "revision.committerName": "revision.committerName of the source control user", "revision.committerEmail": "revision.committerEmail of the source control user", } func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string { return map_BinaryBuildRequestOptions } var map_BinaryBuildSource = map[string]string{ "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.", "asFile": "asFile indicates that the provided 
binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.", } func (BinaryBuildSource) SwaggerDoc() map[string]string { return map_BinaryBuildSource } var map_BitbucketWebHookCause = map[string]string{ "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.", } func (BitbucketWebHookCause) SwaggerDoc() map[string]string { return map_BitbucketWebHookCause } var map_Build = map[string]string{ "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is all the inputs used to execute the build.", "status": "status is the current status of the build.", } func (Build) SwaggerDoc() map[string]string { return map_Build } var map_BuildCondition = map[string]string{ "": "BuildCondition describes the state of a build at a certain point.", "type": "Type of build condition.", "status": "Status of the condition, one of True, False, Unknown.", "lastUpdateTime": "The last time this condition was updated.", "lastTransitionTime": "The last time the condition transitioned from one status to another.", "reason": "The reason for the condition's last transition.", "message": "A human readable message indicating details about the transition.", } func (BuildCondition) SwaggerDoc() map[string]string { return map_BuildCondition } var map_BuildConfig = map[string]string{ "": "Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", "status": "status holds any relevant information about a build config", } func (BuildConfig) SwaggerDoc() map[string]string { return map_BuildConfig } var map_BuildConfigList = map[string]string{ "": "BuildConfigList is a collection of BuildConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of build configs", } func (BuildConfigList) SwaggerDoc() map[string]string { return map_BuildConfigList } var map_BuildConfigSpec = map[string]string{ "": "BuildConfigSpec describes when and how builds are created", "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. 
If removed after the BuildConfig has been created, all failed builds are retained.", } func (BuildConfigSpec) SwaggerDoc() map[string]string { return map_BuildConfigSpec } var map_BuildConfigStatus = map[string]string{ "": "BuildConfigStatus contains current state of the build config object.", "lastVersion": "lastVersion is used to inform about number of last triggered build.", "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", } func (BuildConfigStatus) SwaggerDoc() map[string]string { return map_BuildConfigStatus } var map_BuildList = map[string]string{ "": "BuildList is a collection of Builds.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of builds", } func (BuildList) SwaggerDoc() map[string]string { return map_BuildList } var map_BuildLog = map[string]string{ "": "BuildLog is the (unused) resource associated with the build log redirector\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", } func (BuildLog) SwaggerDoc() map[string]string { return map_BuildLog } var map_BuildLogOptions = map[string]string{ "": "BuildLogOptions is the REST options for a build log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "container": "cointainer for which to stream logs. 
Defaults to only container if there is one container in the pod.", "follow": "follow if true indicates that the build log should be streamed until the build terminates.", "previous": "previous returns previous build logs. Defaults to false.", "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", "version": "version of the build for which to view logs.", "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. 
This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", } func (BuildLogOptions) SwaggerDoc() map[string]string { return map_BuildLogOptions } var map_BuildOutput = map[string]string{ "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", } func (BuildOutput) SwaggerDoc() map[string]string { return map_BuildOutput } var map_BuildPostCommitSpec = map[string]string{ "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. 
It will fail if the script or command return a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test --verbose\",\n\t }\n\n\tThe above is a convenient form which is equivalent to:\n\n\t \"postCommit\": {\n\t \"command\": [\"/bin/sh\", \"-ic\"],\n\t \"args\": [\"rake test --verbose\"]\n\t }\n\n2. A command as the image entrypoint:\n\n\t \"postCommit\": {\n\t \"commit\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n\tCommand overrides the image entrypoint in the exec form, as documented in\n\tDocker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n\t \"postCommit\": {\n\t\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t\t }\n\n\t This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test $1\",\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is useful if you need to pass arguments that would otherwise be\n\thard to quote properly in the shell script. In the script, $0 will be\n\t\"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\"],\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.", "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. 
In all other cases, using Script might be more convenient.", "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.", "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", } func (BuildPostCommitSpec) SwaggerDoc() map[string]string { return map_BuildPostCommitSpec } var map_BuildRequest = map[string]string{ "": "BuildRequest is the resource used to pass parameters to build generator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "revision": "revision is the information from the source for a specific repo snapshot.", "triggeredByImage": "triggeredByImage is the Image that triggered this build.", "from": "from is the reference to the ImageStreamTag that triggered the build.", "binary": "binary indicates a request to build from a binary provided to the builder", "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. 
If the BuildConfig in the generator doesn't match, a build will not be generated.", "env": "env contains additional environment variables you want to pass into a builder container.", "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build", } func (BuildRequest) SwaggerDoc() map[string]string { return map_BuildRequest } var map_BuildSource = map[string]string{ "": "BuildSource is the SCM used for the build.", "type": "type of build input to accept", "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.", "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. 
The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", "git": "git contains optional information about git build source", "images": "images describes a set of images to be used to provide source for the build", "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows to have buildable sources in directory other than root of repository.", "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning private repository. The secret contains valid credentials for remote repository, where the data's key represent the authentication method to be used and value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.", "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.", } func (BuildSource) SwaggerDoc() map[string]string { return map_BuildSource } var map_BuildSpec = map[string]string{ "": "BuildSpec has the information to represent a build and also additional information about a build", "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", } func (BuildSpec) SwaggerDoc() map[string]string { return map_BuildSpec } var map_BuildStatus = map[string]string{ "": "BuildStatus contains the status of a build", "phase": "phase is the point in the build lifecycle. 
Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".", "cancelled": "cancelled describes if a cancel event was triggered for the build.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", "message": "message is a human-readable message indicating details about why the build has this status.", "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", "duration": "duration contains time.Duration object describing build time.", "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", "config": "config is an ObjectReference to the BuildConfig this Build is based on.", "output": "output describes the container image the build has produced.", "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occured within each stage.", "logSnippet": "logSnippet is the last few lines of the build log. 
This value is only set for builds that failed.", "conditions": "Conditions represents the latest available observations of a build's current state.", } func (BuildStatus) SwaggerDoc() map[string]string { return map_BuildStatus } var map_BuildStatusOutput = map[string]string{ "": "BuildStatusOutput contains the status of the built image.", "to": "to describes the status of the built image being pushed to a registry.", } func (BuildStatusOutput) SwaggerDoc() map[string]string { return map_BuildStatusOutput } var map_BuildStatusOutputTo = map[string]string{ "": "BuildStatusOutputTo describes the status of the built image with regards to image registry to which it was supposed to be pushed.", "imageDigest": "imageDigest is the digest of the built container image. The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.", } func (BuildStatusOutputTo) SwaggerDoc() map[string]string { return map_BuildStatusOutputTo } var map_BuildStrategy = map[string]string{ "": "BuildStrategy contains the details of how to perform a build.", "type": "type is the kind of build strategy.", "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", "customStrategy": "customStrategy holds the parameters to the Custom build strategy", "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", } func (BuildStrategy) SwaggerDoc() map[string]string { return map_BuildStrategy } var map_BuildTriggerCause = map[string]string{ "": "BuildTriggerCause holds information about a triggered build. 
It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", "message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", } func (BuildTriggerCause) SwaggerDoc() map[string]string { return map_BuildTriggerCause } var map_BuildTriggerPolicy = map[string]string{ "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", "type": "type is the type of build trigger. 
Valid values:\n\n- GitHub GitHubWebHookBuildTriggerType represents a trigger that launches builds on GitHub webhook invocations\n\n- Generic GenericWebHookBuildTriggerType represents a trigger that launches builds on generic webhook invocations\n\n- GitLab GitLabWebHookBuildTriggerType represents a trigger that launches builds on GitLab webhook invocations\n\n- Bitbucket BitbucketWebHookBuildTriggerType represents a trigger that launches builds on Bitbucket webhook invocations\n\n- ImageChange ImageChangeBuildTriggerType represents a trigger that launches builds on availability of a new version of an image\n\n- ConfigChange ConfigChangeBuildTriggerType will trigger a build on an initial build config creation WARNING: In the future the behavior will change to trigger a build on any config change", "github": "github contains the parameters for a GitHub webhook type of trigger", "generic": "generic contains the parameters for a Generic webhook type of trigger", "imageChange": "imageChange contains parameters for an ImageChange type of trigger", "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger", "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger", } func (BuildTriggerPolicy) SwaggerDoc() map[string]string { return map_BuildTriggerPolicy } var map_BuildVolume = map[string]string{ "": "BuildVolume describes a volume that is made available to build pods, such that it can be mounted into buildah's runtime environment. Only a subset of Kubernetes Volume sources are supported.", "name": "name is a unique identifier for this BuildVolume. It must conform to the Kubernetes DNS label standard and be unique within the pod. Names that collide with those added by the build controller will result in a failed build with an error message detailing which name caused the error. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "source": "source represents the location and type of the mounted volume.", "mounts": "mounts represents the location of the volume in the image build container", } func (BuildVolume) SwaggerDoc() map[string]string { return map_BuildVolume } var map_BuildVolumeMount = map[string]string{ "": "BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.", "destinationPath": "destinationPath is the path within the buildah runtime environment at which the volume should be mounted. The transient mount within the build image and the backing volume will both be mounted read only. Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated by the builder process Paths that collide with those added by the build controller will result in a failed build with an error message detailing which path caused the error.", } func (BuildVolumeMount) SwaggerDoc() map[string]string { return map_BuildVolumeMount } var map_BuildVolumeSource = map[string]string{ "": "BuildVolumeSource represents the source of a volume to mount Only one of its supported types may be specified at any given time.", "type": "type is the BuildVolumeSourceType for the volume source. Type must match the populated volume source. Valid types are: Secret, ConfigMap", "secret": "secret represents a Secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "configMap": "configMap represents a ConfigMap that should populate this volume", "csi": "csi represents ephemeral storage provided by external CSI drivers which support this capability", } func (BuildVolumeSource) SwaggerDoc() map[string]string { return map_BuildVolumeSource } var map_CommonSpec = map[string]string{ "": "CommonSpec encapsulates all the inputs necessary to represent a build.", "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount", "source": "source describes the SCM in use.", "revision": "revision is the information from the source for a specific repo snapshot. This is optional.", "strategy": "strategy defines how to perform a build.", "output": "output describes the container image the Strategy should produce.", "resources": "resources computes resource requirements to execute the build.", "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer", "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", "mountTrustedCA": "mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in the cluster's proxy configuration, into the build. 
This lets processes within a build trust components signed by custom PKI certificate authorities, such as private artifact repositories and HTTPS proxies.\n\nWhen this field is set to true, the contents of `/etc/pki/ca-trust` within the build are managed by the build container, and any changes to this directory or its subdirectories (for example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.", } func (CommonSpec) SwaggerDoc() map[string]string { return map_CommonSpec } var map_CommonWebHookCause = map[string]string{ "": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", "revision": "Revision is the git source revision information of the trigger.", "secret": "Secret is the obfuscated webhook secret that triggered a build.", } func (CommonWebHookCause) SwaggerDoc() map[string]string { return map_CommonWebHookCause } var map_ConfigMapBuildSource = map[string]string{
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/doc.go
vendor/github.com/openshift/api/build/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=build.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/generated.pb.go
vendor/github.com/openshift/api/build/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/build/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } func (*BinaryBuildRequestOptions) ProtoMessage() {} func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{0} } func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src) } func (m *BinaryBuildRequestOptions) XXX_Size() int { return m.Size() } func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() { xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m) } var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } func (*BinaryBuildSource) ProtoMessage() {} func (*BinaryBuildSource) 
Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{1} } func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BinaryBuildSource) XXX_Merge(src proto.Message) { xxx_messageInfo_BinaryBuildSource.Merge(m, src) } func (m *BinaryBuildSource) XXX_Size() int { return m.Size() } func (m *BinaryBuildSource) XXX_DiscardUnknown() { xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m) } var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} } func (*BitbucketWebHookCause) ProtoMessage() {} func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{2} } func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) { xxx_messageInfo_BitbucketWebHookCause.Merge(m, src) } func (m *BitbucketWebHookCause) XXX_Size() int { return m.Size() } func (m *BitbucketWebHookCause) XXX_DiscardUnknown() { xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m) } var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo func (m *Build) Reset() { *m = Build{} } func (*Build) ProtoMessage() {} func (*Build) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{3} } func (m *Build) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], 
nil } func (m *Build) XXX_Merge(src proto.Message) { xxx_messageInfo_Build.Merge(m, src) } func (m *Build) XXX_Size() int { return m.Size() } func (m *Build) XXX_DiscardUnknown() { xxx_messageInfo_Build.DiscardUnknown(m) } var xxx_messageInfo_Build proto.InternalMessageInfo func (m *BuildCondition) Reset() { *m = BuildCondition{} } func (*BuildCondition) ProtoMessage() {} func (*BuildCondition) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{4} } func (m *BuildCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildCondition) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildCondition.Merge(m, src) } func (m *BuildCondition) XXX_Size() int { return m.Size() } func (m *BuildCondition) XXX_DiscardUnknown() { xxx_messageInfo_BuildCondition.DiscardUnknown(m) } var xxx_messageInfo_BuildCondition proto.InternalMessageInfo func (m *BuildConfig) Reset() { *m = BuildConfig{} } func (*BuildConfig) ProtoMessage() {} func (*BuildConfig) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{5} } func (m *BuildConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildConfig.Merge(m, src) } func (m *BuildConfig) XXX_Size() int { return m.Size() } func (m *BuildConfig) XXX_DiscardUnknown() { xxx_messageInfo_BuildConfig.DiscardUnknown(m) } var xxx_messageInfo_BuildConfig proto.InternalMessageInfo func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } func (*BuildConfigList) ProtoMessage() {} func (*BuildConfigList) Descriptor() ([]byte, []int) { 
return fileDescriptor_2ba579f6f004cb75, []int{6} } func (m *BuildConfigList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildConfigList) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildConfigList.Merge(m, src) } func (m *BuildConfigList) XXX_Size() int { return m.Size() } func (m *BuildConfigList) XXX_DiscardUnknown() { xxx_messageInfo_BuildConfigList.DiscardUnknown(m) } var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } func (*BuildConfigSpec) ProtoMessage() {} func (*BuildConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{7} } func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildConfigSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildConfigSpec.Merge(m, src) } func (m *BuildConfigSpec) XXX_Size() int { return m.Size() } func (m *BuildConfigSpec) XXX_DiscardUnknown() { xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m) } var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } func (*BuildConfigStatus) ProtoMessage() {} func (*BuildConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{8} } func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildConfigStatus) XXX_Merge(src 
proto.Message) { xxx_messageInfo_BuildConfigStatus.Merge(m, src) } func (m *BuildConfigStatus) XXX_Size() int { return m.Size() } func (m *BuildConfigStatus) XXX_DiscardUnknown() { xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m) } var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo func (m *BuildList) Reset() { *m = BuildList{} } func (*BuildList) ProtoMessage() {} func (*BuildList) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{9} } func (m *BuildList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildList) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildList.Merge(m, src) } func (m *BuildList) XXX_Size() int { return m.Size() } func (m *BuildList) XXX_DiscardUnknown() { xxx_messageInfo_BuildList.DiscardUnknown(m) } var xxx_messageInfo_BuildList proto.InternalMessageInfo func (m *BuildLog) Reset() { *m = BuildLog{} } func (*BuildLog) ProtoMessage() {} func (*BuildLog) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{10} } func (m *BuildLog) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildLog) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildLog.Merge(m, src) } func (m *BuildLog) XXX_Size() int { return m.Size() } func (m *BuildLog) XXX_DiscardUnknown() { xxx_messageInfo_BuildLog.DiscardUnknown(m) } var xxx_messageInfo_BuildLog proto.InternalMessageInfo func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } func (*BuildLogOptions) ProtoMessage() {} func (*BuildLogOptions) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{11} } func (m 
*BuildLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildLogOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildLogOptions.Merge(m, src) } func (m *BuildLogOptions) XXX_Size() int { return m.Size() } func (m *BuildLogOptions) XXX_DiscardUnknown() { xxx_messageInfo_BuildLogOptions.DiscardUnknown(m) } var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo func (m *BuildOutput) Reset() { *m = BuildOutput{} } func (*BuildOutput) ProtoMessage() {} func (*BuildOutput) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{12} } func (m *BuildOutput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildOutput) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildOutput.Merge(m, src) } func (m *BuildOutput) XXX_Size() int { return m.Size() } func (m *BuildOutput) XXX_DiscardUnknown() { xxx_messageInfo_BuildOutput.DiscardUnknown(m) } var xxx_messageInfo_BuildOutput proto.InternalMessageInfo func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } func (*BuildPostCommitSpec) ProtoMessage() {} func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{13} } func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildPostCommitSpec.Merge(m, src) } func (m 
*BuildPostCommitSpec) XXX_Size() int { return m.Size() } func (m *BuildPostCommitSpec) XXX_DiscardUnknown() { xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m) } var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo func (m *BuildRequest) Reset() { *m = BuildRequest{} } func (*BuildRequest) ProtoMessage() {} func (*BuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{14} } func (m *BuildRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildRequest.Merge(m, src) } func (m *BuildRequest) XXX_Size() int { return m.Size() } func (m *BuildRequest) XXX_DiscardUnknown() { xxx_messageInfo_BuildRequest.DiscardUnknown(m) } var xxx_messageInfo_BuildRequest proto.InternalMessageInfo func (m *BuildSource) Reset() { *m = BuildSource{} } func (*BuildSource) ProtoMessage() {} func (*BuildSource) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{15} } func (m *BuildSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildSource) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildSource.Merge(m, src) } func (m *BuildSource) XXX_Size() int { return m.Size() } func (m *BuildSource) XXX_DiscardUnknown() { xxx_messageInfo_BuildSource.DiscardUnknown(m) } var xxx_messageInfo_BuildSource proto.InternalMessageInfo func (m *BuildSpec) Reset() { *m = BuildSpec{} } func (*BuildSpec) ProtoMessage() {} func (*BuildSpec) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{16} } func (m *BuildSpec) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) } func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildSpec.Merge(m, src) } func (m *BuildSpec) XXX_Size() int { return m.Size() } func (m *BuildSpec) XXX_DiscardUnknown() { xxx_messageInfo_BuildSpec.DiscardUnknown(m) } var xxx_messageInfo_BuildSpec proto.InternalMessageInfo func (m *BuildStatus) Reset() { *m = BuildStatus{} } func (*BuildStatus) ProtoMessage() {} func (*BuildStatus) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{17} } func (m *BuildStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildStatus.Merge(m, src) } func (m *BuildStatus) XXX_Size() int { return m.Size() } func (m *BuildStatus) XXX_DiscardUnknown() { xxx_messageInfo_BuildStatus.DiscardUnknown(m) } var xxx_messageInfo_BuildStatus proto.InternalMessageInfo func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} } func (*BuildStatusOutput) ProtoMessage() {} func (*BuildStatusOutput) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{18} } func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildStatusOutput) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildStatusOutput.Merge(m, src) } func (m *BuildStatusOutput) XXX_Size() int { return m.Size() } func (m *BuildStatusOutput) XXX_DiscardUnknown() { 
xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m) } var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} } func (*BuildStatusOutputTo) ProtoMessage() {} func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{19} } func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildStatusOutputTo.Merge(m, src) } func (m *BuildStatusOutputTo) XXX_Size() int { return m.Size() } func (m *BuildStatusOutputTo) XXX_DiscardUnknown() { xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m) } var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } func (*BuildStrategy) ProtoMessage() {} func (*BuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{20} } func (m *BuildStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildStrategy) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildStrategy.Merge(m, src) } func (m *BuildStrategy) XXX_Size() int { return m.Size() } func (m *BuildStrategy) XXX_DiscardUnknown() { xxx_messageInfo_BuildStrategy.DiscardUnknown(m) } var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo func (m *BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } func (*BuildTriggerCause) ProtoMessage() {} func (*BuildTriggerCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{21} } func (m 
*BuildTriggerCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildTriggerCause) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildTriggerCause.Merge(m, src) } func (m *BuildTriggerCause) XXX_Size() int { return m.Size() } func (m *BuildTriggerCause) XXX_DiscardUnknown() { xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m) } var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } func (*BuildTriggerPolicy) ProtoMessage() {} func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{22} } func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildTriggerPolicy.Merge(m, src) } func (m *BuildTriggerPolicy) XXX_Size() int { return m.Size() } func (m *BuildTriggerPolicy) XXX_DiscardUnknown() { xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m) } var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo func (m *BuildVolume) Reset() { *m = BuildVolume{} } func (*BuildVolume) ProtoMessage() {} func (*BuildVolume) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{23} } func (m *BuildVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildVolume) XXX_Merge(src proto.Message) { 
xxx_messageInfo_BuildVolume.Merge(m, src) } func (m *BuildVolume) XXX_Size() int { return m.Size() } func (m *BuildVolume) XXX_DiscardUnknown() { xxx_messageInfo_BuildVolume.DiscardUnknown(m) } var xxx_messageInfo_BuildVolume proto.InternalMessageInfo func (m *BuildVolumeMount) Reset() { *m = BuildVolumeMount{} } func (*BuildVolumeMount) ProtoMessage() {} func (*BuildVolumeMount) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{24} } func (m *BuildVolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildVolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildVolumeMount) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildVolumeMount.Merge(m, src) } func (m *BuildVolumeMount) XXX_Size() int { return m.Size() } func (m *BuildVolumeMount) XXX_DiscardUnknown() { xxx_messageInfo_BuildVolumeMount.DiscardUnknown(m) } var xxx_messageInfo_BuildVolumeMount proto.InternalMessageInfo func (m *BuildVolumeSource) Reset() { *m = BuildVolumeSource{} } func (*BuildVolumeSource) ProtoMessage() {} func (*BuildVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{25} } func (m *BuildVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BuildVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BuildVolumeSource) XXX_Merge(src proto.Message) { xxx_messageInfo_BuildVolumeSource.Merge(m, src) } func (m *BuildVolumeSource) XXX_Size() int { return m.Size() } func (m *BuildVolumeSource) XXX_DiscardUnknown() { xxx_messageInfo_BuildVolumeSource.DiscardUnknown(m) } var xxx_messageInfo_BuildVolumeSource proto.InternalMessageInfo func (m *CommonSpec) Reset() { *m = CommonSpec{} } func (*CommonSpec) ProtoMessage() {} 
func (*CommonSpec) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{26} } func (m *CommonSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *CommonSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_CommonSpec.Merge(m, src) } func (m *CommonSpec) XXX_Size() int { return m.Size() } func (m *CommonSpec) XXX_DiscardUnknown() { xxx_messageInfo_CommonSpec.DiscardUnknown(m) } var xxx_messageInfo_CommonSpec proto.InternalMessageInfo func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} } func (*CommonWebHookCause) ProtoMessage() {} func (*CommonWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{27} } func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *CommonWebHookCause) XXX_Merge(src proto.Message) { xxx_messageInfo_CommonWebHookCause.Merge(m, src) } func (m *CommonWebHookCause) XXX_Size() int { return m.Size() } func (m *CommonWebHookCause) XXX_DiscardUnknown() { xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m) } var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} } func (*ConfigMapBuildSource) ProtoMessage() {} func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{28} } func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, 
err } return b[:n], nil } func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) { xxx_messageInfo_ConfigMapBuildSource.Merge(m, src) } func (m *ConfigMapBuildSource) XXX_Size() int { return m.Size() } func (m *ConfigMapBuildSource) XXX_DiscardUnknown() { xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m) } var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } func (*CustomBuildStrategy) ProtoMessage() {} func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{29} } func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) { xxx_messageInfo_CustomBuildStrategy.Merge(m, src) } func (m *CustomBuildStrategy) XXX_Size() int { return m.Size() } func (m *CustomBuildStrategy) XXX_DiscardUnknown() { xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m) } var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } func (*DockerBuildStrategy) ProtoMessage() {} func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{30} } func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) { xxx_messageInfo_DockerBuildStrategy.Merge(m, src) } func (m *DockerBuildStrategy) XXX_Size() int { return m.Size() } func (m *DockerBuildStrategy) XXX_DiscardUnknown() { 
xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m) } var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} } func (*DockerStrategyOptions) ProtoMessage() {} func (*DockerStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{31} } func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_DockerStrategyOptions.Merge(m, src) } func (m *DockerStrategyOptions) XXX_Size() int { return m.Size() } func (m *DockerStrategyOptions) XXX_DiscardUnknown() { xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m) } var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } func (*GenericWebHookCause) ProtoMessage() {} func (*GenericWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{32} } func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *GenericWebHookCause) XXX_Merge(src proto.Message) { xxx_messageInfo_GenericWebHookCause.Merge(m, src) } func (m *GenericWebHookCause) XXX_Size() int { return m.Size() } func (m *GenericWebHookCause) XXX_DiscardUnknown() { xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m) } var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } func (*GenericWebHookEvent) ProtoMessage() {} func 
(*GenericWebHookEvent) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{33} } func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_GenericWebHookEvent.Merge(m, src) } func (m *GenericWebHookEvent) XXX_Size() int { return m.Size() } func (m *GenericWebHookEvent) XXX_DiscardUnknown() { xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m) } var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } func (*GitBuildSource) ProtoMessage() {} func (*GitBuildSource) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{34} } func (m *GitBuildSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *GitBuildSource) XXX_Merge(src proto.Message) { xxx_messageInfo_GitBuildSource.Merge(m, src) } func (m *GitBuildSource) XXX_Size() int { return m.Size() } func (m *GitBuildSource) XXX_DiscardUnknown() { xxx_messageInfo_GitBuildSource.DiscardUnknown(m) } var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } func (*GitHubWebHookCause) ProtoMessage() {} func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{35} } func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != 
nil { return nil, err } return b[:n], nil } func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) { xxx_messageInfo_GitHubWebHookCause.Merge(m, src) } func (m *GitHubWebHookCause) XXX_Size() int { return m.Size() } func (m *GitHubWebHookCause) XXX_DiscardUnknown() { xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m) } var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo func (m *GitInfo) Reset() { *m = GitInfo{} } func (*GitInfo) ProtoMessage() {} func (*GitInfo) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{36} } func (m *GitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *GitInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_GitInfo.Merge(m, src) } func (m *GitInfo) XXX_Size() int { return m.Size() } func (m *GitInfo) XXX_DiscardUnknown() { xxx_messageInfo_GitInfo.DiscardUnknown(m) } var xxx_messageInfo_GitInfo proto.InternalMessageInfo func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} } func (*GitLabWebHookCause) ProtoMessage() {} func (*GitLabWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{37} } func (m *GitLabWebHookCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) { xxx_messageInfo_GitLabWebHookCause.Merge(m, src) } func (m *GitLabWebHookCause) XXX_Size() int { return m.Size() } func (m *GitLabWebHookCause) XXX_DiscardUnknown() { xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m) } var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo func (m *GitRefInfo) Reset() { *m = 
GitRefInfo{} } func (*GitRefInfo) ProtoMessage() {} func (*GitRefInfo) Descriptor() ([]byte, []int) { return fileDescriptor_2ba579f6f004cb75, []int{38} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/build/v1/legacy.go
vendor/github.com/openshift/api/build/v1/legacy.go
package v1 import ( corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &Build{}, &BuildList{}, &BuildConfig{}, &BuildConfigList{}, &BuildLog{}, &BuildRequest{}, &BuildLogOptions{}, &BinaryBuildRequestOptions{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleScopeRestriction) DeepCopyInto(out *ClusterRoleScopeRestriction) { *out = *in if in.RoleNames != nil { in, out := &in.RoleNames, &out.RoleNames *out = make([]string, len(*in)) copy(*out, *in) } if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleScopeRestriction. func (in *ClusterRoleScopeRestriction) DeepCopy() *ClusterRoleScopeRestriction { if in == nil { return nil } out := new(ClusterRoleScopeRestriction) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthAccessToken) DeepCopyInto(out *OAuthAccessToken) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessToken. func (in *OAuthAccessToken) DeepCopy() *OAuthAccessToken { if in == nil { return nil } out := new(OAuthAccessToken) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthAccessToken) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OAuthAccessTokenList) DeepCopyInto(out *OAuthAccessTokenList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]OAuthAccessToken, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessTokenList. func (in *OAuthAccessTokenList) DeepCopy() *OAuthAccessTokenList { if in == nil { return nil } out := new(OAuthAccessTokenList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthAccessTokenList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthAuthorizeToken) DeepCopyInto(out *OAuthAuthorizeToken) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeToken. func (in *OAuthAuthorizeToken) DeepCopy() *OAuthAuthorizeToken { if in == nil { return nil } out := new(OAuthAuthorizeToken) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthAuthorizeToken) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OAuthAuthorizeTokenList) DeepCopyInto(out *OAuthAuthorizeTokenList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]OAuthAuthorizeToken, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeTokenList. func (in *OAuthAuthorizeTokenList) DeepCopy() *OAuthAuthorizeTokenList { if in == nil { return nil } out := new(OAuthAuthorizeTokenList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthAuthorizeTokenList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthClient) DeepCopyInto(out *OAuthClient) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.AdditionalSecrets != nil { in, out := &in.AdditionalSecrets, &out.AdditionalSecrets *out = make([]string, len(*in)) copy(*out, *in) } if in.RedirectURIs != nil { in, out := &in.RedirectURIs, &out.RedirectURIs *out = make([]string, len(*in)) copy(*out, *in) } if in.ScopeRestrictions != nil { in, out := &in.ScopeRestrictions, &out.ScopeRestrictions *out = make([]ScopeRestriction, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.AccessTokenMaxAgeSeconds != nil { in, out := &in.AccessTokenMaxAgeSeconds, &out.AccessTokenMaxAgeSeconds *out = new(int32) **out = **in } if in.AccessTokenInactivityTimeoutSeconds != nil { in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds *out = new(int32) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClient. 
func (in *OAuthClient) DeepCopy() *OAuthClient { if in == nil { return nil } out := new(OAuthClient) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthClient) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthClientAuthorization) DeepCopyInto(out *OAuthClientAuthorization) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorization. func (in *OAuthClientAuthorization) DeepCopy() *OAuthClientAuthorization { if in == nil { return nil } out := new(OAuthClientAuthorization) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthClientAuthorization) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthClientAuthorizationList) DeepCopyInto(out *OAuthClientAuthorizationList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]OAuthClientAuthorization, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorizationList. 
func (in *OAuthClientAuthorizationList) DeepCopy() *OAuthClientAuthorizationList { if in == nil { return nil } out := new(OAuthClientAuthorizationList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthClientAuthorizationList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthClientList) DeepCopyInto(out *OAuthClientList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]OAuthClient, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientList. func (in *OAuthClientList) DeepCopy() *OAuthClientList { if in == nil { return nil } out := new(OAuthClientList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthClientList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuthRedirectReference) DeepCopyInto(out *OAuthRedirectReference) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Reference = in.Reference return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRedirectReference. 
func (in *OAuthRedirectReference) DeepCopy() *OAuthRedirectReference { if in == nil { return nil } out := new(OAuthRedirectReference) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *OAuthRedirectReference) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RedirectReference) DeepCopyInto(out *RedirectReference) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectReference. func (in *RedirectReference) DeepCopy() *RedirectReference { if in == nil { return nil } out := new(RedirectReference) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScopeRestriction) DeepCopyInto(out *ScopeRestriction) { *out = *in if in.ExactValues != nil { in, out := &in.ExactValues, &out.ExactValues *out = make([]string, len(*in)) copy(*out, *in) } if in.ClusterRole != nil { in, out := &in.ClusterRole, &out.ClusterRole *out = new(ClusterRoleScopeRestriction) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeRestriction. func (in *ScopeRestriction) DeepCopy() *ScopeRestriction { if in == nil { return nil } out := new(ScopeRestriction) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UserOAuthAccessToken) DeepCopyInto(out *UserOAuthAccessToken) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Scopes != nil { in, out := &in.Scopes, &out.Scopes *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessToken. func (in *UserOAuthAccessToken) DeepCopy() *UserOAuthAccessToken { if in == nil { return nil } out := new(UserOAuthAccessToken) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *UserOAuthAccessToken) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserOAuthAccessTokenList) DeepCopyInto(out *UserOAuthAccessTokenList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]UserOAuthAccessToken, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessTokenList. func (in *UserOAuthAccessTokenList) DeepCopy() *UserOAuthAccessTokenList { if in == nil { return nil } out := new(UserOAuthAccessTokenList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *UserOAuthAccessTokenList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/types.go
vendor/github.com/openshift/api/oauth/v1/types.go
package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthAccessToken describes an OAuth access token. // The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at // least 32 characters long. // // The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded // base64-encoding (as described in RFC4648) on the hashed result. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthAccessToken struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // ClientName references the client that created this token. ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` // ExpiresIn is the seconds from CreationTime before this token expires. ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` // Scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` // RedirectURI is the redirection associated with the token. 
RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` // UserName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"` // UserUID is the unique UID associated with this token UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"` // AuthorizeToken contains the token that authorized this token AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"` // RefreshToken is the value by which this token can be renewed. Can be blank. RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"` // InactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthAuthorizeToken describes an OAuth authorization token // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthAuthorizeToken struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // ClientName references the client that created this token. ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` // ExpiresIn is the seconds from CreationTime before this token expires. 
ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` // Scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` // RedirectURI is the redirection associated with the token. RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` // State data from request State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"` // UserName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"` // UserUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"` // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"` // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthClient describes an OAuth client // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthClient struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Secret is the unique secret associated with a client Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation // and for service account token validation AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"` // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"` // RedirectURIs is the valid redirection URIs associated with a client // +patchStrategy=merge RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"` // GrantMethod is a required field which determines how to handle grants for this client. // Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients // - prompt: prompts the end user for approval of grant requests, useful for third-party clients GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"` // ScopeRestrictions describes which scopes this client can request. Each requested scope // is checked against each restriction. If any restriction matches, then the scope is allowed. // If no restriction matches, then the scope is denied. ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"` // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. // 0 means no expiration. 
AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"` // AccessTokenInactivityTimeoutSeconds overrides the default token // inactivity timeout for tokens granted to this client. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not // used within this temporal window. The user will need to acquire a new // token to regain access once a token times out. // This value needs to be set only if the default set in configuration is // not appropriate for this client. Valid values are: // - 0: Tokens for this client never time out // - X: Tokens time out if there is no activity for X seconds // The current minimum allowed value for X is 300 (5 minutes) // // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty" protobuf:"varint,9,opt,name=accessTokenInactivityTimeoutSeconds"` } type GrantHandlerType string const ( // GrantHandlerAuto auto-approves client authorization grant requests GrantHandlerAuto GrantHandlerType = "auto" // GrantHandlerPrompt prompts the user to approve new client authorization grant requests GrantHandlerPrompt GrantHandlerType = "prompt" // GrantHandlerDeny auto-denies client authorization grant requests GrantHandlerDeny GrantHandlerType = "deny" ) // ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil. type ScopeRestriction struct { // ExactValues means the scope has to match a particular set of strings exactly ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"` // ClusterRole describes a set of restrictions for cluster role scoping. 
ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"` } // ClusterRoleScopeRestriction describes restrictions on cluster role scopes type ClusterRoleScopeRestriction struct { // RoleNames is the list of cluster roles that can referenced. * means anything RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"` // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` // AllowEscalation indicates whether you can request roles and their escalating resources AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthClientAuthorization describes an authorization created by an OAuth client // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthClientAuthorization struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // ClientName references the client that created this authorization ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` // UserName is the user name that authorized this client UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"` // UserUID is the unique UID associated with this authorization. UserUID and UserName // must both match for this authorization to be valid. UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"` // Scopes is an array of the granted scopes. 
Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthAccessTokenList is a collection of OAuth access tokens // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthAccessTokenList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of OAuth access tokens Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthAuthorizeTokenList is a collection of OAuth authorization tokens // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthAuthorizeTokenList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of OAuth authorization tokens Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthClientList is a collection of OAuth clients // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthClientList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of OAuth clients Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthClientAuthorizationList is a collection of OAuth client authorizations // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthClientAuthorizationList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of OAuth client authorizations Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OAuthRedirectReference is a reference to an OAuth redirect object. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuthRedirectReference struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The reference to an redirect object in the current namespace. Reference RedirectReference `json:"reference,omitempty" protobuf:"bytes,2,opt,name=reference"` } // RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed. 
type RedirectReference struct { // The group of the target that is being referred to. Group string `json:"group" protobuf:"bytes,1,opt,name=group"` // The kind of the target that is being referred to. Currently, only 'Route' is allowed. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` // The name of the target that is being referred to. e.g. name of the Route. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to // the user the access token was issued for // +openshift:compatibility-gen:level=1 type UserOAuthAccessToken OAuthAccessToken // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UserOAuthAccessTokenList is a collection of access tokens issued on behalf of // the requesting user // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type UserOAuthAccessTokenList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []UserOAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/register.go
vendor/github.com/openshift/api/oauth/v1/register.go
package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( GroupName = "oauth.openshift.io" GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme // SchemeGroupVersion generated code relies on this name // Deprecated SchemeGroupVersion = GroupVersion // AddToScheme exists solely to keep the old generators creating valid code // DEPRECATED AddToScheme = schemeBuilder.AddToScheme ) // Resource generated code relies on this being here, but it logically belongs to the group // DEPRECATED func Resource(resource string) schema.GroupResource { return schema.GroupResource{Group: GroupName, Resource: resource} } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &OAuthAccessToken{}, &OAuthAccessTokenList{}, &OAuthAuthorizeToken{}, &OAuthAuthorizeTokenList{}, &OAuthClient{}, &OAuthClientList{}, &OAuthClientAuthorization{}, &OAuthClientAuthorizationList{}, &OAuthRedirectReference{}, &UserOAuthAccessToken{}, &UserOAuthAccessTokenList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_ClusterRoleScopeRestriction = map[string]string{ "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes", "roleNames": "RoleNames is the list of cluster roles that can referenced. * means anything", "namespaces": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)", "allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources", } func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { return map_ClusterRoleScopeRestriction } var map_OAuthAccessToken = map[string]string{ "": "OAuthAccessToken describes an OAuth access token. The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "clientName": "ClientName references the client that created this token.", "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", "scopes": "Scopes is an array of the requested scopes.", "redirectURI": "RedirectURI is the redirection associated with the token.", "userName": "UserName is the user name associated with this token", "userUID": "UserUID is the unique UID associated with this token", "authorizeToken": "AuthorizeToken contains the token that authorized this token", "refreshToken": "RefreshToken is the value by which this token can be renewed. Can be blank.", "inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", } func (OAuthAccessToken) SwaggerDoc() map[string]string { return map_OAuthAccessToken } var map_OAuthAccessTokenList = map[string]string{ "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of OAuth access tokens", } func (OAuthAccessTokenList) SwaggerDoc() map[string]string { return map_OAuthAccessTokenList } var map_OAuthAuthorizeToken = map[string]string{ "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "clientName": "ClientName references the client that created this token.", "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", "scopes": "Scopes is an array of the requested scopes.", "redirectURI": "RedirectURI is the redirection associated with the token.", "state": "State data from request", "userName": "UserName is the user name associated with this token", "userUID": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", } func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { return map_OAuthAuthorizeToken } var map_OAuthAuthorizeTokenList = map[string]string{ "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of OAuth authorization tokens", } func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { return map_OAuthAuthorizeTokenList } var map_OAuthClient = map[string]string{ "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "secret": "Secret is the unique secret associated with a client", "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client", "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. 
Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", } func (OAuthClient) SwaggerDoc() map[string]string { return map_OAuthClient } var map_OAuthClientAuthorization = map[string]string{ "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "clientName": "ClientName references the client that created this authorization", "userName": "UserName is the user name that authorized this client", "userUID": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", "scopes": "Scopes is an array of the granted scopes.", } func (OAuthClientAuthorization) SwaggerDoc() map[string]string { return map_OAuthClientAuthorization } var map_OAuthClientAuthorizationList = map[string]string{ "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of OAuth client authorizations", } func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { return map_OAuthClientAuthorizationList } var map_OAuthClientList = map[string]string{ "": "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of OAuth clients", } func (OAuthClientList) SwaggerDoc() map[string]string { return map_OAuthClientList } var map_OAuthRedirectReference = map[string]string{ "": "OAuthRedirectReference is a reference to an OAuth redirect object.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "reference": "The reference to an redirect object in the current namespace.", } func (OAuthRedirectReference) SwaggerDoc() map[string]string { return map_OAuthRedirectReference } var map_RedirectReference = map[string]string{ "": "RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.", "group": "The group of the target that is being referred to.", "kind": "The kind of the target that is being referred to. Currently, only 'Route' is allowed.", "name": "The name of the target that is being referred to. e.g. 
name of the Route.", } func (RedirectReference) SwaggerDoc() map[string]string { return map_RedirectReference } var map_ScopeRestriction = map[string]string{ "": "ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.", "literals": "ExactValues means the scope has to match a particular set of strings exactly", "clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.", } func (ScopeRestriction) SwaggerDoc() map[string]string { return map_ScopeRestriction } var map_UserOAuthAccessTokenList = map[string]string{ "": "UserOAuthAccessTokenList is a collection of access tokens issued on behalf of the requesting user\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (UserOAuthAccessTokenList) SwaggerDoc() map[string]string { return map_UserOAuthAccessTokenList } // AUTO-GENERATED FUNCTIONS END HERE
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/doc.go
vendor/github.com/openshift/api/oauth/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/oauth/apis/oauth // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=oauth.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/generated.pb.go
vendor/github.com/openshift/api/oauth/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/oauth/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ClusterRoleScopeRestriction) Reset() { *m = ClusterRoleScopeRestriction{} } func (*ClusterRoleScopeRestriction) ProtoMessage() {} func (*ClusterRoleScopeRestriction) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{0} } func (m *ClusterRoleScopeRestriction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ClusterRoleScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ClusterRoleScopeRestriction) XXX_Merge(src proto.Message) { xxx_messageInfo_ClusterRoleScopeRestriction.Merge(m, src) } func (m *ClusterRoleScopeRestriction) XXX_Size() int { return m.Size() } func (m *ClusterRoleScopeRestriction) XXX_DiscardUnknown() { xxx_messageInfo_ClusterRoleScopeRestriction.DiscardUnknown(m) } var xxx_messageInfo_ClusterRoleScopeRestriction proto.InternalMessageInfo func (m *OAuthAccessToken) Reset() { *m = OAuthAccessToken{} } func (*OAuthAccessToken) ProtoMessage() {} func (*OAuthAccessToken) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{1} } func (m *OAuthAccessToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthAccessToken) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthAccessToken) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthAccessToken.Merge(m, src) } func (m *OAuthAccessToken) XXX_Size() int { return m.Size() } func (m *OAuthAccessToken) XXX_DiscardUnknown() { xxx_messageInfo_OAuthAccessToken.DiscardUnknown(m) } var xxx_messageInfo_OAuthAccessToken proto.InternalMessageInfo func (m *OAuthAccessTokenList) Reset() { *m = OAuthAccessTokenList{} } func (*OAuthAccessTokenList) ProtoMessage() {} func (*OAuthAccessTokenList) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{2} } func (m *OAuthAccessTokenList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthAccessTokenList) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthAccessTokenList.Merge(m, src) } func (m *OAuthAccessTokenList) XXX_Size() int { return m.Size() } func (m *OAuthAccessTokenList) XXX_DiscardUnknown() { xxx_messageInfo_OAuthAccessTokenList.DiscardUnknown(m) } var xxx_messageInfo_OAuthAccessTokenList proto.InternalMessageInfo func (m *OAuthAuthorizeToken) Reset() { *m = OAuthAuthorizeToken{} } func (*OAuthAuthorizeToken) ProtoMessage() {} func (*OAuthAuthorizeToken) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{3} } func (m *OAuthAuthorizeToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthAuthorizeToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthAuthorizeToken) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthAuthorizeToken.Merge(m, src) } func (m 
*OAuthAuthorizeToken) XXX_Size() int { return m.Size() } func (m *OAuthAuthorizeToken) XXX_DiscardUnknown() { xxx_messageInfo_OAuthAuthorizeToken.DiscardUnknown(m) } var xxx_messageInfo_OAuthAuthorizeToken proto.InternalMessageInfo func (m *OAuthAuthorizeTokenList) Reset() { *m = OAuthAuthorizeTokenList{} } func (*OAuthAuthorizeTokenList) ProtoMessage() {} func (*OAuthAuthorizeTokenList) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{4} } func (m *OAuthAuthorizeTokenList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthAuthorizeTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthAuthorizeTokenList) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthAuthorizeTokenList.Merge(m, src) } func (m *OAuthAuthorizeTokenList) XXX_Size() int { return m.Size() } func (m *OAuthAuthorizeTokenList) XXX_DiscardUnknown() { xxx_messageInfo_OAuthAuthorizeTokenList.DiscardUnknown(m) } var xxx_messageInfo_OAuthAuthorizeTokenList proto.InternalMessageInfo func (m *OAuthClient) Reset() { *m = OAuthClient{} } func (*OAuthClient) ProtoMessage() {} func (*OAuthClient) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{5} } func (m *OAuthClient) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthClient) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthClient.Merge(m, src) } func (m *OAuthClient) XXX_Size() int { return m.Size() } func (m *OAuthClient) XXX_DiscardUnknown() { xxx_messageInfo_OAuthClient.DiscardUnknown(m) } var xxx_messageInfo_OAuthClient proto.InternalMessageInfo func (m *OAuthClientAuthorization) Reset() { *m = OAuthClientAuthorization{} } func 
(*OAuthClientAuthorization) ProtoMessage() {} func (*OAuthClientAuthorization) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{6} } func (m *OAuthClientAuthorization) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthClientAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthClientAuthorization) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthClientAuthorization.Merge(m, src) } func (m *OAuthClientAuthorization) XXX_Size() int { return m.Size() } func (m *OAuthClientAuthorization) XXX_DiscardUnknown() { xxx_messageInfo_OAuthClientAuthorization.DiscardUnknown(m) } var xxx_messageInfo_OAuthClientAuthorization proto.InternalMessageInfo func (m *OAuthClientAuthorizationList) Reset() { *m = OAuthClientAuthorizationList{} } func (*OAuthClientAuthorizationList) ProtoMessage() {} func (*OAuthClientAuthorizationList) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{7} } func (m *OAuthClientAuthorizationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthClientAuthorizationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthClientAuthorizationList) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthClientAuthorizationList.Merge(m, src) } func (m *OAuthClientAuthorizationList) XXX_Size() int { return m.Size() } func (m *OAuthClientAuthorizationList) XXX_DiscardUnknown() { xxx_messageInfo_OAuthClientAuthorizationList.DiscardUnknown(m) } var xxx_messageInfo_OAuthClientAuthorizationList proto.InternalMessageInfo func (m *OAuthClientList) Reset() { *m = OAuthClientList{} } func (*OAuthClientList) ProtoMessage() {} func (*OAuthClientList) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, 
[]int{8} } func (m *OAuthClientList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthClientList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthClientList) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthClientList.Merge(m, src) } func (m *OAuthClientList) XXX_Size() int { return m.Size() } func (m *OAuthClientList) XXX_DiscardUnknown() { xxx_messageInfo_OAuthClientList.DiscardUnknown(m) } var xxx_messageInfo_OAuthClientList proto.InternalMessageInfo func (m *OAuthRedirectReference) Reset() { *m = OAuthRedirectReference{} } func (*OAuthRedirectReference) ProtoMessage() {} func (*OAuthRedirectReference) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{9} } func (m *OAuthRedirectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OAuthRedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *OAuthRedirectReference) XXX_Merge(src proto.Message) { xxx_messageInfo_OAuthRedirectReference.Merge(m, src) } func (m *OAuthRedirectReference) XXX_Size() int { return m.Size() } func (m *OAuthRedirectReference) XXX_DiscardUnknown() { xxx_messageInfo_OAuthRedirectReference.DiscardUnknown(m) } var xxx_messageInfo_OAuthRedirectReference proto.InternalMessageInfo func (m *RedirectReference) Reset() { *m = RedirectReference{} } func (*RedirectReference) ProtoMessage() {} func (*RedirectReference) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{10} } func (m *RedirectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], 
nil } func (m *RedirectReference) XXX_Merge(src proto.Message) { xxx_messageInfo_RedirectReference.Merge(m, src) } func (m *RedirectReference) XXX_Size() int { return m.Size() } func (m *RedirectReference) XXX_DiscardUnknown() { xxx_messageInfo_RedirectReference.DiscardUnknown(m) } var xxx_messageInfo_RedirectReference proto.InternalMessageInfo func (m *ScopeRestriction) Reset() { *m = ScopeRestriction{} } func (*ScopeRestriction) ProtoMessage() {} func (*ScopeRestriction) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{11} } func (m *ScopeRestriction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ScopeRestriction) XXX_Merge(src proto.Message) { xxx_messageInfo_ScopeRestriction.Merge(m, src) } func (m *ScopeRestriction) XXX_Size() int { return m.Size() } func (m *ScopeRestriction) XXX_DiscardUnknown() { xxx_messageInfo_ScopeRestriction.DiscardUnknown(m) } var xxx_messageInfo_ScopeRestriction proto.InternalMessageInfo func (m *UserOAuthAccessToken) Reset() { *m = UserOAuthAccessToken{} } func (*UserOAuthAccessToken) ProtoMessage() {} func (*UserOAuthAccessToken) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{12} } func (m *UserOAuthAccessToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UserOAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *UserOAuthAccessToken) XXX_Merge(src proto.Message) { xxx_messageInfo_UserOAuthAccessToken.Merge(m, src) } func (m *UserOAuthAccessToken) XXX_Size() int { return m.Size() } func (m *UserOAuthAccessToken) XXX_DiscardUnknown() { xxx_messageInfo_UserOAuthAccessToken.DiscardUnknown(m) } var 
xxx_messageInfo_UserOAuthAccessToken proto.InternalMessageInfo func (m *UserOAuthAccessTokenList) Reset() { *m = UserOAuthAccessTokenList{} } func (*UserOAuthAccessTokenList) ProtoMessage() {} func (*UserOAuthAccessTokenList) Descriptor() ([]byte, []int) { return fileDescriptor_bd688dca7ea39c8a, []int{13} } func (m *UserOAuthAccessTokenList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UserOAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *UserOAuthAccessTokenList) XXX_Merge(src proto.Message) { xxx_messageInfo_UserOAuthAccessTokenList.Merge(m, src) } func (m *UserOAuthAccessTokenList) XXX_Size() int { return m.Size() } func (m *UserOAuthAccessTokenList) XXX_DiscardUnknown() { xxx_messageInfo_UserOAuthAccessTokenList.DiscardUnknown(m) } var xxx_messageInfo_UserOAuthAccessTokenList proto.InternalMessageInfo func init() { proto.RegisterType((*ClusterRoleScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ClusterRoleScopeRestriction") proto.RegisterType((*OAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessToken") proto.RegisterType((*OAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessTokenList") proto.RegisterType((*OAuthAuthorizeToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeToken") proto.RegisterType((*OAuthAuthorizeTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeTokenList") proto.RegisterType((*OAuthClient)(nil), "github.com.openshift.api.oauth.v1.OAuthClient") proto.RegisterType((*OAuthClientAuthorization)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorization") proto.RegisterType((*OAuthClientAuthorizationList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorizationList") proto.RegisterType((*OAuthClientList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientList") 
proto.RegisterType((*OAuthRedirectReference)(nil), "github.com.openshift.api.oauth.v1.OAuthRedirectReference") proto.RegisterType((*RedirectReference)(nil), "github.com.openshift.api.oauth.v1.RedirectReference") proto.RegisterType((*ScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ScopeRestriction") proto.RegisterType((*UserOAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessToken") proto.RegisterType((*UserOAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessTokenList") } func init() { proto.RegisterFile("github.com/openshift/api/oauth/v1/generated.proto", fileDescriptor_bd688dca7ea39c8a) } var fileDescriptor_bd688dca7ea39c8a = []byte{ // 1272 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, 0x17, 0xcf, 0x36, 0x76, 0x62, 0x3f, 0x37, 0xbf, 0x26, 0x4d, 0xbb, 0xdf, 0xb6, 0x5f, 0xdb, 0x75, 0x24, 0x1a, 0x04, 0xac, 0x49, 0x28, 0xa5, 0x52, 0xa5, 0x4a, 0x76, 0xa8, 0x4a, 0x04, 0x69, 0xa5, 0x49, 0x03, 0x15, 0xf4, 0xd0, 0xe9, 0xee, 0x8b, 0x3d, 0x64, 0xbd, 0xbb, 0xec, 0x8c, 0x43, 0x83, 0x7a, 0xe0, 0xc2, 0x9d, 0x7f, 0x84, 0x0b, 0x77, 0x0e, 0x48, 0x1c, 0x7a, 0x42, 0x3d, 0x20, 0xd4, 0x93, 0x45, 0x8c, 0x38, 0xf0, 0x2f, 0x70, 0x42, 0x3b, 0xbb, 0xde, 0x1f, 0x8e, 0x4d, 0xdc, 0x03, 0x11, 0x87, 0xde, 0xbc, 0xef, 0x7d, 0x3e, 0x6f, 0xde, 0xcc, 0xbc, 0xcf, 0x9b, 0x19, 0xc3, 0x7a, 0x8b, 0xcb, 0x76, 0xf7, 0xb1, 0x61, 0xba, 0x9d, 0xba, 0xeb, 0xa1, 0x23, 0xda, 0x7c, 0x4f, 0xd6, 0x99, 0xc7, 0xeb, 0x2e, 0xeb, 0xca, 0x76, 0xfd, 0x60, 0xbd, 0xde, 0x42, 0x07, 0x7d, 0x26, 0xd1, 0x32, 0x3c, 0xdf, 0x95, 0x2e, 0xb9, 0x92, 0x50, 0x8c, 0x98, 0x62, 0x30, 0x8f, 0x1b, 0x8a, 0x62, 0x1c, 0xac, 0x5f, 0x7c, 0x2b, 0x15, 0xb5, 0xe5, 0xb6, 0xdc, 0xba, 0x62, 0x3e, 0xee, 0xee, 0xa9, 0x2f, 0xf5, 0xa1, 0x7e, 0x85, 0x11, 0x2f, 0x5e, 0xdb, 0xbf, 0x21, 0x0c, 0xee, 0x06, 0xc3, 0x76, 0x98, 0xd9, 0xe6, 0x0e, 0xfa, 0x87, 0x75, 0x6f, 0xbf, 0x15, 0x18, 0x44, 0xbd, 0x83, 0x92, 0x8d, 
0xc8, 0xe3, 0xe2, 0xf5, 0x71, 0x2c, 0xbf, 0xeb, 0x48, 0xde, 0xc1, 0xba, 0x30, 0xdb, 0xd8, 0x61, 0xc3, 0xbc, 0xda, 0x0f, 0x1a, 0x5c, 0xda, 0xb4, 0xbb, 0x42, 0xa2, 0x4f, 0x5d, 0x1b, 0x77, 0x4c, 0xd7, 0x43, 0x8a, 0x42, 0xfa, 0xdc, 0x94, 0xdc, 0x75, 0xc8, 0x1b, 0x50, 0xf4, 0x5d, 0x1b, 0xef, 0xb2, 0x0e, 0x0a, 0x5d, 0xab, 0x4e, 0xaf, 0x15, 0x9b, 0x73, 0xfd, 0x5e, 0xa5, 0x48, 0x07, 0x46, 0x9a, 0xf8, 0x89, 0x01, 0xe0, 0x04, 0x3f, 0x3c, 0x66, 0xa2, 0xd0, 0xcf, 0x28, 0xf4, 0x7c, 0xbf, 0x57, 0x81, 0xbb, 0xb1, 0x95, 0xa6, 0x10, 0xa4, 0x01, 0x0b, 0xcc, 0xb6, 0xdd, 0x2f, 0x6f, 0x0b, 0x93, 0xd9, 0x2c, 0x18, 0x4f, 0x9f, 0xae, 0x6a, 0x6b, 0x85, 0xe6, 0x85, 0x67, 0xbd, 0xca, 0x54, 0xbf, 0x57, 0x59, 0x68, 0x64, 0xdd, 0x74, 0x18, 0x5f, 0xfb, 0x23, 0x07, 0x8b, 0xf7, 0x1a, 0x5d, 0xd9, 0x6e, 0x98, 0x26, 0x0a, 0x71, 0xdf, 0xdd, 0x47, 0x87, 0x3c, 0x82, 0x42, 0xb0, 0x4e, 0x16, 0x93, 0x4c, 0xd7, 0xaa, 0xda, 0x5a, 0x69, 0xe3, 0x6d, 0x23, 0x5c, 0x1f, 0x23, 0xbd, 0x3e, 0x86, 0xb7, 0xdf, 0x0a, 0x0c, 0xc2, 0x08, 0xd0, 0xc6, 0xc1, 0xba, 0x71, 0xef, 0xf1, 0xe7, 0x68, 0xca, 0x6d, 0x94, 0xac, 0x49, 0xa2, 0x14, 0x20, 0xb1, 0xd1, 0x38, 0x2a, 0xd9, 0x00, 0x30, 0x6d, 0x8e, 0x8e, 0x0c, 0x66, 0xa6, 0x9f, 0xa9, 0x6a, 0x6b, 0xc5, 0x84, 0xb1, 0x19, 0x7b, 0x68, 0x0a, 0x45, 0xea, 0x50, 0xc4, 0x27, 0x1e, 0xf7, 0x51, 0x6c, 0x85, 0xf3, 0x9c, 0x6e, 0x2e, 0x45, 0x94, 0xe2, 0xed, 0x81, 0x83, 0x26, 0x18, 0x52, 0x83, 0x19, 0x11, 0xec, 0x87, 0xd0, 0x73, 0x6a, 0x29, 0xa1, 0xdf, 0xab, 0xcc, 0xa8, 0x1d, 0x12, 0x34, 0xf2, 0x90, 0x77, 0xa1, 0xe4, 0xa3, 0xc5, 0x7d, 0x34, 0xe5, 0x2e, 0xdd, 0xd2, 0xf3, 0x2a, 0x93, 0xe5, 0x28, 0x6c, 0x89, 0x26, 0x2e, 0x9a, 0xc6, 0x91, 0x37, 0xa1, 0xd0, 0x15, 0xe8, 0xab, 0xec, 0x67, 0x14, 0x67, 0x31, 0xe2, 0x14, 0x76, 0x23, 0x3b, 0x8d, 0x11, 0xe4, 0x75, 0x98, 0x0d, 0x7e, 0xef, 0x6e, 0xbd, 0xaf, 0xcf, 0x2a, 0xf0, 0x42, 0x04, 0x9e, 0xdd, 0x0d, 0xcd, 0x74, 0xe0, 0x27, 0xb7, 0x60, 0x3e, 0xa8, 0x7b, 0xd7, 0xe7, 0x5f, 0xa1, 0xda, 0x0c, 0xbd, 0xa0, 0x18, 0xe7, 0x23, 0xc6, 0x7c, 0x23, 0xe3, 0xa5, 0x43, 
0x68, 0x72, 0x03, 0xce, 0xfa, 0xb8, 0xe7, 0xa3, 0x68, 0x87, 0xec, 0xa2, 0x62, 0x9f, 0x8b, 0xd8, 0x67, 0x69, 0xca, 0x47, 0x33, 0x48, 0xf2, 0x10, 0x74, 0xee, 0x30, 0x53, 0xf2, 0x03, 0x2e, 0x0f, 0xef, 0xf3, 0x0e, 0xba, 0x5d, 0xb9, 0x83, 0xa6, 0xeb, 0x58, 0x42, 0x87, 0xaa, 0xb6, 0x96, 0x6f, 0x56, 0xa3, 0x28, 0xfa, 0xd6, 0x18, 0x1c, 0x1d, 0x1b, 0xa1, 0xf6, 0xb3, 0x06, 0xe7, 0x86, 0xeb, 0xec, 0x23, 0x2e, 0x24, 0x79, 0x78, 0xac, 0xd6, 0x8c, 0xc9, 0x6a, 0x2d, 0x60, 0xab, 0x4a, 0x8b, 0x57, 0x7e, 0x60, 0x49, 0xd5, 0xd9, 0x03, 0xc8, 0x73, 0x89, 0x9d, 0x50, 0x4c, 0xa5, 0x8d, 0x77, 0x8c, 0x13, 0xdb, 0x8d, 0x31, 0x9c, 0x65, 0x73, 0x2e, 0x8a, 0x9f, 0xdf, 0x0a, 0x22, 0xd1, 0x30, 0x60, 0xed, 0xc7, 0x1c, 0x2c, 0x87, 0xd0, 0xec, 0x06, 0xbc, 0xd2, 0xce, 0x49, 0xda, 0x59, 0x85, 0xbc, 0x90, 0x4c, 0x0e, 0x84, 0x13, 0x2f, 0xef, 0x4e, 0x60, 0xa4, 0xa1, 0x2f, 0x23, 0xb0, 0xd9, 0x97, 0x11, 0x58, 0xe1, 0x04, 0x81, 0xdd, 0x84, 0x39, 0xd3, 0xb5, 0x70, 0xb3, 0xcd, 0x6c, 0x1b, 0x9d, 0x16, 0x46, 0x0a, 0x59, 0x89, 0x08, 0x73, 0x9b, 0x69, 0x27, 0xcd, 0x62, 0xc9, 0x36, 0x2c, 0x67, 0x0c, 0xdb, 0x28, 0xdb, 0xae, 0xa5, 0xe4, 0x51, 0x6c, 0x5e, 0x8a, 0x42, 0x2c, 0x6f, 0x1e, 0x87, 0xd0, 0x51, 0xbc, 0xda, 0x2f, 0x1a, 0x5c, 0x18, 0x51, 0x43, 0xa7, 0xa0, 0x8b, 0xcf, 0xb2, 0xba, 0xb8, 0x3e, 0xb1, 0x2e, 0x32, 0x89, 0x8e, 0x91, 0xc6, 0x37, 0x33, 0x50, 0x52, 0xe8, 0xb0, 0x18, 0x4f, 0x41, 0x12, 0xaf, 0xc1, 0x8c, 0x40, 0xd3, 0x47, 0x19, 0xc9, 0x61, 0x3e, 0x42, 0xcf, 0xec, 0x28, 0x2b, 0x8d, 0xbc, 0x64, 0x13, 0x96, 0x98, 0x65, 0xf1, 0xe0, 0xe4, 0x63, 0x76, 0xe8, 0x13, 0xfa, 0xb4, 0x2a, 0xf0, 0x95, 0x7e, 0xaf, 0xb2, 0xd4, 0x18, 0x76, 0xd2, 0xe3, 0x78, 0xb2, 0x03, 0x2b, 0x3e, 0x0a, 0xcf, 0x75, 0xac, 0x4f, 0xb8, 0x6c, 0xc7, 0x7b, 0x1a, 0x28, 0x25, 0x38, 0x7b, 0xff, 0x1f, 0x8d, 0xbd, 0x42, 0x47, 0x81, 0xe8, 0x68, 0x2e, 0xb9, 0x16, 0xf4, 0xed, 0x58, 0x23, 0x42, 0xcf, 0xab, 0xa4, 0x16, 0xc3, 0x9e, 0x9d, 0xd8, 0x69, 0x06, 0x45, 0xb6, 0xa0, 0xd4, 0xf2, 0x99, 0x23, 0xa3, 0x3a, 0x0c, 0x05, 0x75, 0x75, 0xa0, 0xc0, 0x3b, 0x89, 
0xeb, 0xaf, 0x5e, 0x65, 0x51, 0x7d, 0x7e, 0xc0, 0x1c, 0xcb, 0x46, 0xff, 0xfe, 0xa1, 0x87, 0x34, 0xcd, 0x25, 0x4f, 0x61, 0x49, 0x0c, 0x5d, 0x5e, 0x84, 0x3e, 0x3b, 0x71, 0xd7, 0x1c, 0xbe, 0xf8, 0x34, 0xff, 0x17, 0x65, 0xb1, 0x34, 0xec, 0x11, 0xf4, 0xf8, 0x40, 0xe4, 0x01, 0xe8, 0x2c, 0x69, 0xb9, 0xdb, 0xec, 0x49, 0xa3, 0x85, 0x83, 0xc3, 0xa7, 0xa0, 0x0e, 0x9f, 0xcb, 0xc1, 0xc1, 0xd3, 0x18, 0x83, 0xa1, 0x63, 0xd9, 0xe4, 0x10, 0x56, 0x53, 0xbe, 0x71, 0x27, 0x97, 0xea, 0x02, 0xf9, 0xe6, 0xd5, 0x7e, 0xaf, 0xb2, 0xda, 0x38, 0x19, 0x4e, 0x27, 0x89, 0x59, 0xfb, 0xee, 0x0c, 0xe8, 0x29, 0x1d, 0x0c, 0xb4, 0xa3, 0x2e, 0x5e, 0xff, 0xd1, 0x73, 0x22, 0xdd, 0x76, 0xa7, 0x5f, 0xa6, 0xed, 0xe6, 0x4e, 0x68, 0xbb, 0xc9, 0x79, 0x92, 0x1f, 0x77, 0x9e, 0xd4, 0x7a, 0x1a, 0x5c, 0x1e, 0xb7, 0x5e, 0xa7, 0xd0, 0x13, 0x1f, 0x65, 0x7b, 0xe2, 0xcd, 0x49, 0x7b, 0xe2, 0x88, 0x6c, 0xc7, 0x34, 0xc6, 0x9f, 0x34, 0x58, 0x48, 0x51, 0x4e, 0x61, 0x4e, 0x3b, 0xd9, 0x39, 0x19, 0x2f, 0x37, 0xa7, 0x31, 0xd3, 0x38, 0xd2, 0xe0, 0xbc, 0x42, 0x0d, 0x3a, 0x13, 0xc5, 0x3d, 0xf4, 0xd1, 0x31, 0xf1, 0x14, 0xaa, 0x1a, 0xa1, 0xe8, 0x0f, 0x86, 0x53, 0x45, 0x5d, 0xda, 0xb8, 0x36, 0xc1, 0xac, 0x8e, 0xa5, 0x9a, 0xdc, 0x7f, 0x62, 0x13, 0x4d, 0x22, 0xd7, 0x9e, 0xc2, 0xd2, 0xf1, 0xd9, 0xad, 0x42, 0xbe, 0xe5, 0xbb, 0x5d, 0x4f, 0x4d, 0x2d, 0x75, 0x73, 0xb9, 0x13, 0x18, 0x69, 0xe8, 0x23, 0x55, 0xc8, 0xed, 0x73, 0xc7, 0x8a, 0x04, 0x77, 0x36, 0xc2, 0xe4, 0x3e, 0xe4, 0x8e, 0x45, 0x95, 0x27, 0x40, 0x38, 0x89, 0xc0, 0x62, 0x84, 0x12, 0x97, 0xf2, 0xd4, 0xbe, 0xd7, 0x60, 0x71, 0xc4, 0x53, 0xb2, 0x60, 0x73, 0x89, 0x3e, 0xb3, 0x07, 0x2f, 0xc9, 0x85, 0xa0, 0xcb, 0xdf, 0x7e, 0xc2, 0x4c, 0xf9, 0x31, 0xb3, 0xbb, 0x28, 0x68, 0x0c, 0x20, 0x5f, 0x40, 0xc9, 0x4c, 0x9e, 0xa5, 0xd1, 0x42, 0xdd, 0x9a, 0x60, 0xa1, 0xfe, 0xe1, 0x31, 0x1b, 0x8e, 0x97, 0x02, 0xd0, 0xf4, 0x18, 0xb5, 0x3f, 0x73, 0x70, 0x2e, 0xd0, 0xfd, 0xab, 0xe7, 0xe4, 0xab, 0xe7, 0xe4, 0xbf, 0xfd, 0x9c, 0xfc, 0x55, 0x03, 0x7d, 0x54, 0xad, 0x9d, 0x42, 0x4b, 0x7d, 0x98, 0x6d, 0xa9, 0xef, 
0x4d, 0xa0, 0xa9, 0x51, 0x99, 0x8e, 0xee, 0xad, 0xcd, 0x3b, 0xcf, 0x8e, 0xca, 0x53, 0xcf, 0x8f, 0xca, 0x53, 0x2f, 0x8e, 0xca, 0x53, 0x5f, 0xf7, 0xcb, 0xda, 0xb3, 0x7e, 0x59, 0x7b, 0xde, 0x2f, 0x6b, 0x2f, 0xfa, 0x65, 0xed, 0xb7, 0x7e, 0x59, 0xfb, 0xf6, 0xf7, 0xf2, 0xd4, 0xa7, 0x57, 0x4e, 0xfc, 0xa3, 0xed, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xcf, 0x36, 0xd6, 0x8c, 0x13, 0x00, 0x00, } func (m *ClusterRoleScopeRestriction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ClusterRoleScopeRestriction) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ClusterRoleScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i-- if m.AllowEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 if len(m.Namespaces) > 0 { for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Namespaces[iNdEx]) copy(dAtA[i:], m.Namespaces[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.RoleNames) > 0 { for iNdEx := len(m.RoleNames) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.RoleNames[iNdEx]) copy(dAtA[i:], m.RoleNames[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleNames[iNdEx]))) i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *OAuthAccessToken) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) i-- dAtA[i] = 
0x50 i -= len(m.RefreshToken) copy(dAtA[i:], m.RefreshToken) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) i-- dAtA[i] = 0x4a i -= len(m.AuthorizeToken) copy(dAtA[i:], m.AuthorizeToken) i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) i-- dAtA[i] = 0x42 i -= len(m.UserUID) copy(dAtA[i:], m.UserUID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) i-- dAtA[i] = 0x3a i -= len(m.UserName) copy(dAtA[i:], m.UserName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) i-- dAtA[i] = 0x32 i -= len(m.RedirectURI) copy(dAtA[i:], m.RedirectURI) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) i-- dAtA[i] = 0x2a if len(m.Scopes) > 0 { for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Scopes[iNdEx]) copy(dAtA[i:], m.Scopes[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) i-- dAtA[i] = 0x22 } } i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) i-- dAtA[i] = 0x18 i -= len(m.ClientName) copy(dAtA[i:], m.ClientName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *OAuthAccessTokenList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- 
dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *OAuthAuthorizeToken) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthAuthorizeToken) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthAuthorizeToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.CodeChallengeMethod) copy(dAtA[i:], m.CodeChallengeMethod) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallengeMethod))) i-- dAtA[i] = 0x52 i -= len(m.CodeChallenge) copy(dAtA[i:], m.CodeChallenge) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallenge))) i-- dAtA[i] = 0x4a i -= len(m.UserUID) copy(dAtA[i:], m.UserUID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) i-- dAtA[i] = 0x42 i -= len(m.UserName) copy(dAtA[i:], m.UserName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) i-- dAtA[i] = 0x3a i -= len(m.State) copy(dAtA[i:], m.State) i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) i-- dAtA[i] = 0x32 i -= len(m.RedirectURI) copy(dAtA[i:], m.RedirectURI) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) i-- dAtA[i] = 0x2a if len(m.Scopes) > 0 { for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Scopes[iNdEx]) copy(dAtA[i:], m.Scopes[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) i-- dAtA[i] = 0x22 } } i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) i-- dAtA[i] = 0x18 i -= len(m.ClientName) copy(dAtA[i:], m.ClientName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, 
err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *OAuthAuthorizeTokenList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthAuthorizeTokenList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthAuthorizeTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *OAuthClient) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthClient) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.AccessTokenInactivityTimeoutSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenInactivityTimeoutSeconds)) i-- dAtA[i] = 0x48 } if m.AccessTokenMaxAgeSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenMaxAgeSeconds)) i-- dAtA[i] = 0x40 } if len(m.ScopeRestrictions) > 0 { for iNdEx := len(m.ScopeRestrictions) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.ScopeRestrictions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, 
i, uint64(size)) } i-- dAtA[i] = 0x3a } } i -= len(m.GrantMethod) copy(dAtA[i:], m.GrantMethod) i = encodeVarintGenerated(dAtA, i, uint64(len(m.GrantMethod))) i-- dAtA[i] = 0x32 if len(m.RedirectURIs) > 0 { for iNdEx := len(m.RedirectURIs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.RedirectURIs[iNdEx]) copy(dAtA[i:], m.RedirectURIs[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURIs[iNdEx]))) i-- dAtA[i] = 0x2a } } i-- if m.RespondWithChallenges { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x20 if len(m.AdditionalSecrets) > 0 { for iNdEx := len(m.AdditionalSecrets) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.AdditionalSecrets[iNdEx]) copy(dAtA[i:], m.AdditionalSecrets[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalSecrets[iNdEx]))) i-- dAtA[i] = 0x1a } } i -= len(m.Secret) copy(dAtA[i:], m.Secret) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *OAuthClientAuthorization) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OAuthClientAuthorization) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OAuthClientAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Scopes) > 0 { for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Scopes[iNdEx]) copy(dAtA[i:], m.Scopes[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) i-- dAtA[i] = 0x2a } } i -= len(m.UserUID) copy(dAtA[i:], m.UserUID)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/oauth/v1/legacy.go
vendor/github.com/openshift/api/oauth/v1/legacy.go
package v1 import ( corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &OAuthAccessToken{}, &OAuthAccessTokenList{}, &OAuthAuthorizeToken{}, &OAuthAuthorizeTokenList{}, &OAuthClient{}, &OAuthClientList{}, &OAuthClientAuthorization{}, &OAuthClientAuthorizationList{}, &OAuthRedirectReference{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Route) DeepCopyInto(out *Route) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. func (in *Route) DeepCopy() *Route { if in == nil { return nil } out := new(Route) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *Route) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteIngress) DeepCopyInto(out *RouteIngress) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]RouteIngressCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngress. func (in *RouteIngress) DeepCopy() *RouteIngress { if in == nil { return nil } out := new(RouteIngress) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteIngressCondition) DeepCopyInto(out *RouteIngressCondition) { *out = *in if in.LastTransitionTime != nil { in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngressCondition. 
func (in *RouteIngressCondition) DeepCopy() *RouteIngressCondition { if in == nil { return nil } out := new(RouteIngressCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteList) DeepCopyInto(out *RouteList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Route, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. func (in *RouteList) DeepCopy() *RouteList { if in == nil { return nil } out := new(RouteList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RouteList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoutePort) DeepCopyInto(out *RoutePort) { *out = *in out.TargetPort = in.TargetPort return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePort. func (in *RoutePort) DeepCopy() *RoutePort { if in == nil { return nil } out := new(RoutePort) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { *out = *in in.To.DeepCopyInto(&out.To) if in.AlternateBackends != nil { in, out := &in.AlternateBackends, &out.AlternateBackends *out = make([]RouteTargetReference, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(RoutePort) **out = **in } if in.TLS != nil { in, out := &in.TLS, &out.TLS *out = new(TLSConfig) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. func (in *RouteSpec) DeepCopy() *RouteSpec { if in == nil { return nil } out := new(RouteSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { *out = *in if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress *out = make([]RouteIngress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. func (in *RouteStatus) DeepCopy() *RouteStatus { if in == nil { return nil } out := new(RouteStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteTargetReference) DeepCopyInto(out *RouteTargetReference) { *out = *in if in.Weight != nil { in, out := &in.Weight, &out.Weight *out = new(int32) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTargetReference. func (in *RouteTargetReference) DeepCopy() *RouteTargetReference { if in == nil { return nil } out := new(RouteTargetReference) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RouterShard) DeepCopyInto(out *RouterShard) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterShard. func (in *RouterShard) DeepCopy() *RouterShard { if in == nil { return nil } out := new(RouterShard) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. func (in *TLSConfig) DeepCopy() *TLSConfig { if in == nil { return nil } out := new(TLSConfig) in.DeepCopyInto(out) return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/types.go
vendor/github.com/openshift/api/route/v1/types.go
package v1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true // +kubebuilder:subresource:status // A route allows developers to expose services through an HTTP(S) aware load balancing and proxy // layer via a public DNS entry. The route may further specify TLS options and a certificate, or // specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An // administrator typically configures their router to be visible outside the cluster firewall, and // may also add additional security, caching, or traffic controls on the service content. Routers // usually talk directly to the service endpoints. // // Once a route is created, the `host` field may not be changed. Generally, routers use the oldest // route with a given host when resolving conflicts. // // Routers are subject to additional customization and may support additional controls via the // annotations field. // // Because administrators may configure multiple routers, the route status field is used to // return information to clients about the names and states of the route under each router. // If a client chooses a duplicate name, for instance, the route status conditions are used // to indicate the route cannot be chosen. // // To enable HTTP/2 ALPN on a route it requires a custom // (non-wildcard) certificate. This prevents connection coalescing by // clients, notably web browsers. We do not support HTTP/2 ALPN on // routes that use the default certificate because of the risk of // connection re-use/coalescing. Routes that do not have their own // custom certificate will not be HTTP/2 ALPN-enabled on either the // frontend or the backend. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 type Route struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the desired state of the route Spec RouteSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // status is the current state of the route // +optional Status RouteStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RouteList is a collection of Routes. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type RouteList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is a list of routes Items []Route `json:"items" protobuf:"bytes,2,rep,name=items"` } // RouteSpec describes the hostname or path the route exposes, any security information, // and one to four backends (services) the route points to. Requests are distributed // among the backends depending on the weights assigned to each backend. When using // roundrobin scheduling the portion of requests that go to each backend is the backend // weight divided by the sum of all of the backend weights. When the backend has more than // one endpoint the requests that end up on the backend are roundrobin distributed among // the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests // to the backend. If all weights are zero the route will be considered to have no backends // and return a standard 503 response. 
// // The `tls` field is optional and allows specific certificates or behavior for the // route. Routers typically configure a default certificate on a wildcard domain to // terminate routes without explicit certificates, but custom hostnames usually must // choose passthrough (send traffic directly to the backend via the TLS Server-Name- // Indication field) or provide a certificate. type RouteSpec struct { // host is an alias/DNS that points to the service. Optional. // If not specified a route name will typically be automatically // chosen. // Must follow DNS952 subdomain conventions. // // +optional // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // subdomain is a DNS subdomain that is requested within the ingress controller's // domain (as a subdomain). If host is set this field is ignored. An ingress // controller may choose to ignore this suggested name, in which case the controller // will report the assigned name in the status.ingress array or refuse to admit the // route. If this value is set and the server does not support this field host will // be populated automatically. Otherwise host is left empty. The field may have // multiple parts separated by a dot, but not all ingress controllers may honor // the request. This field may not be changed after creation except by a user with // the update routes/custom-host permission. // // Example: subdomain `frontend` automatically receives the router subdomain // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. 
// // +optional // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,8,opt,name=subdomain"` // path that the router watches for, to route traffic for to the service. Optional // // +optional // +kubebuilder:validation:Pattern=`^/` Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // to is an object the route should use as the primary backend. Only the Service kind // is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) // is set to zero, no traffic will be sent to this backend. To RouteTargetReference `json:"to" protobuf:"bytes,3,opt,name=to"` // alternateBackends allows up to 3 additional backends to be assigned to the route. // Only the Service kind is allowed, and it will be defaulted to Service. // Use the weight field in RouteTargetReference object to specify relative preference. // // +kubebuilder:validation:MaxItems=3 AlternateBackends []RouteTargetReference `json:"alternateBackends,omitempty" protobuf:"bytes,4,rep,name=alternateBackends"` // If specified, the port to be used by the router. Most routers will use all // endpoints exposed by the service by default - set this value to instruct routers // which port to use. Port *RoutePort `json:"port,omitempty" protobuf:"bytes,5,opt,name=port"` // The tls field provides the ability to configure certificates and termination for the route. TLS *TLSConfig `json:"tls,omitempty" protobuf:"bytes,6,opt,name=tls"` // Wildcard policy if any for the route. // Currently only 'Subdomain' or 'None' is allowed. 
// // +kubebuilder:validation:Enum=None;Subdomain;"" // +kubebuilder:default=None WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,7,opt,name=wildcardPolicy"` } // RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' // kind is allowed. Use 'weight' field to emphasize one over others. type RouteTargetReference struct { // The kind of target that the route is referring to. Currently, only 'Service' is allowed // // +kubebuilder:validation:Enum=Service;"" // +kubebuilder:default=Service Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // name of the service/target that is being referred to. e.g. name of the service // // +kubebuilder:validation:MinLength=1 Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // weight as an integer between 0 and 256, default 100, that specifies the target's relative weight // against other target reference objects. 0 suppresses requests to this backend. // // +optional // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=256 // +kubebuilder:default=100 Weight *int32 `json:"weight" protobuf:"varint,3,opt,name=weight"` } // RoutePort defines a port mapping from a router to an endpoint in the service endpoints. type RoutePort struct { // The target port on pods selected by the service this route points to. // If this is a string, it will be looked up as a named port in the target // endpoints port list. Required TargetPort intstr.IntOrString `json:"targetPort" protobuf:"bytes,1,opt,name=targetPort"` } // RouteStatus provides relevant info about the status of a route, including which routers // acknowledge it. type RouteStatus struct { // ingress describes the places where the route may be exposed. The list of // ingress points may contain duplicate Host or RouterName values. 
Routes // are considered live once they are `Ready` Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // RouteIngress holds information about the places where a route is exposed. type RouteIngress struct { // Host is the host string under which the route is exposed; this value is required Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // Name is a name chosen by the router to identify itself; this value is required RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"` // Conditions is the state of the route, may be empty. Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` // Wildcard policy is the wildcard policy that was allowed where this route is exposed. WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,4,opt,name=wildcardPolicy"` // CanonicalHostname is the external host name for the router that can be used as a CNAME // for the host requested for this route. This value is optional and may not be set in all cases. RouterCanonicalHostname string `json:"routerCanonicalHostname,omitempty" protobuf:"bytes,5,opt,name=routerCanonicalHostname"` } // RouteIngressConditionType is a valid value for RouteCondition type RouteIngressConditionType string // These are valid conditions of pod. const ( // RouteAdmitted means the route is able to service requests for the provided Host RouteAdmitted RouteIngressConditionType = "Admitted" // TODO: add other route condition types ) // RouteIngressCondition contains details for the current condition of this route on a particular // router. type RouteIngressCondition struct { // Type is the type of the condition. // Currently only Admitted. Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"` // Status is the status of the condition. // Can be True, False, Unknown. 
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // (brief) reason for the condition's last transition, and is usually a machine and human // readable constant Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // Human readable message indicating details about last transition. Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // RFC 3339 date and time when this condition last transitioned LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,5,opt,name=lastTransitionTime"` } // RouterShard has information of a routing shard and is used to // generate host names and routing table entries when a routing shard is // allocated for a specific route. // Caveat: This is WIP and will likely undergo modifications when sharding // support is added. type RouterShard struct { // shardName uniquely identifies a router shard in the "set" of // routers used for routing traffic to the services. ShardName string `json:"shardName" protobuf:"bytes,1,opt,name=shardName"` // dnsSuffix for the shard ala: shard-1.v3.openshift.com DNSSuffix string `json:"dnsSuffix" protobuf:"bytes,2,opt,name=dnsSuffix"` } // TLSConfig defines config used to secure a route and provide termination // // +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" type TLSConfig struct { // termination indicates termination type. 
// // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend // // +kubebuilder:validation:Enum=edge;reencrypt;passthrough Termination TLSTerminationType `json:"termination" protobuf:"bytes,1,opt,name=termination,casttype=TLSTerminationType"` // certificate provides certificate contents. This should be a single serving certificate, not a certificate // chain. Do not include a CA certificate. Certificate string `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` // key provides key file contents Key string `json:"key,omitempty" protobuf:"bytes,3,opt,name=key"` // caCertificate provides the cert authority certificate contents CACertificate string `json:"caCertificate,omitempty" protobuf:"bytes,4,opt,name=caCertificate"` // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt // termination this file should be provided in order to have routers use it for health checks on the secure connection. // If this field is not specified, the router may provide its own destination CA and perform hostname validation using // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically // verify. DestinationCACertificate string `json:"destinationCACertificate,omitempty" protobuf:"bytes,5,opt,name=destinationCACertificate"` // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While // each router may make its own decisions on which ports to expose, this is normally port 80. // // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). // * None - no traffic is allowed on the insecure port. 
// * Redirect - clients are redirected to the secure port. // // +kubebuilder:validation:Enum=Allow;None;Redirect;"" InsecureEdgeTerminationPolicy InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty" protobuf:"bytes,6,opt,name=insecureEdgeTerminationPolicy,casttype=InsecureEdgeTerminationPolicyType"` } // TLSTerminationType dictates where the secure communication will stop // TODO: Reconsider this type in v2 type TLSTerminationType string // InsecureEdgeTerminationPolicyType dictates the behavior of insecure // connections to an edge-terminated route. type InsecureEdgeTerminationPolicyType string const ( // TLSTerminationEdge terminate encryption at the edge router. TLSTerminationEdge TLSTerminationType = "edge" // TLSTerminationPassthrough terminate encryption at the destination, the destination is responsible for decrypting traffic TLSTerminationPassthrough TLSTerminationType = "passthrough" // TLSTerminationReencrypt terminate encryption at the edge router and re-encrypt it with a new certificate supplied by the destination TLSTerminationReencrypt TLSTerminationType = "reencrypt" // InsecureEdgeTerminationPolicyNone disables insecure connections for an edge-terminated route. InsecureEdgeTerminationPolicyNone InsecureEdgeTerminationPolicyType = "None" // InsecureEdgeTerminationPolicyAllow allows insecure connections for an edge-terminated route. InsecureEdgeTerminationPolicyAllow InsecureEdgeTerminationPolicyType = "Allow" // InsecureEdgeTerminationPolicyRedirect redirects insecure connections for an edge-terminated route. // As an example, for routers that support HTTP and HTTPS, the // insecure HTTP connections will be redirected to use HTTPS. InsecureEdgeTerminationPolicyRedirect InsecureEdgeTerminationPolicyType = "Redirect" ) // WildcardPolicyType indicates the type of wildcard support needed by routes. type WildcardPolicyType string const ( // WildcardPolicyNone indicates no wildcard support is needed. 
WildcardPolicyNone WildcardPolicyType = "None" // WildcardPolicySubdomain indicates the host needs wildcard support for the subdomain. // Example: For host = "www.acme.test", indicates that the router // should support requests for *.acme.test // Note that this will not match acme.test only *.acme.test WildcardPolicySubdomain WildcardPolicyType = "Subdomain" ) // Route Annotations const ( // AllowNonDNSCompliantHostAnnotation indicates that the host name in a route // configuration is not required to follow strict DNS compliance. // Unless the annotation is set to true, the route host name must have at least one label. // Labels must have no more than 63 characters from the set of // alphanumeric characters, '-' or '.', and must start and end with an alphanumeric // character. A trailing dot is not allowed. The total host name length must be no more // than 253 characters. // // When the annotation is set to true, the host name must pass a smaller set of // requirements, i.e.: character set as described above, and total host name // length must be no more than 253 characters. // // NOTE: use of this annotation may validate routes that cannot be admitted and will // not function. The annotation is provided to allow a custom scenario, e.g. a custom // ingress controller that relies on the route API, but for some customized purpose // needs to use routes with invalid hosts. AllowNonDNSCompliantHostAnnotation = "route.openshift.io/allow-non-dns-compliant-host" ) // Ingress-to-route controller const ( // IngressToRouteIngressClassControllerName is the name of the // controller that translates ingresses into routes. This value is // intended to be used for the spec.controller field of ingressclasses. IngressToRouteIngressClassControllerName = "openshift.io/ingress-to-route" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/register.go
vendor/github.com/openshift/api/route/v1/register.go
package v1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( GroupName = "route.openshift.io" GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme // SchemeGroupVersion generated code relies on this name // Deprecated SchemeGroupVersion = GroupVersion // AddToScheme exists solely to keep the old generators creating valid code // DEPRECATED AddToScheme = schemeBuilder.AddToScheme ) // Resource generated code relies on this being here, but it logically belongs to the group // DEPRECATED func Resource(resource string) schema.GroupResource { return schema.GroupResource{Group: GroupName, Resource: resource} } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &Route{}, &RouteList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_Route = map[string]string{ "": "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints.\n\nOnce a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts.\n\nRouters are subject to additional customization and may support additional controls via the annotations field.\n\nBecause administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen.\n\nTo enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. 
We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the desired state of the route", "status": "status is the current state of the route", } func (Route) SwaggerDoc() map[string]string { return map_Route } var map_RouteIngress = map[string]string{ "": "RouteIngress holds information about the places where a route is exposed.", "host": "Host is the host string under which the route is exposed; this value is required", "routerName": "Name is a name chosen by the router to identify itself; this value is required", "conditions": "Conditions is the state of the route, may be empty.", "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", } func (RouteIngress) SwaggerDoc() map[string]string { return map_RouteIngress } var map_RouteIngressCondition = map[string]string{ "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", "type": "Type is the type of the condition. Currently only Admitted.", "status": "Status is the status of the condition. 
Can be True, False, Unknown.", "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", "message": "Human readable message indicating details about last transition.", "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", } func (RouteIngressCondition) SwaggerDoc() map[string]string { return map_RouteIngressCondition } var map_RouteList = map[string]string{ "": "RouteList is a collection of Routes.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of routes", } func (RouteList) SwaggerDoc() map[string]string { return map_RouteList } var map_RoutePort = map[string]string{ "": "RoutePort defines a port mapping from a router to an endpoint in the service endpoints.", "targetPort": "The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required", } func (RoutePort) SwaggerDoc() map[string]string { return map_RoutePort } var map_RouteSpec = map[string]string{ "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. 
If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", "path": "path that the router watches for, to route traffic for to the service. Optional", "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend.", "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. 
Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference.", "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", "tls": "The tls field provides the ability to configure certificates and termination for the route.", "wildcardPolicy": "Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed.", } func (RouteSpec) SwaggerDoc() map[string]string { return map_RouteSpec } var map_RouteStatus = map[string]string{ "": "RouteStatus provides relevant info about the status of a route, including which routers acknowledge it.", "ingress": "ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`", } func (RouteStatus) SwaggerDoc() map[string]string { return map_RouteStatus } var map_RouteTargetReference = map[string]string{ "": "RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others.", "kind": "The kind of target that the route is referring to. Currently, only 'Service' is allowed", "name": "name of the service/target that is being referred to. e.g. name of the service", "weight": "weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend.", } func (RouteTargetReference) SwaggerDoc() map[string]string { return map_RouteTargetReference } var map_RouterShard = map[string]string{ "": "RouterShard has information of a routing shard and is used to generate host names and routing table entries when a routing shard is allocated for a specific route. 
Caveat: This is WIP and will likely undergo modifications when sharding support is added.", "shardName": "shardName uniquely identifies a router shard in the \"set\" of routers used for routing traffic to the services.", "dnsSuffix": "dnsSuffix for the shard ala: shard-1.v3.openshift.com", } func (RouterShard) SwaggerDoc() map[string]string { return map_RouterShard } var map_TLSConfig = map[string]string{ "": "TLSConfig defines config used to secure a route and provide termination", "termination": "termination indicates termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend", "certificate": "certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate.", "key": "key provides key file contents", "caCertificate": "caCertificate provides the cert authority certificate contents", "destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.", "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80.\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). 
* None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port.", } func (TLSConfig) SwaggerDoc() map[string]string { return map_TLSConfig } // AUTO-GENERATED FUNCTIONS END HERE
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/doc.go
vendor/github.com/openshift/api/route/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/route/apis/route // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=route.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/generated.pb.go
vendor/github.com/openshift/api/route/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/route/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Route) Reset() { *m = Route{} } func (*Route) ProtoMessage() {} func (*Route) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{0} } func (m *Route) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *Route) XXX_Merge(src proto.Message) { xxx_messageInfo_Route.Merge(m, src) } func (m *Route) XXX_Size() int { return m.Size() } func (m *Route) XXX_DiscardUnknown() { xxx_messageInfo_Route.DiscardUnknown(m) } var xxx_messageInfo_Route proto.InternalMessageInfo func (m *RouteIngress) Reset() { *m = RouteIngress{} } func (*RouteIngress) ProtoMessage() {} func (*RouteIngress) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{1} } func (m *RouteIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteIngress) XXX_Merge(src 
proto.Message) { xxx_messageInfo_RouteIngress.Merge(m, src) } func (m *RouteIngress) XXX_Size() int { return m.Size() } func (m *RouteIngress) XXX_DiscardUnknown() { xxx_messageInfo_RouteIngress.DiscardUnknown(m) } var xxx_messageInfo_RouteIngress proto.InternalMessageInfo func (m *RouteIngressCondition) Reset() { *m = RouteIngressCondition{} } func (*RouteIngressCondition) ProtoMessage() {} func (*RouteIngressCondition) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{2} } func (m *RouteIngressCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteIngressCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteIngressCondition) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteIngressCondition.Merge(m, src) } func (m *RouteIngressCondition) XXX_Size() int { return m.Size() } func (m *RouteIngressCondition) XXX_DiscardUnknown() { xxx_messageInfo_RouteIngressCondition.DiscardUnknown(m) } var xxx_messageInfo_RouteIngressCondition proto.InternalMessageInfo func (m *RouteList) Reset() { *m = RouteList{} } func (*RouteList) ProtoMessage() {} func (*RouteList) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{3} } func (m *RouteList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteList) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteList.Merge(m, src) } func (m *RouteList) XXX_Size() int { return m.Size() } func (m *RouteList) XXX_DiscardUnknown() { xxx_messageInfo_RouteList.DiscardUnknown(m) } var xxx_messageInfo_RouteList proto.InternalMessageInfo func (m *RoutePort) Reset() { *m = RoutePort{} } func (*RoutePort) ProtoMessage() {} func (*RoutePort) 
Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{4} } func (m *RoutePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RoutePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RoutePort) XXX_Merge(src proto.Message) { xxx_messageInfo_RoutePort.Merge(m, src) } func (m *RoutePort) XXX_Size() int { return m.Size() } func (m *RoutePort) XXX_DiscardUnknown() { xxx_messageInfo_RoutePort.DiscardUnknown(m) } var xxx_messageInfo_RoutePort proto.InternalMessageInfo func (m *RouteSpec) Reset() { *m = RouteSpec{} } func (*RouteSpec) ProtoMessage() {} func (*RouteSpec) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{5} } func (m *RouteSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteSpec.Merge(m, src) } func (m *RouteSpec) XXX_Size() int { return m.Size() } func (m *RouteSpec) XXX_DiscardUnknown() { xxx_messageInfo_RouteSpec.DiscardUnknown(m) } var xxx_messageInfo_RouteSpec proto.InternalMessageInfo func (m *RouteStatus) Reset() { *m = RouteStatus{} } func (*RouteStatus) ProtoMessage() {} func (*RouteStatus) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{6} } func (m *RouteStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteStatus.Merge(m, src) } func (m *RouteStatus) XXX_Size() int { return m.Size() } func (m 
*RouteStatus) XXX_DiscardUnknown() { xxx_messageInfo_RouteStatus.DiscardUnknown(m) } var xxx_messageInfo_RouteStatus proto.InternalMessageInfo func (m *RouteTargetReference) Reset() { *m = RouteTargetReference{} } func (*RouteTargetReference) ProtoMessage() {} func (*RouteTargetReference) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{7} } func (m *RouteTargetReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouteTargetReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouteTargetReference) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteTargetReference.Merge(m, src) } func (m *RouteTargetReference) XXX_Size() int { return m.Size() } func (m *RouteTargetReference) XXX_DiscardUnknown() { xxx_messageInfo_RouteTargetReference.DiscardUnknown(m) } var xxx_messageInfo_RouteTargetReference proto.InternalMessageInfo func (m *RouterShard) Reset() { *m = RouterShard{} } func (*RouterShard) ProtoMessage() {} func (*RouterShard) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{8} } func (m *RouterShard) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RouterShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RouterShard) XXX_Merge(src proto.Message) { xxx_messageInfo_RouterShard.Merge(m, src) } func (m *RouterShard) XXX_Size() int { return m.Size() } func (m *RouterShard) XXX_DiscardUnknown() { xxx_messageInfo_RouterShard.DiscardUnknown(m) } var xxx_messageInfo_RouterShard proto.InternalMessageInfo func (m *TLSConfig) Reset() { *m = TLSConfig{} } func (*TLSConfig) ProtoMessage() {} func (*TLSConfig) Descriptor() ([]byte, []int) { return fileDescriptor_373b8fa7ff738721, []int{9} } func (m *TLSConfig) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) } func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TLSConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_TLSConfig.Merge(m, src) } func (m *TLSConfig) XXX_Size() int { return m.Size() } func (m *TLSConfig) XXX_DiscardUnknown() { xxx_messageInfo_TLSConfig.DiscardUnknown(m) } var xxx_messageInfo_TLSConfig proto.InternalMessageInfo func init() { proto.RegisterType((*Route)(nil), "github.com.openshift.api.route.v1.Route") proto.RegisterType((*RouteIngress)(nil), "github.com.openshift.api.route.v1.RouteIngress") proto.RegisterType((*RouteIngressCondition)(nil), "github.com.openshift.api.route.v1.RouteIngressCondition") proto.RegisterType((*RouteList)(nil), "github.com.openshift.api.route.v1.RouteList") proto.RegisterType((*RoutePort)(nil), "github.com.openshift.api.route.v1.RoutePort") proto.RegisterType((*RouteSpec)(nil), "github.com.openshift.api.route.v1.RouteSpec") proto.RegisterType((*RouteStatus)(nil), "github.com.openshift.api.route.v1.RouteStatus") proto.RegisterType((*RouteTargetReference)(nil), "github.com.openshift.api.route.v1.RouteTargetReference") proto.RegisterType((*RouterShard)(nil), "github.com.openshift.api.route.v1.RouterShard") proto.RegisterType((*TLSConfig)(nil), "github.com.openshift.api.route.v1.TLSConfig") } func init() { proto.RegisterFile("github.com/openshift/api/route/v1/generated.proto", fileDescriptor_373b8fa7ff738721) } var fileDescriptor_373b8fa7ff738721 = []byte{ // 1168 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, 0x17, 0xcf, 0xfa, 0x57, 0xe2, 0x71, 0xdb, 0xef, 0xb7, 0x03, 0xa5, 0x6e, 0xa4, 0xd8, 0xc9, 0x1e, 0x50, 0x8a, 0xca, 0x2e, 0x09, 0x05, 0x2a, 0x21, 0x0e, 0x75, 0x8a, 0x4a, 0x1a, 0x27, 0x8d, 0xc6, 0x16, 0x15, 0x55, 0x0f, 0x4c, 0x76, 0xc7, 0xeb, 0xc1, 0xf6, 
0xec, 0x32, 0x33, 0x4e, 0xf1, 0x05, 0x55, 0xe2, 0x1f, 0x28, 0xff, 0x0d, 0x77, 0x2e, 0x39, 0xf6, 0xd8, 0x03, 0xb2, 0x88, 0x39, 0xf2, 0x1f, 0xe4, 0x84, 0x66, 0x76, 0xec, 0x5d, 0x3b, 0x4e, 0xe2, 0xc2, 0xcd, 0xfb, 0xde, 0xe7, 0xf3, 0x79, 0x6f, 0xde, 0x7b, 0xf3, 0x46, 0x06, 0x5b, 0x01, 0x95, 0xed, 0xfe, 0x91, 0xe3, 0x85, 0x3d, 0x37, 0x8c, 0x08, 0x13, 0x6d, 0xda, 0x92, 0x2e, 0x8e, 0xa8, 0xcb, 0xc3, 0xbe, 0x24, 0xee, 0xf1, 0x96, 0x1b, 0x10, 0x46, 0x38, 0x96, 0xc4, 0x77, 0x22, 0x1e, 0xca, 0x10, 0x6e, 0x24, 0x14, 0x67, 0x42, 0x71, 0x70, 0x44, 0x1d, 0x4d, 0x71, 0x8e, 0xb7, 0x56, 0x3f, 0x4e, 0xa9, 0x06, 0x61, 0x10, 0xba, 0x9a, 0x79, 0xd4, 0x6f, 0xe9, 0x2f, 0xfd, 0xa1, 0x7f, 0xc5, 0x8a, 0xab, 0x76, 0xe7, 0x81, 0x70, 0x68, 0xa8, 0xc3, 0x7a, 0x21, 0x9f, 0x17, 0x75, 0xf5, 0x7e, 0x82, 0xe9, 0x61, 0xaf, 0x4d, 0x19, 0xe1, 0x03, 0x37, 0xea, 0x04, 0xca, 0x20, 0xdc, 0x1e, 0x91, 0x78, 0x1e, 0xeb, 0xf3, 0x8b, 0x58, 0xbc, 0xcf, 0x24, 0xed, 0x11, 0x57, 0x78, 0x6d, 0xd2, 0xc3, 0xe7, 0x78, 0x9f, 0x5e, 0xc4, 0xeb, 0x4b, 0xda, 0x75, 0x29, 0x93, 0x42, 0xf2, 0x59, 0x92, 0xfd, 0x6b, 0x06, 0xe4, 0x91, 0x2a, 0x01, 0xfc, 0x1e, 0xac, 0xa8, 0x8c, 0x7c, 0x2c, 0x71, 0xd9, 0x5a, 0xb7, 0x36, 0x4b, 0xdb, 0x9f, 0x38, 0xb1, 0xa2, 0x93, 0x56, 0x74, 0xa2, 0x4e, 0xa0, 0x0c, 0xc2, 0x51, 0x68, 0xe7, 0x78, 0xcb, 0x79, 0x7a, 0xf4, 0x03, 0xf1, 0xe4, 0x3e, 0x91, 0xb8, 0x06, 0x4f, 0x86, 0xd5, 0xa5, 0xd1, 0xb0, 0x0a, 0x12, 0x1b, 0x9a, 0xa8, 0xc2, 0x03, 0x90, 0x13, 0x11, 0xf1, 0xca, 0x19, 0xad, 0x7e, 0xcf, 0xb9, 0xb2, 0x27, 0x8e, 0xce, 0xac, 0x11, 0x11, 0xaf, 0x76, 0xcd, 0x28, 0xe7, 0xd4, 0x17, 0xd2, 0x3a, 0xf0, 0x5b, 0x50, 0x10, 0x12, 0xcb, 0xbe, 0x28, 0x67, 0xb5, 0xa2, 0xb3, 0xb0, 0xa2, 0x66, 0xd5, 0x6e, 0x18, 0xcd, 0x42, 0xfc, 0x8d, 0x8c, 0x9a, 0xfd, 0x4b, 0x16, 0x5c, 0xd3, 0xb8, 0x5d, 0x16, 0x70, 0x22, 0x04, 0x5c, 0x07, 0xb9, 0x76, 0x28, 0xa4, 0x2e, 0x4b, 0x31, 0x49, 0xe5, 0x9b, 0x50, 0x48, 0xa4, 0x3d, 0x70, 0x1b, 0x00, 0x1d, 0x82, 0x1f, 0xe0, 0x1e, 0xd1, 0x07, 0x2c, 0x26, 0xc5, 0x40, 0x13, 0x0f, 0x4a, 0xa1, 0x60, 
0x17, 0x00, 0x2f, 0x64, 0x3e, 0x95, 0x34, 0x64, 0xea, 0x08, 0xd9, 0xcd, 0xd2, 0xf6, 0x83, 0x45, 0x8f, 0x60, 0x52, 0xdb, 0x19, 0x0b, 0x24, 0xd1, 0x26, 0x26, 0x81, 0x52, 0xfa, 0xb0, 0x09, 0x6e, 0xbc, 0xa4, 0x5d, 0xdf, 0xc3, 0xdc, 0x3f, 0x0c, 0xbb, 0xd4, 0x1b, 0x94, 0x73, 0x3a, 0xcb, 0x7b, 0x86, 0x77, 0xe3, 0xd9, 0x94, 0xf7, 0x6c, 0x58, 0x85, 0xd3, 0x96, 0xe6, 0x20, 0x22, 0x68, 0x46, 0x03, 0x7e, 0x07, 0x6e, 0xc7, 0x27, 0xda, 0xc1, 0x2c, 0x64, 0xd4, 0xc3, 0x5d, 0x55, 0x14, 0xa6, 0x8a, 0x90, 0xd7, 0xf2, 0x55, 0x23, 0x7f, 0x1b, 0xcd, 0x87, 0xa1, 0x8b, 0xf8, 0xf6, 0xdf, 0x19, 0x70, 0x6b, 0xee, 0x51, 0xe1, 0x57, 0x20, 0x27, 0x07, 0x11, 0x31, 0xed, 0xb8, 0x3b, 0x6e, 0x87, 0x4a, 0xf0, 0x6c, 0x58, 0xbd, 0x33, 0x97, 0xa4, 0xb3, 0xd7, 0x34, 0x58, 0x9f, 0x8c, 0x4d, 0xdc, 0xa7, 0xfb, 0xd3, 0x63, 0x70, 0x36, 0xac, 0xce, 0xb9, 0xdb, 0xce, 0x44, 0x69, 0x7a, 0x58, 0xe0, 0x87, 0xa0, 0xc0, 0x09, 0x16, 0x21, 0xd3, 0x43, 0x58, 0x4c, 0x86, 0x0a, 0x69, 0x2b, 0x32, 0x5e, 0x78, 0x17, 0x2c, 0xf7, 0x88, 0x10, 0x38, 0x20, 0xa6, 0xf0, 0xff, 0x33, 0xc0, 0xe5, 0xfd, 0xd8, 0x8c, 0xc6, 0x7e, 0xc8, 0x01, 0xec, 0x62, 0x21, 0x9b, 0x1c, 0x33, 0x11, 0x27, 0x4f, 0x4d, 0x3d, 0x4b, 0xdb, 0x1f, 0x2d, 0x76, 0x27, 0x15, 0xa3, 0xf6, 0xc1, 0x68, 0x58, 0x85, 0xf5, 0x73, 0x4a, 0x68, 0x8e, 0xba, 0xfd, 0x9b, 0x05, 0x8a, 0xba, 0x70, 0x75, 0x2a, 0x24, 0x7c, 0x71, 0x6e, 0x17, 0x38, 0x8b, 0xc5, 0x55, 0x6c, 0xbd, 0x09, 0xfe, 0x6f, 0x4e, 0xb7, 0x32, 0xb6, 0xa4, 0xf6, 0xc0, 0x3e, 0xc8, 0x53, 0x49, 0x7a, 0xaa, 0xfe, 0x6a, 0xe6, 0x37, 0x17, 0x9d, 0xf9, 0xda, 0x75, 0x23, 0x9a, 0xdf, 0x55, 0x74, 0x14, 0xab, 0xd8, 0x3f, 0x9a, 0xcc, 0x0f, 0x43, 0x2e, 0xa1, 0x0f, 0x80, 0xc4, 0x3c, 0x20, 0x52, 0x7d, 0x5d, 0xb9, 0xc7, 0xd4, 0x66, 0x74, 0xe2, 0xcd, 0xe8, 0xec, 0x32, 0xf9, 0x94, 0x37, 0x24, 0xa7, 0x2c, 0x48, 0x2e, 0x53, 0x73, 0xa2, 0x85, 0x52, 0xba, 0xf6, 0xef, 0x39, 0x13, 0x53, 0x6d, 0xa3, 0x05, 0xd6, 0x83, 0x0b, 0x8a, 0xa2, 0x7f, 0xe4, 0x87, 0x3d, 0x4c, 0x59, 0x79, 0x45, 0xc3, 0x6e, 0x1a, 0x58, 0xb1, 0x31, 0x76, 0xa0, 0x04, 0xa3, 
0x24, 0x23, 0x2c, 0xdb, 0x66, 0x42, 0x27, 0x92, 0x87, 0x58, 0xb6, 0x91, 0xf6, 0xc0, 0x06, 0xc8, 0xc8, 0xd0, 0x2c, 0xbe, 0x2f, 0x16, 0xad, 0x60, 0x7c, 0x1c, 0x44, 0x5a, 0x84, 0x13, 0xe6, 0x91, 0x1a, 0x30, 0xc2, 0x99, 0x66, 0x88, 0x32, 0x32, 0x84, 0xaf, 0x2c, 0x70, 0x13, 0x77, 0x25, 0xe1, 0x0c, 0x4b, 0x52, 0xc3, 0x5e, 0x87, 0x30, 0x5f, 0x94, 0x73, 0xba, 0x4d, 0xff, 0x3a, 0xc8, 0x1d, 0x13, 0xe4, 0xe6, 0xc3, 0x59, 0x65, 0x74, 0x3e, 0x18, 0x7c, 0x02, 0x72, 0x91, 0x6a, 0x5d, 0xfe, 0xdd, 0x1e, 0x09, 0xd5, 0x96, 0xda, 0x8a, 0xae, 0x91, 0x6a, 0x96, 0xd6, 0x80, 0x8f, 0x41, 0x56, 0x76, 0x45, 0xb9, 0xb0, 0xb0, 0x54, 0xb3, 0xde, 0xd8, 0x09, 0x59, 0x8b, 0x06, 0xb5, 0xe5, 0xd1, 0xb0, 0x9a, 0x6d, 0xd6, 0x1b, 0x48, 0x29, 0xcc, 0x59, 0x9e, 0xcb, 0xff, 0x7d, 0x79, 0xda, 0x14, 0x94, 0x52, 0xcf, 0x11, 0x7c, 0x0e, 0x96, 0x69, 0xbc, 0xb5, 0xca, 0x96, 0xae, 0xb8, 0xfb, 0x8e, 0x8f, 0x41, 0xb2, 0x52, 0x8c, 0x01, 0x8d, 0x05, 0xed, 0x9f, 0xc1, 0xfb, 0xf3, 0x7a, 0xa3, 0xe6, 0xac, 0x43, 0x99, 0x3f, 0x3b, 0xba, 0x7b, 0x94, 0xf9, 0x48, 0x7b, 0x14, 0x82, 0x25, 0x6f, 0xda, 0x04, 0xa1, 0x5f, 0x33, 0xed, 0x81, 0x36, 0x28, 0xbc, 0x24, 0x34, 0x68, 0x4b, 0x3d, 0x8d, 0xf9, 0x1a, 0x50, 0xdb, 0xef, 0x99, 0xb6, 0x20, 0xe3, 0xb1, 0x43, 0x73, 0x54, 0xde, 0x68, 0x63, 0xee, 0xeb, 0xfb, 0xa0, 0x7e, 0xe8, 0xd7, 0xd2, 0x9a, 0xb9, 0x0f, 0x63, 0x07, 0x4a, 0x30, 0x8a, 0xe0, 0x33, 0xd1, 0xe8, 0xb7, 0x5a, 0xf4, 0x27, 0x93, 0xca, 0x84, 0xf0, 0xe8, 0xa0, 0x11, 0x3b, 0x50, 0x82, 0xb1, 0xff, 0xc8, 0x82, 0xe2, 0xa4, 0x9b, 0x70, 0x0f, 0x94, 0x24, 0xe1, 0x3d, 0xca, 0xb0, 0x5a, 0x78, 0x33, 0x0f, 0x47, 0xa9, 0x99, 0xb8, 0x54, 0xe7, 0x9a, 0xf5, 0x46, 0xca, 0xa2, 0x3b, 0x97, 0x66, 0xc3, 0xcf, 0x40, 0xc9, 0x23, 0x5c, 0xd2, 0x16, 0xf5, 0xb0, 0x1c, 0x17, 0xe6, 0xbd, 0xb1, 0xd8, 0x4e, 0xe2, 0x42, 0x69, 0x1c, 0x5c, 0x03, 0xd9, 0x0e, 0x19, 0x98, 0x57, 0xa2, 0x64, 0xe0, 0xd9, 0x3d, 0x32, 0x40, 0xca, 0x0e, 0xbf, 0x04, 0xd7, 0x3d, 0x9c, 0x22, 0x9b, 0x57, 0xe2, 0x96, 0x01, 0x5e, 0xdf, 0x79, 0x98, 0x56, 0x9e, 0xc6, 0xc2, 0x17, 0xa0, 0xec, 
0x13, 0x21, 0x4d, 0x86, 0x53, 0x50, 0xf3, 0x0e, 0xaf, 0x1b, 0x9d, 0xf2, 0xa3, 0x0b, 0x70, 0xe8, 0x42, 0x05, 0xf8, 0xda, 0x02, 0x6b, 0x94, 0x09, 0xe2, 0xf5, 0x39, 0xf9, 0xda, 0x0f, 0x48, 0xaa, 0x3a, 0xe6, 0x36, 0x14, 0x74, 0x8c, 0x27, 0x26, 0xc6, 0xda, 0xee, 0x65, 0xe0, 0xb3, 0x61, 0x75, 0xe3, 0x52, 0x80, 0xae, 0xf8, 0xe5, 0x01, 0x6b, 0x8f, 0x4f, 0x4e, 0x2b, 0x4b, 0x6f, 0x4e, 0x2b, 0x4b, 0x6f, 0x4f, 0x2b, 0x4b, 0xaf, 0x46, 0x15, 0xeb, 0x64, 0x54, 0xb1, 0xde, 0x8c, 0x2a, 0xd6, 0xdb, 0x51, 0xc5, 0xfa, 0x73, 0x54, 0xb1, 0x5e, 0xff, 0x55, 0x59, 0x7a, 0xbe, 0x71, 0xe5, 0x1f, 0x85, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x8e, 0xb3, 0x95, 0x4c, 0x0c, 0x00, 0x00, } func (m *Route) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Route) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Route) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouteIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteIngress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteIngress) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.RouterCanonicalHostname) copy(dAtA[i:], m.RouterCanonicalHostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterCanonicalHostname))) i-- dAtA[i] = 0x2a i -= len(m.WildcardPolicy) copy(dAtA[i:], m.WildcardPolicy) i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) i-- dAtA[i] = 0x22 if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } i -= len(m.RouterName) copy(dAtA[i:], m.RouterName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterName))) i-- dAtA[i] = 0x12 i -= len(m.Host) copy(dAtA[i:], m.Host) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouteIngressCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteIngressCondition) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteIngressCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.LastTransitionTime != nil { { size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x2a } i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x22 i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) i-- dAtA[i] = 0x1a i -= len(m.Status) copy(dAtA[i:], m.Status) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) i-- dAtA[i] = 0x12 i -= len(m.Type) copy(dAtA[i:], 
m.Type) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouteList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RoutePort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RoutePort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RoutePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouteSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteSpec) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.Subdomain) copy(dAtA[i:], m.Subdomain) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) i-- dAtA[i] = 0x42 i -= len(m.WildcardPolicy) copy(dAtA[i:], m.WildcardPolicy) i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) i-- dAtA[i] = 0x3a if m.TLS != nil { { size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x32 } if m.Port != nil { { size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x2a } if len(m.AlternateBackends) > 0 { for iNdEx := len(m.AlternateBackends) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.AlternateBackends[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } { size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a i -= len(m.Path) copy(dAtA[i:], m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) i-- dAtA[i] = 0x12 i -= len(m.Host) copy(dAtA[i:], m.Host) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouteStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Ingress) > 0 { for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 
0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *RouteTargetReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouteTargetReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouteTargetReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Weight != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Weight)) i-- dAtA[i] = 0x18 } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 i -= len(m.Kind) copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *RouterShard) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RouterShard) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RouterShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.DNSSuffix) copy(dAtA[i:], m.DNSSuffix) i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSSuffix))) i-- dAtA[i] = 0x12 i -= len(m.ShardName) copy(dAtA[i:], m.ShardName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShardName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *TLSConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TLSConfig) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.InsecureEdgeTerminationPolicy) copy(dAtA[i:], m.InsecureEdgeTerminationPolicy) i = encodeVarintGenerated(dAtA, i, uint64(len(m.InsecureEdgeTerminationPolicy))) i-- dAtA[i] = 0x32 i -= len(m.DestinationCACertificate) copy(dAtA[i:], m.DestinationCACertificate) i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationCACertificate))) i-- dAtA[i] = 0x2a i -= len(m.CACertificate) copy(dAtA[i:], m.CACertificate) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CACertificate))) i-- dAtA[i] = 0x22 i -= len(m.Key) copy(dAtA[i:], m.Key) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) i-- dAtA[i] = 0x1a i -= len(m.Certificate) copy(dAtA[i:], m.Certificate) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) i-- dAtA[i] = 0x12 i -= len(m.Termination) copy(dAtA[i:], m.Termination) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Termination))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *Route) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func (m *RouteIngress) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Host) n += 1 + l + sovGenerated(uint64(l)) l = len(m.RouterName) n += 1 + l + sovGenerated(uint64(l)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } l = len(m.WildcardPolicy) n += 1 + l + sovGenerated(uint64(l)) l = len(m.RouterCanonicalHostname) n += 1 + l + sovGenerated(uint64(l)) return n } func (m *RouteIngressCondition) Size() (n int) { if m == nil { 
return 0 } var l int _ = l l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) if m.LastTransitionTime != nil { l = m.LastTransitionTime.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *RouteList) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } func (m *RoutePort) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.TargetPort.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func (m *RouteSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Host) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) l = m.To.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.AlternateBackends) > 0 { for _, e := range m.AlternateBackends { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } if m.Port != nil { l = m.Port.Size() n += 1 + l + sovGenerated(uint64(l)) } if m.TLS != nil { l = m.TLS.Size() n += 1 + l + sovGenerated(uint64(l)) } l = len(m.WildcardPolicy) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Subdomain) n += 1 + l + sovGenerated(uint64(l)) return n } func (m *RouteStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Ingress) > 0 { for _, e := range m.Ingress { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } func (m *RouteTargetReference) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Kind) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) if m.Weight != nil { n += 1 + sovGenerated(uint64(*m.Weight)) } return n } func (m *RouterShard) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.ShardName) n += 1 + l + 
sovGenerated(uint64(l)) l = len(m.DNSSuffix) n += 1 + l + sovGenerated(uint64(l)) return n } func (m *TLSConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Termination) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Certificate) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Key) n += 1 + l + sovGenerated(uint64(l)) l = len(m.CACertificate) n += 1 + l + sovGenerated(uint64(l)) l = len(m.DestinationCACertificate) n += 1 + l + sovGenerated(uint64(l)) l = len(m.InsecureEdgeTerminationPolicy) n += 1 + l + sovGenerated(uint64(l)) return n } func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *Route) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Route{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RouteSpec", "RouteSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RouteStatus", "RouteStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } func (this *RouteIngress) String() string { if this == nil { return "nil" } repeatedStringForConditions := "[]RouteIngressCondition{" for _, f := range this.Conditions { repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "RouteIngressCondition", "RouteIngressCondition", 1), `&`, ``, 1) + "," } repeatedStringForConditions += "}"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/route/v1/legacy.go
vendor/github.com/openshift/api/route/v1/legacy.go
package v1 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &Route{}, &RouteList{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { if in == nil { return nil } out := new(FSGroupStrategyOptions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IDRange) DeepCopyInto(out *IDRange) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. func (in *IDRange) DeepCopy() *IDRange { if in == nil { return nil } out := new(IDRange) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSecurityPolicyReview) DeepCopyInto(out *PodSecurityPolicyReview) { *out = *in out.TypeMeta = in.TypeMeta in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReview. func (in *PodSecurityPolicyReview) DeepCopy() *PodSecurityPolicyReview { if in == nil { return nil } out := new(PodSecurityPolicyReview) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *PodSecurityPolicyReview) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSecurityPolicyReviewSpec) DeepCopyInto(out *PodSecurityPolicyReviewSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) if in.ServiceAccountNames != nil { in, out := &in.ServiceAccountNames, &out.ServiceAccountNames *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewSpec. func (in *PodSecurityPolicyReviewSpec) DeepCopy() *PodSecurityPolicyReviewSpec { if in == nil { return nil } out := new(PodSecurityPolicyReviewSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSecurityPolicyReviewStatus) DeepCopyInto(out *PodSecurityPolicyReviewStatus) { *out = *in if in.AllowedServiceAccounts != nil { in, out := &in.AllowedServiceAccounts, &out.AllowedServiceAccounts *out = make([]ServiceAccountPodSecurityPolicyReviewStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewStatus. func (in *PodSecurityPolicyReviewStatus) DeepCopy() *PodSecurityPolicyReviewStatus { if in == nil { return nil } out := new(PodSecurityPolicyReviewStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSecurityPolicySelfSubjectReview) DeepCopyInto(out *PodSecurityPolicySelfSubjectReview) { *out = *in out.TypeMeta = in.TypeMeta in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReview. func (in *PodSecurityPolicySelfSubjectReview) DeepCopy() *PodSecurityPolicySelfSubjectReview { if in == nil { return nil } out := new(PodSecurityPolicySelfSubjectReview) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *PodSecurityPolicySelfSubjectReview) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySelfSubjectReviewSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReviewSpec. 
func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopy() *PodSecurityPolicySelfSubjectReviewSpec { if in == nil { return nil } out := new(PodSecurityPolicySelfSubjectReviewSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSecurityPolicySubjectReview) DeepCopyInto(out *PodSecurityPolicySubjectReview) { *out = *in out.TypeMeta = in.TypeMeta in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReview. func (in *PodSecurityPolicySubjectReview) DeepCopy() *PodSecurityPolicySubjectReview { if in == nil { return nil } out := new(PodSecurityPolicySubjectReview) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *PodSecurityPolicySubjectReview) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSecurityPolicySubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySubjectReviewSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewSpec. func (in *PodSecurityPolicySubjectReviewSpec) DeepCopy() *PodSecurityPolicySubjectReviewSpec { if in == nil { return nil } out := new(PodSecurityPolicySubjectReviewSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSecurityPolicySubjectReviewStatus) DeepCopyInto(out *PodSecurityPolicySubjectReviewStatus) { *out = *in if in.AllowedBy != nil { in, out := &in.AllowedBy, &out.AllowedBy *out = new(corev1.ObjectReference) **out = **in } in.Template.DeepCopyInto(&out.Template) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewStatus. func (in *PodSecurityPolicySubjectReviewStatus) DeepCopy() *PodSecurityPolicySubjectReviewStatus { if in == nil { return nil } out := new(PodSecurityPolicySubjectReviewStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Data != nil { in, out := &in.Data, &out.Data *out = make([]byte, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. func (in *RangeAllocation) DeepCopy() *RangeAllocation { if in == nil { return nil } out := new(RangeAllocation) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RangeAllocation) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RangeAllocationList) DeepCopyInto(out *RangeAllocationList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RangeAllocation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocationList. func (in *RangeAllocationList) DeepCopy() *RangeAllocationList { if in == nil { return nil } out := new(RangeAllocationList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *RangeAllocationList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in if in.UID != nil { in, out := &in.UID, &out.UID *out = new(int64) **out = **in } if in.UIDRangeMin != nil { in, out := &in.UIDRangeMin, &out.UIDRangeMin *out = new(int64) **out = **in } if in.UIDRangeMax != nil { in, out := &in.UIDRangeMax, &out.UIDRangeMax *out = new(int64) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { if in == nil { return nil } out := new(RunAsUserStrategyOptions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SELinuxContextStrategyOptions) DeepCopyInto(out *SELinuxContextStrategyOptions) { *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions *out = new(corev1.SELinuxOptions) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxContextStrategyOptions. func (in *SELinuxContextStrategyOptions) DeepCopy() *SELinuxContextStrategyOptions { if in == nil { return nil } out := new(SELinuxContextStrategyOptions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityContextConstraints) DeepCopyInto(out *SecurityContextConstraints) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Priority != nil { in, out := &in.Priority, &out.Priority *out = new(int32) **out = **in } if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities *out = make([]corev1.Capability, len(*in)) copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities *out = make([]corev1.Capability, len(*in)) copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities *out = make([]corev1.Capability, len(*in)) copy(*out, *in) } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]FSType, len(*in)) copy(*out, *in) } if in.AllowedFlexVolumes != nil { in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } if in.DefaultAllowPrivilegeEscalation != nil { in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation *out = new(bool) **out = **in } if in.AllowPrivilegeEscalation != nil { in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation *out = new(bool) **out = **in } 
in.SELinuxContext.DeepCopyInto(&out.SELinuxContext) in.RunAsUser.DeepCopyInto(&out.RunAsUser) in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.Users != nil { in, out := &in.Users, &out.Users *out = make([]string, len(*in)) copy(*out, *in) } if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) } if in.SeccompProfiles != nil { in, out := &in.SeccompProfiles, &out.SeccompProfiles *out = make([]string, len(*in)) copy(*out, *in) } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) copy(*out, *in) } if in.ForbiddenSysctls != nil { in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraints. func (in *SecurityContextConstraints) DeepCopy() *SecurityContextConstraints { if in == nil { return nil } out := new(SecurityContextConstraints) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *SecurityContextConstraints) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityContextConstraintsList) DeepCopyInto(out *SecurityContextConstraintsList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]SecurityContextConstraints, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraintsList. 
func (in *SecurityContextConstraintsList) DeepCopy() *SecurityContextConstraintsList { if in == nil { return nil } out := new(SecurityContextConstraintsList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *SecurityContextConstraintsList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopyInto(out *ServiceAccountPodSecurityPolicyReviewStatus) { *out = *in in.PodSecurityPolicySubjectReviewStatus.DeepCopyInto(&out.PodSecurityPolicySubjectReviewStatus) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountPodSecurityPolicyReviewStatus. func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopy() *ServiceAccountPodSecurityPolicyReviewStatus { if in == nil { return nil } out := new(ServiceAccountPodSecurityPolicyReviewStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { *out = *in if in.Ranges != nil { in, out := &in.Ranges, &out.Ranges *out = make([]IDRange, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { if in == nil { return nil } out := new(SupplementalGroupsStrategyOptions) in.DeepCopyInto(out) return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/consts.go
vendor/github.com/openshift/api/security/v1/consts.go
package v1 const ( UIDRangeAnnotation = "openshift.io/sa.scc.uid-range" // SupplementalGroupsAnnotation contains a comma delimited list of allocated supplemental groups // for the namespace. Groups are in the form of a Block which supports {start}/{length} or {start}-{end} SupplementalGroupsAnnotation = "openshift.io/sa.scc.supplemental-groups" MCSAnnotation = "openshift.io/sa.scc.mcs" ValidatedSCCAnnotation = "openshift.io/scc" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/types.go
vendor/github.com/openshift/api/security/v1/types.go
package v1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// AllowAllCapabilities can be used as a value for the
// SecurityContextConstraints.AllowAllCapabilities field and means that any
// capabilities are allowed to be requested.
var AllowAllCapabilities corev1.Capability = "*"

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext
// that will be applied to a container.
// For historical reasons SCC was exposed under the core Kubernetes API group.
// That exposure is deprecated and will be removed in a future release - users
// should instead use the security.openshift.io group to manage
// SecurityContextConstraints.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged"
// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container"
// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext"
// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext"
// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext"
// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext"
// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs"
// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system"
// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins"
// +kubebuilder:singular=securitycontextconstraint
// +openshift:compatibility-gen:level=1
type SecurityContextConstraints struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Priority influences the sort order of SCCs when evaluating which SCCs to try first for
	// a given pod request based on access in the Users and Groups fields. The higher the int, the
	// higher priority. An unset value is considered a 0 priority. If scores
	// for multiple SCCs are equal they will be sorted from most restrictive to
	// least restrictive. If both priorities and restrictions are equal the
	// SCCs will be sorted by name.
	// +nullable
	Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"`

	// AllowPrivilegedContainer determines if a container can request to be run as privileged.
	AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"`
	// DefaultAddCapabilities is the default set of capabilities that will be added to the container
	// unless the pod spec specifically drops the capability. You may not list a capability in both
	// DefaultAddCapabilities and RequiredDropCapabilities.
	// +nullable
	DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"`
	// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
	// are required to be dropped and cannot be added.
	// +nullable
	RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"`
	// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
	// Capabilities in this field may be added at the pod author's discretion.
	// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
	// To allow all capabilities you may use '*'.
	// +nullable
	AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"`
	// AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin
	// +k8s:conversion-gen=false
	AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"`
	// Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names
	// of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*".
	// To allow no volumes, set to ["none"].
	// +nullable
	Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"`
	// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
	// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
	// is allowed in the "Volumes" field.
	// +optional
	// +nullable
	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"`
	// AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
	AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"`
	// AllowHostPorts determines if the policy allows host ports in the containers.
	AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"`
	// AllowHostPID determines if the policy allows host pid in the containers.
	AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"`
	// AllowHostIPC determines if the policy allows host ipc in the containers.
	AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"`
	// DefaultAllowPrivilegeEscalation controls the default setting for whether a
	// process can gain more privileges than its parent process.
	// +optional
	// +nullable
	DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"`
	// AllowPrivilegeEscalation determines if a pod can request to allow
	// privilege escalation. If unspecified, defaults to true.
	// +optional
	// +nullable
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"`
	// SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
	// +nullable
	SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"`
	// RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
	// +nullable
	RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"`
	// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
	// +nullable
	SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"`
	// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
	// +nullable
	FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"`
	// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
	// system. If the container specifically requests to run with a non-read only root file system
	// the SCC should deny the pod.
	// If set to false the container may run with a read only root file system if it wishes but it
	// will not be forced to.
	ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem" protobuf:"varint,17,opt,name=readOnlyRootFilesystem"`

	// Users is the list of users who have permission to use this security context constraints object.
	// +optional
	// +nullable
	Users []string `json:"users" protobuf:"bytes,18,rep,name=users"`
	// Groups is the list of groups that have permission to use this security context constraints object.
	// +optional
	// +nullable
	Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"`

	// SeccompProfiles lists the allowed profiles that may be set for the pod or
	// container's seccomp annotations. An unset (nil) or empty value means that no profiles may
	// be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When
	// used to generate a value for a pod the first non-wildcard profile will be used as
	// the default.
	// +nullable
	SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"`

	// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
	// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
	// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
	//
	// Examples:
	// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
	// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
	// +optional
	// +nullable
	AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"`
	// ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
	// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
	//
	// Examples:
	// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
	// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
	// +optional
	// +nullable
	ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"`
}

// FSType gives strong typing to the different file systems that are used by volumes.
type FSType string

var (
	FSTypeAzureFile             FSType = "azureFile"
	FSTypeAzureDisk             FSType = "azureDisk"
	FSTypeFlocker               FSType = "flocker"
	FSTypeFlexVolume            FSType = "flexVolume"
	FSTypeHostPath              FSType = "hostPath"
	FSTypeEmptyDir              FSType = "emptyDir"
	FSTypeGCEPersistentDisk     FSType = "gcePersistentDisk"
	FSTypeAWSElasticBlockStore  FSType = "awsElasticBlockStore"
	FSTypeGitRepo               FSType = "gitRepo"
	FSTypeSecret                FSType = "secret"
	FSTypeNFS                   FSType = "nfs"
	FSTypeISCSI                 FSType = "iscsi"
	FSTypeGlusterfs             FSType = "glusterfs"
	FSTypePersistentVolumeClaim FSType = "persistentVolumeClaim"
	FSTypeRBD                   FSType = "rbd"
	FSTypeCinder                FSType = "cinder"
	FSTypeCephFS                FSType = "cephFS"
	FSTypeDownwardAPI           FSType = "downwardAPI"
	FSTypeFC                    FSType = "fc"
	FSTypeConfigMap             FSType = "configMap"
	FSTypeVsphereVolume         FSType = "vsphere"
	FSTypeQuobyte               FSType = "quobyte"
	FSTypePhotonPersistentDisk  FSType = "photonPersistentDisk"
	FSProjected                 FSType = "projected"
	FSPortworxVolume            FSType = "portworxVolume"
	FSScaleIO                   FSType = "scaleIO"
	FSStorageOS                 FSType = "storageOS"
	FSTypeCSI                   FSType = "csi"
	FSTypeEphemeral             FSType = "ephemeral"
	FSTypeAll                   FSType = "*"
	FSTypeNone                  FSType = "none"
)

// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
	// Driver is the name of the Flexvolume driver.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
}

// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.
type SELinuxContextStrategyOptions struct {
	// Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
	Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"`
	// seLinuxOptions required to run as; required for MustRunAs
	SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
}

// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
	// Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
	Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"`
	// UID is the user id that containers must run as. Required for the MustRunAs strategy if not using
	// namespace/service account allocated uids.
	UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"`
	// UIDRangeMin defines the min value for a strategy that allocates by range.
	UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"`
	// UIDRangeMax defines the max value for a strategy that allocates by range.
	UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"`
}

// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
	// Type is the strategy that will dictate what FSGroup is used in the SecurityContext.
	Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"`
	// Ranges are the allowed ranges of fs groups. If you would like to force a single
	// fs group then supply a single range with the same start and end.
	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}

// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
type SupplementalGroupsStrategyOptions struct {
	// Type is the strategy that will dictate what supplemental groups are used in the SecurityContext.
	Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"`
	// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
	// supplemental group then supply a single range with the same start and end.
	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}

// IDRange provides a min/max of an allowed range of IDs.
// TODO: this could be reused for UIDs.
type IDRange struct {
	// Min is the start of the range, inclusive.
	Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"`
	// Max is the end of the range, inclusive.
	Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"`
}

// SELinuxContextStrategyType denotes strategy types for generating SELinux options for a
// SecurityContext.
type SELinuxContextStrategyType string

// RunAsUserStrategyType denotes strategy types for generating RunAsUser values for a
// SecurityContext.
type RunAsUserStrategyType string

// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
// groups for a SecurityContext.
type SupplementalGroupsStrategyType string

// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
// SecurityContext.
type FSGroupStrategyType string

const (
	// SELinuxStrategyMustRunAs means the container must have SELinux labels of X applied.
	SELinuxStrategyMustRunAs SELinuxContextStrategyType = "MustRunAs"
	// SELinuxStrategyRunAsAny means the container may make requests for any SELinux context labels.
	SELinuxStrategyRunAsAny SELinuxContextStrategyType = "RunAsAny"

	// RunAsUserStrategyMustRunAs means the container must run as a particular uid.
	RunAsUserStrategyMustRunAs RunAsUserStrategyType = "MustRunAs"
	// RunAsUserStrategyMustRunAsRange means the container must run as a uid that falls within a
	// particular range (see UIDRangeMin/UIDRangeMax in RunAsUserStrategyOptions).
	RunAsUserStrategyMustRunAsRange RunAsUserStrategyType = "MustRunAsRange"
	// RunAsUserStrategyMustRunAsNonRoot means the container must run as a non-root uid.
	RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategyType = "MustRunAsNonRoot"
	// RunAsUserStrategyRunAsAny means the container may make requests for any uid.
	RunAsUserStrategyRunAsAny RunAsUserStrategyType = "RunAsAny"

	// FSGroupStrategyMustRunAs means the container must have FSGroup of X applied.
	FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
	// FSGroupStrategyRunAsAny means the container may make requests for any FSGroup labels.
	FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"

	// SupplementalGroupsStrategyMustRunAs means the container must run as a particular gid.
	SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
	// SupplementalGroupsStrategyRunAsAny means the container may make requests for any gid.
	SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SecurityContextConstraintsList is a list of SecurityContextConstraints objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type SecurityContextConstraintsList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of security context constraints.
	Items []SecurityContextConstraints `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.
// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=2
type PodSecurityPolicySubjectReview struct {
	metav1.TypeMeta `json:",inline"`

	// spec defines specification for the PodSecurityPolicySubjectReview.
	Spec PodSecurityPolicySubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`

	// status represents the current information/status for the PodSecurityPolicySubjectReview.
	Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}

// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview
type PodSecurityPolicySubjectReviewSpec struct {
	// template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted.
	// If it is non-empty, it will be checked.
	Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`

	// user is the user you're testing for.
	// If you specify "user" but not "group", then it is interpreted as "What if user were not a member of any groups".
	// If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.
	User string `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`

	// groups is the groups you're testing for.
	Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
}

// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.
type PodSecurityPolicySubjectReviewStatus struct {
	// allowedBy is a reference to the rule that allows the PodTemplateSpec.
	// A rule can be a SecurityContextConstraint or a PodSecurityPolicy.
	// A `nil` value indicates that the request was denied.
	AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"`

	// A machine-readable description of why this operation is in the
	// "Failure" status. If this value is empty there
	// is no information available.
	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`

	// template is the PodTemplateSpec after the defaulting is applied.
	Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}

// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec
//
// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=2
type PodSecurityPolicySelfSubjectReview struct {
	metav1.TypeMeta `json:",inline"`

	// spec defines the specification for the PodSecurityPolicySelfSubjectReview.
	Spec PodSecurityPolicySelfSubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`

	// status represents the current information/status for the PodSecurityPolicySelfSubjectReview.
	Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}

// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.
type PodSecurityPolicySelfSubjectReviewSpec struct {
	// template is the PodTemplateSpec to check.
	Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`
}

// +genclient
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.
//
// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=2
type PodSecurityPolicyReview struct {
	metav1.TypeMeta `json:",inline"`

	// spec is the PodSecurityPolicy to check.
	Spec PodSecurityPolicyReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`

	// status represents the current information/status for the PodSecurityPolicyReview.
	Status PodSecurityPolicyReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}

// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview
type PodSecurityPolicyReviewSpec struct {
	// template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used
	// if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty,
	// in which case "default" is used.
	// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
	Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`

	// serviceAccountNames is an optional set of ServiceAccounts to run the check with.
	// If serviceAccountNames is empty, the template.spec.serviceAccountName is used,
	// unless it's empty, in which case "default" is used instead.
	// If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
	// TODO: find a way to express 'all service accounts'
	ServiceAccountNames []string `json:"serviceAccountNames,omitempty" protobuf:"bytes,2,rep,name=serviceAccountNames"`
}

// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.
type PodSecurityPolicyReviewStatus struct {
	// allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.
	AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"`
}

// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status
type ServiceAccountPodSecurityPolicyReviewStatus struct {
	PodSecurityPolicySubjectReviewStatus `json:",inline" protobuf:"bytes,1,opt,name=podSecurityPolicySubjectReviewStatus"`

	// name contains the allowed and the denied ServiceAccount name
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RangeAllocation is used so we can easily expose a RangeAllocation typed for security group
//
// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
// +openshift:compatibility-gen:level=4
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000".
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`

	// data is a byte array representing the serialized state of a range allocation. It is a bitmap
	// with each bit set to one to represent a range is taken.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RangeAllocationList is a list of RangeAllocations objects
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type RangeAllocationList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of RangeAllocations.
	Items []RangeAllocation `json:"items" protobuf:"bytes,2,rep,name=items"`
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/register.go
vendor/github.com/openshift/api/security/v1/register.go
package v1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	// GroupName is the API group this package's types belong to.
	GroupName = "security.openshift.io"
	// GroupVersion identifies the group/version served by these types.
	GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
	// schemeBuilder also registers corev1 types, since several types here
	// embed core objects (e.g. PodTemplateSpec, SELinuxOptions).
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
	// Install is a function which adds this version to a scheme
	Install = schemeBuilder.AddToScheme

	// SchemeGroupVersion generated code relies on this name
	// Deprecated
	SchemeGroupVersion = GroupVersion

	// AddToScheme exists solely to keep the old generators creating valid code
	// DEPRECATED
	AddToScheme = schemeBuilder.AddToScheme
)

// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
	return schema.GroupResource{Group: GroupName, Resource: resource}
}

// addKnownTypes adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(GroupVersion,
		&SecurityContextConstraints{},
		&SecurityContextConstraintsList{},
		&PodSecurityPolicySubjectReview{},
		&PodSecurityPolicySelfSubjectReview{},
		&PodSecurityPolicyReview{},
		&RangeAllocation{},
		&RangeAllocationList{},
	)
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", "driver": "Driver is the name of the Flexvolume driver.", } func (AllowedFlexVolume) SwaggerDoc() map[string]string { return map_AllowedFlexVolume } var map_FSGroupStrategyOptions = map[string]string{ "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", "type": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.", "ranges": "Ranges are the allowed ranges of fs groups. 
If you would like to force a single fs group then supply a single range with the same start and end.", } func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { return map_FSGroupStrategyOptions } var map_IDRange = map[string]string{ "": "IDRange provides a min/max of an allowed range of IDs.", "min": "Min is the start of the range, inclusive.", "max": "Max is the end of the range, inclusive.", } func (IDRange) SwaggerDoc() map[string]string { return map_IDRange } var map_PodSecurityPolicyReview = map[string]string{ "": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "spec": "spec is the PodSecurityPolicy to check.", "status": "status represents the current information/status for the PodSecurityPolicyReview.", } func (PodSecurityPolicyReview) SwaggerDoc() map[string]string { return map_PodSecurityPolicyReview } var map_PodSecurityPolicyReviewSpec = map[string]string{ "": "PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview", "template": "template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, in which case \"default\" is used. If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", "serviceAccountNames": "serviceAccountNames is an optional set of ServiceAccounts to run the check with. If serviceAccountNames is empty, the template.spec.serviceAccountName is used, unless it's empty, in which case \"default\" is used instead. 
If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", } func (PodSecurityPolicyReviewSpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicyReviewSpec } var map_PodSecurityPolicyReviewStatus = map[string]string{ "": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.", "allowedServiceAccounts": "allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.", } func (PodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { return map_PodSecurityPolicyReviewStatus } var map_PodSecurityPolicySelfSubjectReview = map[string]string{ "": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "spec": "spec defines specification the PodSecurityPolicySelfSubjectReview.", "status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.", } func (PodSecurityPolicySelfSubjectReview) SwaggerDoc() map[string]string { return map_PodSecurityPolicySelfSubjectReview } var map_PodSecurityPolicySelfSubjectReviewSpec = map[string]string{ "": "PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.", "template": "template is the PodTemplateSpec to check.", } func (PodSecurityPolicySelfSubjectReviewSpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySelfSubjectReviewSpec } var map_PodSecurityPolicySubjectReview = map[string]string{ "": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "spec": "spec defines specification for the PodSecurityPolicySubjectReview.", "status": "status represents the 
current information/status for the PodSecurityPolicySubjectReview.", } func (PodSecurityPolicySubjectReview) SwaggerDoc() map[string]string { return map_PodSecurityPolicySubjectReview } var map_PodSecurityPolicySubjectReviewSpec = map[string]string{ "": "PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview", "template": "template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. If its non-empty, it will be checked.", "user": "user is the user you're testing for. If you specify \"user\" but not \"group\", then is it interpreted as \"What if user were not a member of any groups. If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.", "groups": "groups is the groups you're testing for.", } func (PodSecurityPolicySubjectReviewSpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySubjectReviewSpec } var map_PodSecurityPolicySubjectReviewStatus = map[string]string{ "": "PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.", "allowedBy": "allowedBy is a reference to the rule that allows the PodTemplateSpec. A rule can be a SecurityContextConstraint or a PodSecurityPolicy A `nil`, indicates that it was denied.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available.", "template": "template is the PodTemplateSpec after the defaulting is applied.", } func (PodSecurityPolicySubjectReviewStatus) SwaggerDoc() map[string]string { return map_PodSecurityPolicySubjectReviewStatus } var map_RangeAllocation = map[string]string{ "": "RangeAllocation is used so we can easily expose a RangeAllocation typed for security group\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "range": "range is a string representing a unique label for a range of uids, \"1000000000-2000000000/10000\".", "data": "data is a byte array representing the serialized state of a range allocation. It is a bitmap with each bit set to one to represent a range is taken.", } func (RangeAllocation) SwaggerDoc() map[string]string { return map_RangeAllocation } var map_RangeAllocationList = map[string]string{ "": "RangeAllocationList is a list of RangeAllocations objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "List of RangeAllocations.", } func (RangeAllocationList) SwaggerDoc() map[string]string { return map_RangeAllocationList } var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "type": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", "uid": "UID is the user id that containers must run as. 
Required for the MustRunAs strategy if not using namespace/service account allocated uids.", "uidRangeMin": "UIDRangeMin defines the min value for a strategy that allocates by range.", "uidRangeMax": "UIDRangeMax defines the max value for a strategy that allocates by range.", } func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } var map_SELinuxContextStrategyOptions = map[string]string{ "": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.", "type": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.", "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs", } func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string { return map_SELinuxContextStrategyOptions } var map_SecurityContextConstraints = map[string]string{ "": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "priority": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. 
If both priorities and restrictions are equal the SCCs will be sorted by name.", "allowPrivilegedContainer": "AllowPrivilegedContainer determines if a container can request to be run as privileged.", "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", "allowHostDirVolumePlugin": "AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", "volumes": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", "allowHostNetwork": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", "allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.", "allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.", "allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.", "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", "runAsUser": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "users": "The users who have permissions to use this security context constraints", "groups": "The groups that have permission to use this security context constraints", "seccompProfiles": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. 
An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", "allowedUnsafeSysctls": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", } func (SecurityContextConstraints) SwaggerDoc() map[string]string { return map_SecurityContextConstraints } var map_SecurityContextConstraintsList = map[string]string{ "": "SecurityContextConstraintsList is a list of SecurityContextConstraints objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "List of security context constraints.", } func (SecurityContextConstraintsList) SwaggerDoc() map[string]string { return map_SecurityContextConstraintsList } var map_ServiceAccountPodSecurityPolicyReviewStatus = map[string]string{ "": "ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status", "name": "name contains the allowed and the denied ServiceAccount name", } func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { return map_ServiceAccountPodSecurityPolicyReviewStatus } var map_SupplementalGroupsStrategyOptions = map[string]string{ "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", "type": "Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", } func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { return map_SupplementalGroupsStrategyOptions } // AUTO-GENERATED FUNCTIONS END HERE
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/doc.go
vendor/github.com/openshift/api/security/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/security/apis/security // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=security.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/generated.pb.go
vendor/github.com/openshift/api/security/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/security/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } func (*AllowedFlexVolume) ProtoMessage() {} func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{0} } func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { xxx_messageInfo_AllowedFlexVolume.Merge(m, src) } func (m *AllowedFlexVolume) XXX_Size() int { return m.Size() } func (m *AllowedFlexVolume) XXX_DiscardUnknown() { xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) } var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{1} } func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) } func (m *FSGroupStrategyOptions) XXX_Size() int { return m.Size() } func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) } var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{2} } func (m *IDRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *IDRange) XXX_Merge(src proto.Message) { xxx_messageInfo_IDRange.Merge(m, src) } func (m *IDRange) XXX_Size() int { return m.Size() } func (m *IDRange) XXX_DiscardUnknown() { xxx_messageInfo_IDRange.DiscardUnknown(m) } var xxx_messageInfo_IDRange proto.InternalMessageInfo func (m *PodSecurityPolicyReview) Reset() { *m = PodSecurityPolicyReview{} } func (*PodSecurityPolicyReview) ProtoMessage() {} func (*PodSecurityPolicyReview) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{3} } func (m *PodSecurityPolicyReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicyReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicyReview) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicyReview.Merge(m, src) } func (m *PodSecurityPolicyReview) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicyReview) 
XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicyReview.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicyReview proto.InternalMessageInfo func (m *PodSecurityPolicyReviewSpec) Reset() { *m = PodSecurityPolicyReviewSpec{} } func (*PodSecurityPolicyReviewSpec) ProtoMessage() {} func (*PodSecurityPolicyReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{4} } func (m *PodSecurityPolicyReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicyReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicyReviewSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicyReviewSpec.Merge(m, src) } func (m *PodSecurityPolicyReviewSpec) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicyReviewSpec) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicyReviewSpec.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicyReviewSpec proto.InternalMessageInfo func (m *PodSecurityPolicyReviewStatus) Reset() { *m = PodSecurityPolicyReviewStatus{} } func (*PodSecurityPolicyReviewStatus) ProtoMessage() {} func (*PodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{5} } func (m *PodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicyReviewStatus.Merge(m, src) } func (m *PodSecurityPolicyReviewStatus) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { 
xxx_messageInfo_PodSecurityPolicyReviewStatus.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicyReviewStatus proto.InternalMessageInfo func (m *PodSecurityPolicySelfSubjectReview) Reset() { *m = PodSecurityPolicySelfSubjectReview{} } func (*PodSecurityPolicySelfSubjectReview) ProtoMessage() {} func (*PodSecurityPolicySelfSubjectReview) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{6} } func (m *PodSecurityPolicySelfSubjectReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicySelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicySelfSubjectReview) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicySelfSubjectReview.Merge(m, src) } func (m *PodSecurityPolicySelfSubjectReview) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicySelfSubjectReview) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicySelfSubjectReview.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicySelfSubjectReview proto.InternalMessageInfo func (m *PodSecurityPolicySelfSubjectReviewSpec) Reset() { *m = PodSecurityPolicySelfSubjectReviewSpec{} } func (*PodSecurityPolicySelfSubjectReviewSpec) ProtoMessage() {} func (*PodSecurityPolicySelfSubjectReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{7} } func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.Merge(m, src) } func (m 
*PodSecurityPolicySelfSubjectReviewSpec) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec proto.InternalMessageInfo func (m *PodSecurityPolicySubjectReview) Reset() { *m = PodSecurityPolicySubjectReview{} } func (*PodSecurityPolicySubjectReview) ProtoMessage() {} func (*PodSecurityPolicySubjectReview) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{8} } func (m *PodSecurityPolicySubjectReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicySubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicySubjectReview) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicySubjectReview.Merge(m, src) } func (m *PodSecurityPolicySubjectReview) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicySubjectReview) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicySubjectReview.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicySubjectReview proto.InternalMessageInfo func (m *PodSecurityPolicySubjectReviewSpec) Reset() { *m = PodSecurityPolicySubjectReviewSpec{} } func (*PodSecurityPolicySubjectReviewSpec) ProtoMessage() {} func (*PodSecurityPolicySubjectReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{9} } func (m *PodSecurityPolicySubjectReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicySubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicySubjectReviewSpec) XXX_Merge(src proto.Message) { 
xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.Merge(m, src) } func (m *PodSecurityPolicySubjectReviewSpec) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicySubjectReviewSpec) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicySubjectReviewSpec proto.InternalMessageInfo func (m *PodSecurityPolicySubjectReviewStatus) Reset() { *m = PodSecurityPolicySubjectReviewStatus{} } func (*PodSecurityPolicySubjectReviewStatus) ProtoMessage() {} func (*PodSecurityPolicySubjectReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{10} } func (m *PodSecurityPolicySubjectReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PodSecurityPolicySubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *PodSecurityPolicySubjectReviewStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.Merge(m, src) } func (m *PodSecurityPolicySubjectReviewStatus) XXX_Size() int { return m.Size() } func (m *PodSecurityPolicySubjectReviewStatus) XXX_DiscardUnknown() { xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.DiscardUnknown(m) } var xxx_messageInfo_PodSecurityPolicySubjectReviewStatus proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{11} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RangeAllocation) XXX_Merge(src proto.Message) { 
xxx_messageInfo_RangeAllocation.Merge(m, src) } func (m *RangeAllocation) XXX_Size() int { return m.Size() } func (m *RangeAllocation) XXX_DiscardUnknown() { xxx_messageInfo_RangeAllocation.DiscardUnknown(m) } var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *RangeAllocationList) Reset() { *m = RangeAllocationList{} } func (*RangeAllocationList) ProtoMessage() {} func (*RangeAllocationList) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{12} } func (m *RangeAllocationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RangeAllocationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RangeAllocationList) XXX_Merge(src proto.Message) { xxx_messageInfo_RangeAllocationList.Merge(m, src) } func (m *RangeAllocationList) XXX_Size() int { return m.Size() } func (m *RangeAllocationList) XXX_DiscardUnknown() { xxx_messageInfo_RangeAllocationList.DiscardUnknown(m) } var xxx_messageInfo_RangeAllocationList proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{13} } func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) } func (m *RunAsUserStrategyOptions) XXX_Size() int { return m.Size() } func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) } var 
xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo func (m *SELinuxContextStrategyOptions) Reset() { *m = SELinuxContextStrategyOptions{} } func (*SELinuxContextStrategyOptions) ProtoMessage() {} func (*SELinuxContextStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{14} } func (m *SELinuxContextStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SELinuxContextStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SELinuxContextStrategyOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_SELinuxContextStrategyOptions.Merge(m, src) } func (m *SELinuxContextStrategyOptions) XXX_Size() int { return m.Size() } func (m *SELinuxContextStrategyOptions) XXX_DiscardUnknown() { xxx_messageInfo_SELinuxContextStrategyOptions.DiscardUnknown(m) } var xxx_messageInfo_SELinuxContextStrategyOptions proto.InternalMessageInfo func (m *SecurityContextConstraints) Reset() { *m = SecurityContextConstraints{} } func (*SecurityContextConstraints) ProtoMessage() {} func (*SecurityContextConstraints) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{15} } func (m *SecurityContextConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SecurityContextConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SecurityContextConstraints) XXX_Merge(src proto.Message) { xxx_messageInfo_SecurityContextConstraints.Merge(m, src) } func (m *SecurityContextConstraints) XXX_Size() int { return m.Size() } func (m *SecurityContextConstraints) XXX_DiscardUnknown() { xxx_messageInfo_SecurityContextConstraints.DiscardUnknown(m) } var xxx_messageInfo_SecurityContextConstraints proto.InternalMessageInfo func (m 
*SecurityContextConstraintsList) Reset() { *m = SecurityContextConstraintsList{} } func (*SecurityContextConstraintsList) ProtoMessage() {} func (*SecurityContextConstraintsList) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{16} } func (m *SecurityContextConstraintsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SecurityContextConstraintsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SecurityContextConstraintsList) XXX_Merge(src proto.Message) { xxx_messageInfo_SecurityContextConstraintsList.Merge(m, src) } func (m *SecurityContextConstraintsList) XXX_Size() int { return m.Size() } func (m *SecurityContextConstraintsList) XXX_DiscardUnknown() { xxx_messageInfo_SecurityContextConstraintsList.DiscardUnknown(m) } var xxx_messageInfo_SecurityContextConstraintsList proto.InternalMessageInfo func (m *ServiceAccountPodSecurityPolicyReviewStatus) Reset() { *m = ServiceAccountPodSecurityPolicyReviewStatus{} } func (*ServiceAccountPodSecurityPolicyReviewStatus) ProtoMessage() {} func (*ServiceAccountPodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{17} } func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.Merge(m, src) } func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Size() int { return m.Size() } func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { 
xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.DiscardUnknown(m) } var xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptor_af65d9655aa67551, []int{18} } func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) } func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { return m.Size() } func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) } var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AllowedFlexVolume)(nil), "github.com.openshift.api.security.v1.AllowedFlexVolume") proto.RegisterType((*FSGroupStrategyOptions)(nil), "github.com.openshift.api.security.v1.FSGroupStrategyOptions") proto.RegisterType((*IDRange)(nil), "github.com.openshift.api.security.v1.IDRange") proto.RegisterType((*PodSecurityPolicyReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReview") proto.RegisterType((*PodSecurityPolicyReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReviewSpec") proto.RegisterType((*PodSecurityPolicyReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReviewStatus") proto.RegisterType((*PodSecurityPolicySelfSubjectReview)(nil), 
"github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReview") proto.RegisterType((*PodSecurityPolicySelfSubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReviewSpec") proto.RegisterType((*PodSecurityPolicySubjectReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReview") proto.RegisterType((*PodSecurityPolicySubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewSpec") proto.RegisterType((*PodSecurityPolicySubjectReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewStatus") proto.RegisterType((*RangeAllocation)(nil), "github.com.openshift.api.security.v1.RangeAllocation") proto.RegisterType((*RangeAllocationList)(nil), "github.com.openshift.api.security.v1.RangeAllocationList") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "github.com.openshift.api.security.v1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxContextStrategyOptions)(nil), "github.com.openshift.api.security.v1.SELinuxContextStrategyOptions") proto.RegisterType((*SecurityContextConstraints)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraints") proto.RegisterType((*SecurityContextConstraintsList)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraintsList") proto.RegisterType((*ServiceAccountPodSecurityPolicyReviewStatus)(nil), "github.com.openshift.api.security.v1.ServiceAccountPodSecurityPolicyReviewStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "github.com.openshift.api.security.v1.SupplementalGroupsStrategyOptions") } func init() { proto.RegisterFile("github.com/openshift/api/security/v1/generated.proto", fileDescriptor_af65d9655aa67551) } var fileDescriptor_af65d9655aa67551 = []byte{ // 1750 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x1c, 0x49, 0x15, 0x77, 0x7b, 0xfc, 0x35, 0x65, 0xc7, 0x1f, 0x65, 
0xc7, 0xe9, 0x35, 0xeb, 0x19, 0xd3, 0x36, 0xab, 0x08, 0xd8, 0x19, 0x12, 0x2d, 0x6c, 0xd0, 0xb2, 0xd1, 0x4e, 0x7b, 0xd6, 0x59, 0xaf, 0x9c, 0x64, 0xb6, 0x66, 0xbd, 0x42, 0xab, 0x15, 0xa2, 0xdc, 0x53, 0x33, 0xae, 0xb8, 0xbf, 0xe8, 0xaa, 0x76, 0x3c, 0xe2, 0x12, 0x89, 0x0b, 0x47, 0x24, 0xae, 0x88, 0x33, 0xfc, 0x03, 0x5c, 0x10, 0x70, 0x8d, 0x04, 0x12, 0x39, 0xa1, 0x9c, 0x46, 0x64, 0x10, 0x27, 0x8e, 0xdc, 0x72, 0x42, 0x55, 0x53, 0xf3, 0xd1, 0x3d, 0xdd, 0xe3, 0x4e, 0x48, 0xa2, 0xbd, 0x4d, 0xbf, 0x8f, 0xdf, 0xef, 0xbd, 0xd7, 0xaf, 0x5f, 0xbd, 0x1a, 0xf0, 0x5e, 0x8b, 0xf2, 0xd3, 0xf0, 0xa4, 0x64, 0x79, 0x4e, 0xd9, 0xf3, 0x89, 0xcb, 0x4e, 0x69, 0x93, 0x97, 0xb1, 0x4f, 0xcb, 0x8c, 0x58, 0x61, 0x40, 0x79, 0xbb, 0x7c, 0x7e, 0xa3, 0xdc, 0x22, 0x2e, 0x09, 0x30, 0x27, 0x8d, 0x92, 0x1f, 0x78, 0xdc, 0x83, 0x7b, 0x43, 0xaf, 0xd2, 0xc0, 0xab, 0x84, 0x7d, 0x5a, 0xea, 0x7b, 0x95, 0xce, 0x6f, 0x6c, 0xbd, 0x3b, 0x82, 0xdd, 0xf2, 0x5a, 0x5e, 0x59, 0x3a, 0x9f, 0x84, 0x4d, 0xf9, 0x24, 0x1f, 0xe4, 0xaf, 0x1e, 0xe8, 0x96, 0x71, 0x76, 0x8b, 0x95, 0xa8, 0x27, 0xc9, 0x2d, 0x2f, 0x20, 0x09, 0xc4, 0x5b, 0xef, 0x0d, 0x6d, 0x1c, 0x6c, 0x9d, 0x52, 0x97, 0x04, 0xed, 0xb2, 0x7f, 0xd6, 0x12, 0x02, 0x56, 0x76, 0x08, 0xc7, 0x49, 0x5e, 0x3f, 0x48, 0xf3, 0x0a, 0x42, 0x97, 0x53, 0x87, 0x94, 0x99, 0x75, 0x4a, 0x1c, 0x1c, 0xf7, 0x33, 0x3e, 0x00, 0x6b, 0x15, 0xdb, 0xf6, 0x1e, 0x92, 0xc6, 0x81, 0x4d, 0x2e, 0xbe, 0xf0, 0xec, 0xd0, 0x21, 0xf0, 0x1d, 0x30, 0xd7, 0x08, 0xe8, 0x39, 0x09, 0x74, 0x6d, 0x47, 0xbb, 0x9e, 0x37, 0x97, 0x1f, 0x77, 0x8a, 0x53, 0xdd, 0x4e, 0x71, 0xae, 0x2a, 0xa5, 0x48, 0x69, 0x8d, 0xdf, 0x69, 0x60, 0xf3, 0xa0, 0x7e, 0x27, 0xf0, 0x42, 0xbf, 0xce, 0x05, 0x6a, 0xab, 0x7d, 0xdf, 0xe7, 0xd4, 0x73, 0x19, 0x7c, 0x1f, 0xcc, 0xf0, 0xb6, 0x4f, 0x14, 0xc0, 0xae, 0x02, 0x98, 0xf9, 0xbc, 0xed, 0x93, 0xe7, 0x9d, 0xe2, 0x7a, 0xcc, 0x4b, 0x88, 0x91, 0x74, 0x80, 0xc7, 0x60, 0x2e, 0xc0, 0x6e, 0x8b, 0x30, 0x7d, 0x7a, 0x27, 0x77, 0x7d, 0xf1, 0xe6, 0xbb, 0xa5, 0x2c, 0x2f, 0xa2, 0x74, 0x58, 0x45, 0xc2, 0x6b, 0x18, 
0xaa, 0x7c, 0x64, 0x48, 0x81, 0x19, 0x77, 0xc0, 0xbc, 0x32, 0x81, 0xdb, 0x20, 0xe7, 0x50, 0x57, 0x46, 0x96, 0x33, 0x17, 0x95, 0x7d, 0xee, 0x2e, 0x75, 0x91, 0x90, 0x4b, 0x35, 0xbe, 0xd0, 0xa7, 0x63, 0x6a, 0x7c, 0x81, 0x84, 0xdc, 0xf8, 0x8f, 0x06, 0xae, 0xd5, 0xbc, 0x46, 0x5d, 0x71, 0xd7, 0x3c, 0x9b, 0x5a, 0x6d, 0x44, 0xce, 0x29, 0x79, 0x08, 0x2d, 0x30, 0xc3, 0x7c, 0x62, 0x49, 0xe8, 0xc5, 0x9b, 0x95, 0x6c, 0x91, 0xa7, 0x80, 0xd5, 0x7d, 0x62, 0x99, 0x4b, 0xfd, 0xba, 0x89, 0x27, 0x24, 0xc1, 0xe1, 0x19, 0x98, 0x63, 0x1c, 0xf3, 0x90, 0xc9, 0x10, 0x17, 0x6f, 0xee, 0xff, 0x7f, 0x34, 0x12, 0x6a, 0x58, 0xb6, 0xde, 0x33, 0x52, 0x14, 0xc6, 0x1f, 0x35, 0xf0, 0x8d, 0x09, 0x01, 0xc2, 0xcf, 0xc0, 0x02, 0x27, 0x8e, 0x6f, 0x63, 0x4e, 0x54, 0xd6, 0xbb, 0xa5, 0x5e, 0x27, 0xca, 0x00, 0x44, 0x8f, 0x2b, 0xf2, 0xcf, 0x95, 0x99, 0xcc, 0x6b, 0x55, 0xd1, 0x2d, 0xf4, 0xa5, 0x68, 0x00, 0x03, 0x0f, 0xc1, 0x3a, 0x23, 0xc1, 0x39, 0xb5, 0x48, 0xc5, 0xb2, 0xbc, 0xd0, 0xe5, 0xf7, 0xb0, 0xa3, 0xba, 0x21, 0x6f, 0x5e, 0xeb, 0x76, 0x8a, 0xeb, 0xf5, 0x71, 0x35, 0x4a, 0xf2, 0x31, 0xfe, 0xaa, 0x81, 0xed, 0x89, 0x79, 0xc3, 0xdf, 0x6b, 0x60, 0x13, 0xf7, 0xfa, 0x3f, 0x8a, 0xca, 0x74, 0x4d, 0xb6, 0xdf, 0x67, 0xd9, 0xaa, 0x1b, 0x75, 0x9e, 0x5c, 0xeb, 0x82, 0x4a, 0x7e, 0xb3, 0x92, 0x48, 0x8c, 0x52, 0x02, 0x32, 0x7e, 0x39, 0x0d, 0x8c, 0x31, 0xe4, 0x3a, 0xb1, 0x9b, 0xf5, 0xf0, 0xe4, 0x01, 0xb1, 0xb8, 0x6a, 0x42, 0x37, 0xd2, 0x84, 0x47, 0x2f, 0xd9, 0x1d, 0x63, 0xb8, 0xa9, 0xfd, 0x18, 0xc4, 0xfa, 0xf1, 0xd3, 0x97, 0x65, 0x8c, 0xb0, 0x4d, 0x6e, 0xcb, 0x9f, 0x83, 0x77, 0xb2, 0x45, 0xfc, 0x1a, 0x1a, 0xd4, 0x78, 0x34, 0x0d, 0x0a, 0x93, 0xa3, 0x87, 0x0f, 0x22, 0xef, 0xe0, 0x93, 0x57, 0x52, 0x91, 0xaf, 0x53, 0xfd, 0xff, 0xa4, 0x25, 0xb5, 0xe2, 0x1b, 0x28, 0x3e, 0xdc, 0x01, 0x33, 0x21, 0x23, 0x81, 0xcc, 0x35, 0x3f, 0xac, 0xc7, 0x31, 0x23, 0x01, 0x92, 0x1a, 0x68, 0x80, 0xb9, 0x96, 0x38, 0x5b, 0x98, 0x9e, 0x93, 0x23, 0x03, 0x88, 0xf8, 0xe5, 0x69, 0xc3, 0x90, 0xd2, 0x18, 0xff, 0xd5, 0xc0, 0x5e, 0x96, 0x02, 0xc0, 0x1a, 0xc8, 
0xab, 0xaf, 0xd1, 0x6c, 0x4f, 0x4a, 0xe1, 0xbe, 0x72, 0x6d, 0x92, 0x80, 0xb8, 0x16, 0x31, 0xaf, 0x74, 0x3b, 0xc5, 0x7c, 0xa5, 0xef, 0x89, 0x86, 0x20, 0xe2, 0x6c, 0x0d, 0x08, 0x66, 0x9e, 0xab, 0x52, 0x18, 0x1e, 0x58, 0x52, 0x8a, 0x94, 0x36, 0x52, 0xbb, 0xdc, 0xab, 0x69, 0xdc, 0x3f, 0x68, 0x60, 0x45, 0x1e, 0x81, 0x22, 0x30, 0x0b, 0x8b, 0x83, 0x1a, 0xfe, 0x14, 0x2c, 0x88, 0x95, 0xa2, 0x81, 0x39, 0x56, 0xf9, 0x7d, 0x6f, 0x84, 0x66, 0xb0, 0x4a, 0x94, 0xfc, 0xb3, 0x96, 0x10, 0xb0, 0x92, 0xb0, 0x1e, 0x66, 0x7c, 0x97, 0x70, 0x6c, 0x42, 0xc5, 0x09, 0x86, 0x32, 0x34, 0x40, 0x85, 0xbb, 0x60, 0x56, 0x9e, 0xc1, 0x2a, 0xdf, 0x2b, 0xca, 0x78, 0x56, 0x46, 0x82, 0x7a, 0x3a, 0xf8, 0x36, 0x98, 0x91, 0x21, 0x88, 0x4c, 0x97, 0xcc, 0x05, 0xf1, 0x4a, 0xab, 0x98, 0x63, 0x24, 0xa5, 0xc6, 0xdf, 0x35, 0xb0, 0x1e, 0x0b, 0xfc, 0x88, 0x32, 0x0e, 0xbf, 0x1a, 0x0b, 0xbe, 0x94, 0x2d, 0x78, 0xe1, 0x2d, 0x43, 0x1f, 0x94, 0xab, 0x2f, 0x19, 0x09, 0xfc, 0x4b, 0x30, 0x4b, 0x39, 0x71, 0xfa, 0x8b, 0xc8, 0xf7, 0xb3, 0x7d, 0x57, 0xb1, 0x38, 0x87, 0xf9, 0x1e, 0x0a, 0x2c, 0xd4, 0x83, 0x34, 0xfe, 0xa1, 0x01, 0x1d, 0x85, 0x6e, 0x85, 0x89, 0xc6, 0x8d, 0xef, 0x4e, 0x3f, 0x8c, 0xec, 0x4e, 0xdf, 0x8a, 0xed, 0x4e, 0x57, 0xc7, 0xfc, 0x46, 0xb6, 0xa7, 0xb7, 0x40, 0x2e, 0xa4, 0x0d, 0xb5, 0xbc, 0xcc, 0x8b, 0xc5, 0xe5, 0xf8, 0xb0, 0x8a, 0x84, 0x0c, 0xde, 0x00, 0x8b, 0x21, 0x6d, 0xc8, 0xf0, 0xee, 0x52, 0x57, 0x56, 0x3a, 0x67, 0xae, 0x74, 0x3b, 0xc5, 0xc5, 0x63, 0xb5, 0x19, 0x89, 0x15, 0x68, 0xd4, 0x26, 0xe2, 0x82, 0x2f, 0xf4, 0x99, 0x04, 0x17, 0x7c, 0x81, 0x46, 0x6d, 0x8c, 0xbf, 0x68, 0x60, 0xbb, 0xfe, 0xf1, 0x11, 0x75, 0xc3, 0x8b, 0x7d, 0xcf, 0xe5, 0xe4, 0x82, 0xc7, 0xb3, 0xbb, 0x1d, 0xc9, 0xee, 0xdb, 0xb1, 0xec, 0xb6, 0x92, 0x9d, 0x47, 0x52, 0xfc, 0x09, 0x58, 0x66, 0x44, 0xda, 0x28, 0x44, 0x35, 0xf7, 0x8c, 0xa4, 0xcf, 0x43, 0xa1, 0x29, 0x4b, 0x13, 0x76, 0x3b, 0xc5, 0xe5, 0xa8, 0x0c, 0xc5, 0xd0, 0x8c, 0xdf, 0xac, 0x81, 0xad, 0xfe, 0x60, 0x50, 0x51, 0xec, 0x7b, 0x2e, 0xe3, 0x01, 0xa6, 0x2e, 0x67, 0x6f, 0xe0, 0x83, 0xb9, 
0x0e, 0x16, 0xfc, 0x80, 0x7a, 0x82, 0x5f, 0xa6, 0x36, 0x6b, 0x2e, 0x89, 0x0e, 0xad, 0x29, 0x19, 0x1a, 0x68, 0xe1, 0x57, 0x40, 0x97, 0x83, 0xa5, 0x16, 0xd0, 0x73, 0x6a, 0x93, 0x16, 0x69, 0x88, 0x80, 0xb1, 0x08, 0x40, 0xbe, 0xdf, 0x05, 0x73, 0x47, 0x31, 0xe9, 0x95, 0x14, 0x3b, 0x94, 0x8a, 0x00, 0x19, 0xd8, 0x6c, 0x90, 0x26, 0x0e, 0x6d, 0x5e, 0x69, 0x34, 0xf6, 0xb1, 0x8f, 0x4f, 0xa8, 0x4d, 0x39, 0x25, 0x4c, 0x9f, 0x91, 0x83, 0xf5, 0x03, 0xb1, 0xc3, 0x54, 0x13, 0x2d, 0x9e, 0x77, 0x8a, 0xdb, 0xe3, 0x57, 0x9d, 0xd2, 0xc0, 0xa4, 0x8d, 0x52, 0xa0, 0x61, 0x1b, 0xe8, 0x01, 0xf9, 0x59, 0x48, 0x03, 0xd2, 0xa8, 0x06, 0x9e, 0x1f, 0xa1, 0x9d, 0x95, 0xb4, 0x1f, 0x8a, 0x74, 0x50, 0x8a, 0xcd, 0xe5, 0xc4, 0xa9, 0xf0, 0xf0, 0x01, 0x58, 0x57, 0x63, 0x3a, 0xc2, 0x3a, 0x27, 0x59, 0x6f, 0x89, 0xc5, 0xb3, 0x32, 0xae, 0xbe, 0x9c, 0x30, 0x09, 0x74, 0xf0, 0xe6, 0x3e, 0xf1, 0x18, 0xaf, 0xd2, 0xa0, 0x77, 0xef, 0xaa, 0xd9, 0x61, 0x8b, 0xba, 0xfa, 0x7c, 0xc2, 0x9b, 0x4b, 0xb0, 0x43, 0xa9, 0x08, 0xb0, 0x0c, 0xe6, 0xcf, 0xe5, 0x33, 0xd3, 0x17, 0x64, 0xf4, 0x57, 0xbb, 0x9d, 0xe2, 0x7c, 0xcf, 0x44, 0x44, 0x3c, 0x77, 0x50, 0x97, 0x1f, 0x54, 0xdf, 0x0a, 0xfe, 0x42, 0x03, 0x10, 0xc7, 0xaf, 0x81, 0x4c, 0xbf, 0x2a, 0x07, 0xdf, 0xfb, 0xd9, 0x06, 0xdf, 0xd8, 0x35, 0xd2, 0xdc, 0x52, 0x29, 0xc0, 0x31, 0x15, 0x43, 0x09, 0x74, 0xb0, 0x0a, 0x56, 0x07, 0x29, 0xdd, 0x23, 0xfc, 0xa1, 0x17, 0x9c, 0xe9, 0x79, 0x59, 0x0c, 0x5d, 0x21, 0xad, 0x56, 0x62, 0x7a, 0x34, 0xe6, 0x01, 0x6f, 0x83, 0xe5, 0x81, 0xac, 0xe6, 0x05, 0x9c, 0xe9, 0x40, 0x62, 0x6c, 0x2a, 0x8c, 0xe5, 0x4a, 0x44, 0x8b, 0x62, 0xd6, 0xf0, 0x16, 0x58, 0x1a, 0x4a, 0x0e, 0xab, 0xfa, 0xa2, 0xf4, 0xde, 0x50, 0xde, 0x4b, 0x95, 0x11, 0x1d, 0x8a, 0x58, 0x46, 0x3c, 0x0f, 0x6b, 0xfb, 0xfa, 0x52, 0x8a, 0xe7, 0x61, 0x6d, 0x1f, 0x45, 0x2c, 0xa1, 0x03, 0x8a, 0xfd, 0xef, 0x21, 0xf2, 0x35, 0x7e, 0xcc, 0x2c, 0x6c, 0xcb, 0x73, 0x44, 0xdf, 0x94, 0x60, 0xbb, 0xdd, 0x4e, 0xb1, 0x58, 0x9d, 0x6c, 0x8a, 0x2e, 0xc3, 0x82, 0x3f, 0x8e, 0xcf, 0x8d, 0x11, 0x9e, 0x6b, 0x92, 0xe7, 0xed, 
0xf1, 0x99, 0x31, 0x42, 0x90, 0xea, 0x2d, 0x1a, 0xa9, 0x3f, 0x4f, 0xd5, 0xec, 0xd4, 0xaf, 0xbc, 0xc8, 0x2d, 0x75, 0xe2, 0xd1, 0x31, 0x7c, 0x85, 0x51, 0x33, 0x14, 0xa3, 0x84, 0x1e, 0xc8, 0x07, 0xfd, 0x43, 0x52, 0x5f, 0x96, 0xfc, 0xb7, 0x33, 0x9e, 0xde, 0x29, 0x67, 0xb2, 0xb9, 0xa6, 0xa8, 0xf3, 0x03, 0x0b, 0x34, 0xe4, 0x80, 0xbf, 0xd6, 0x00, 0x64, 0xa1, 0xef, 0xdb, 0xc4, 0x21, 0x2e, 0xc7, 0x76, 0x6f, 0xdd, 0xd4, 0x57, 0x24, 0xf5, 0x9d, 0x8c, 0xa9, 0x8f, 0xf9, 0xc7, 0x63, 0x18, 0x7c, 0x4f, 0xe3, 0xa6, 0x28, 0x81, 0x1e, 0xb6, 0xc0, 0x7c, 0x93, 0xc9, 0xdf, 0xfa, 0xaa, 0x8c, 0xe4, 0x47, 0xd9, 0x22, 0x49, 0xfe, 0x4b, 0xc7, 0x5c, 0x51, 0xf4, 0xf3, 0x4a, 0x8f, 0xfa, 0xe8, 0xf0, 0x0b, 0xb0, 0x19, 0x10, 0xdc, 0xb8, 0xef, 0xda, 0x6d, 0xe4, 0x79, 0xfc, 0x80, 0xda, 0x84, 0xb5, 0x19, 0x27, 0x8e, 0xbe, 0x26, 0xbb, 0x69, 0x70, 0xe3, 0x45, 0x89, 0x56, 0x28, 0xc5, 0x1b, 0x16, 0xc1, 0xac, 0x58, 0xe9, 0x99, 0x0e, 0xe5, 0x14, 0xcb, 0x8b, 0x35, 0x4a, 0xd4, 0x9b, 0xa1, 0x9e, 0x7c, 0x64, 0xd7, 0x5f, 0x4f, 0xdb, 0xf5, 0xe1, 0x87, 0x60, 0x85, 0x11, 0xcb, 0xf2, 0x1c, 0xbf, 0x16, 0x78, 0x4d, 0x01, 0xae, 0x6f, 0x48, 0xe3, 0xf5, 0x6e, 0xa7, 0xb8, 0x52, 0x8f, 0xaa, 0x50, 0xdc, 0x16, 0x1e, 0x81, 0x0d, 0x35, 0xaa, 0x8e, 0x5d, 0x86, 0x9b, 0xa4, 0xde, 0x66, 0x16, 0xb7, 0x99, 0xae, 0x4b, 0x0c, 0xbd, 0xdb, 0x29, 0x6e, 0x54, 0x12, 0xf4, 0x28, 0xd1, 0x0b, 0x7e, 0x04, 0x56, 0x9b, 0x5e, 0x70, 0x42, 0x1b, 0x0d, 0xe2, 0xf6, 0x91, 0xde, 0x92, 0x48, 0x1b, 0x62, 0xbc, 0x1d, 0xc4, 0x74, 0x68, 0xcc, 0xda, 0xf8, 0xb7, 0x06, 0x0a, 0xe9, 0xeb, 0xc9, 0x1b, 0x58, 0x8b, 0x49, 0x74, 0x2d, 0xfe, 0x28, 0xeb, 0x1f, 0x24, 0x69, 0x21, 0xa7, 0x6c, 0xc8, 0xbf, 0x9d, 0x06, 0xdf, 0x79, 0x81, 0x7f, 0x55, 0xe0, 0xdf, 0x34, 0xb0, 0xe7, 0x67, 0xb8, 0xd2, 0xa9, 0x8a, 0xbc, 0xca, 0x5b, 0xf2, 0x77, 0x55, 0x02, 0x99, 0xae, 0x94, 0x28, 0x53, 0x94, 0xe2, 0x9e, 0xeb, 0x62, 0x87, 0xc4, 0xef, 0xb9, 0xf7, 0xb0, 0x43, 0x90, 0xd4, 0x18, 0x7f, 0xd6, 0xc0, 0x37, 0x2f, 0x9d, 0x19, 0xd0, 0x8c, 0x6c, 0xdb, 0xa5, 0xd8, 0xb6, 0x5d, 0x48, 0x07, 0x78, 
0xed, 0x7f, 0xc9, 0x9a, 0x9f, 0x3e, 0x7e, 0x56, 0x98, 0x7a, 0xf2, 0xac, 0x30, 0xf5, 0xf4, 0x59, 0x61, 0xea, 0x51, 0xb7, 0xa0, 0x3d, 0xee, 0x16, 0xb4, 0x27, 0xdd, 0x82, 0xf6, 0xb4, 0x5b, 0xd0, 0xfe, 0xd9, 0x2d, 0x68, 0xbf, 0xfa, 0x57, 0x61, 0xea, 0xcb, 0xbd, 0x2c, 0xff, 0xde, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x31, 0x4b, 0x4e, 0xe4, 0x17, 0x00, 0x00, } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/security/v1/legacy.go
vendor/github.com/openshift/api/security/v1/legacy.go
package v1 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &SecurityContextConstraints{}, &SecurityContextConstraintsList{}, &PodSecurityPolicySubjectReview{}, &PodSecurityPolicySelfSubjectReview{}, &PodSecurityPolicyReview{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BrokerTemplateInstance) DeepCopyInto(out *BrokerTemplateInstance) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstance. func (in *BrokerTemplateInstance) DeepCopy() *BrokerTemplateInstance { if in == nil { return nil } out := new(BrokerTemplateInstance) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *BrokerTemplateInstance) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BrokerTemplateInstanceList) DeepCopyInto(out *BrokerTemplateInstanceList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]BrokerTemplateInstance, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceList. func (in *BrokerTemplateInstanceList) DeepCopy() *BrokerTemplateInstanceList { if in == nil { return nil } out := new(BrokerTemplateInstanceList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
func (in *BrokerTemplateInstanceList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BrokerTemplateInstanceSpec) DeepCopyInto(out *BrokerTemplateInstanceSpec) { *out = *in out.TemplateInstance = in.TemplateInstance out.Secret = in.Secret if in.BindingIDs != nil { in, out := &in.BindingIDs, &out.BindingIDs *out = make([]string, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceSpec. func (in *BrokerTemplateInstanceSpec) DeepCopy() *BrokerTemplateInstanceSpec { if in == nil { return nil } out := new(BrokerTemplateInstanceSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in ExtraValue) DeepCopyInto(out *ExtraValue) { { in := &in *out = make(ExtraValue, len(*in)) copy(*out, *in) return } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. func (in ExtraValue) DeepCopy() ExtraValue { if in == nil { return nil } out := new(ExtraValue) in.DeepCopyInto(out) return *out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Parameter) DeepCopyInto(out *Parameter) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. func (in *Parameter) DeepCopy() *Parameter { if in == nil { return nil } out := new(Parameter) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Template) DeepCopyInto(out *Template) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Objects != nil { in, out := &in.Objects, &out.Objects *out = make([]runtime.RawExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make([]Parameter, len(*in)) copy(*out, *in) } if in.ObjectLabels != nil { in, out := &in.ObjectLabels, &out.ObjectLabels *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. func (in *Template) DeepCopy() *Template { if in == nil { return nil } out := new(Template) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *Template) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateInstance) DeepCopyInto(out *TemplateInstance) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstance. func (in *TemplateInstance) DeepCopy() *TemplateInstance { if in == nil { return nil } out := new(TemplateInstance) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *TemplateInstance) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TemplateInstanceCondition) DeepCopyInto(out *TemplateInstanceCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceCondition. func (in *TemplateInstanceCondition) DeepCopy() *TemplateInstanceCondition { if in == nil { return nil } out := new(TemplateInstanceCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateInstanceList) DeepCopyInto(out *TemplateInstanceList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TemplateInstance, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceList. func (in *TemplateInstanceList) DeepCopy() *TemplateInstanceList { if in == nil { return nil } out := new(TemplateInstanceList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *TemplateInstanceList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateInstanceObject) DeepCopyInto(out *TemplateInstanceObject) { *out = *in out.Ref = in.Ref return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceObject. func (in *TemplateInstanceObject) DeepCopy() *TemplateInstanceObject { if in == nil { return nil } out := new(TemplateInstanceObject) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *TemplateInstanceRequester) DeepCopyInto(out *TemplateInstanceRequester) { *out = *in if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) } if in.Extra != nil { in, out := &in.Extra, &out.Extra *out = make(map[string]ExtraValue, len(*in)) for key, val := range *in { var outVal []string if val == nil { (*out)[key] = nil } else { in, out := &val, &outVal *out = make(ExtraValue, len(*in)) copy(*out, *in) } (*out)[key] = outVal } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceRequester. func (in *TemplateInstanceRequester) DeepCopy() *TemplateInstanceRequester { if in == nil { return nil } out := new(TemplateInstanceRequester) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateInstanceSpec) DeepCopyInto(out *TemplateInstanceSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) if in.Secret != nil { in, out := &in.Secret, &out.Secret *out = new(corev1.LocalObjectReference) **out = **in } if in.Requester != nil { in, out := &in.Requester, &out.Requester *out = new(TemplateInstanceRequester) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceSpec. func (in *TemplateInstanceSpec) DeepCopy() *TemplateInstanceSpec { if in == nil { return nil } out := new(TemplateInstanceSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TemplateInstanceStatus) DeepCopyInto(out *TemplateInstanceStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]TemplateInstanceCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Objects != nil { in, out := &in.Objects, &out.Objects *out = make([]TemplateInstanceObject, len(*in)) copy(*out, *in) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceStatus. func (in *TemplateInstanceStatus) DeepCopy() *TemplateInstanceStatus { if in == nil { return nil } out := new(TemplateInstanceStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateList) DeepCopyInto(out *TemplateList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Template, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. func (in *TemplateList) DeepCopy() *TemplateList { if in == nil { return nil } out := new(TemplateList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *TemplateList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/consts.go
vendor/github.com/openshift/api/template/v1/consts.go
package v1 const ( // TemplateInstanceFinalizer is used to clean up the objects created by the template instance, // when the template instance is deleted. TemplateInstanceFinalizer = "template.openshift.io/finalizer" // TemplateInstanceOwner is a label applied to all objects created from a template instance // which contains the uid of the template instance. TemplateInstanceOwner = "template.openshift.io/template-instance-owner" // WaitForReadyAnnotation indicates that the TemplateInstance controller // should wait for the object to be ready before reporting the template // instantiation complete. WaitForReadyAnnotation = "template.alpha.openshift.io/wait-for-ready" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/types.go
vendor/github.com/openshift/api/template/v1/types.go
package v1 import ( "fmt" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Template contains the inputs needed to produce a Config. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Template struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // message is an optional instructional message that will // be displayed when this template is instantiated. // This field should inform the user how to utilize the newly created resources. // Parameter substitution will be performed on the message before being // displayed so that generated credentials and other parameters can be // included in the output. Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` // objects is an array of resources to include in this template. // If a namespace value is hardcoded in the object, it will be removed // during template instantiation, however if the namespace value // is, or contains, a ${PARAMETER_REFERENCE}, the resolved // value after parameter substitution will be respected and the object // will be created in that namespace. // +kubebuilder:pruning:PreserveUnknownFields Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` // parameters is an optional array of Parameters used during the // Template to Config transformation. Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"` // labels is a optional set of labels that are applied to every // object during the Template to Config transformation. 
ObjectLabels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TemplateList is a list of Template objects. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type TemplateList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of templates Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` } // Parameter defines a name/value variable that is to be processed during // the Template to Config transformation. type Parameter struct { // Name must be set and it can be referenced in Template // Items using ${PARAMETER_NAME}. Required. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: The name that will show in UI instead of parameter 'Name' DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` // Description of a parameter. Optional. Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` // Value holds the Parameter data. If specified, the generator will be // ignored. The value replaces all occurrences of the Parameter ${Name} // expression during the Template to Config transformation. Optional. Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` // generate specifies the generator to be used to generate random string // from an input value specified by From field. The result string is // stored into Value field. If empty, no generator is being used, leaving // the result Value untouched. Optional. 
// // The only supported generator is "expression", which accepts a "from" // value in the form of a simple regular expression containing the // range expression "[a-zA-Z0-9]", and the length expression "a{length}". // // Examples: // // from | value // ----------------------------- // "test[0-9]{1}x" | "test7x" // "[0-1]{8}" | "01001100" // "0x[A-F0-9]{4}" | "0xB3AF" // "[a-zA-Z0-9]{8}" | "hW4yQU5i" // Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` // From is an input value for the generator. Optional. From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` // Optional: Indicates the parameter must have a value. Defaults to false. Required bool `json:"required,omitempty" protobuf:"varint,7,opt,name=required"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TemplateInstance requests and records the instantiation of a Template. // TemplateInstance is part of an experimental API. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type TemplateInstance struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec describes the desired state of this TemplateInstance. Spec TemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // status describes the current state of this TemplateInstance. // +optional Status TemplateInstanceStatus `json:"status" protobuf:"bytes,3,opt,name=status"` } // TemplateInstanceSpec describes the desired state of a TemplateInstance. type TemplateInstanceSpec struct { // template is a full copy of the template for instantiation. 
Template Template `json:"template" protobuf:"bytes,1,opt,name=template"` // secret is a reference to a Secret object containing the necessary // template parameters. Secret *corev1.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` // requester holds the identity of the agent requesting the template // instantiation. // +optional Requester *TemplateInstanceRequester `json:"requester" protobuf:"bytes,3,opt,name=requester"` } // TemplateInstanceRequester holds the identity of an agent requesting a // template instantiation. type TemplateInstanceRequester struct { // username uniquely identifies this user among all active users. Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` // uid is a unique value that identifies this user across time; if this user is // deleted and another user by the same name is added, they will have // different UIDs. UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` // groups represent the groups this user is a part of. Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` // extra holds additional information provided by the authenticator. Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` } // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } // TemplateInstanceStatus describes the current state of a TemplateInstance. type TemplateInstanceStatus struct { // conditions represent the latest available observations of a // TemplateInstance's current state. Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // Objects references the objects created by the TemplateInstance. 
Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` } // TemplateInstanceCondition contains condition information for a // TemplateInstance. type TemplateInstanceCondition struct { // Type of the condition, currently Ready or InstantiateFailure. Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"` // Status of the condition, one of True, False or Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` // LastTransitionTime is the last time a condition status transitioned from // one state to another. LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` // Reason is a brief machine readable explanation for the condition's last // transition. Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` // Message is a human readable description of the details of the last // transition, complementing reason. Message string `json:"message" protobuf:"bytes,5,opt,name=message"` } // TemplateInstanceConditionType is the type of condition pertaining to a // TemplateInstance. type TemplateInstanceConditionType string const ( // TemplateInstanceReady indicates the readiness of the template // instantiation. TemplateInstanceReady TemplateInstanceConditionType = "Ready" // TemplateInstanceInstantiateFailure indicates the failure of the template // instantiation TemplateInstanceInstantiateFailure TemplateInstanceConditionType = "InstantiateFailure" ) // TemplateInstanceObject references an object created by a TemplateInstance. type TemplateInstanceObject struct { // ref is a reference to the created object. When used under .spec, only // name and namespace are used; these can contain references to parameters // which will be substituted following the usual rules. 
Ref corev1.ObjectReference `json:"ref,omitempty" protobuf:"bytes,1,opt,name=ref"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TemplateInstanceList is a list of TemplateInstance objects. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type TemplateInstanceList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is a list of Templateinstances Items []TemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // BrokerTemplateInstance holds the service broker-related state associated with // a TemplateInstance. BrokerTemplateInstance is part of an experimental API. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type BrokerTemplateInstance struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec describes the state of this BrokerTemplateInstance. Spec BrokerTemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } // BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. type BrokerTemplateInstanceSpec struct { // templateinstance is a reference to a TemplateInstance object residing // in a namespace. 
TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"` // secret is a reference to a Secret object residing in a namespace, // containing the necessary template parameters. Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"` // bindingids is a list of 'binding_id's provided during successive bind // calls to the template service broker. BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type BrokerTemplateInstanceList struct { metav1.TypeMeta `json:",inline"` // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is a list of BrokerTemplateInstances Items []BrokerTemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/register.go
vendor/github.com/openshift/api/template/v1/register.go
package v1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( GroupName = "template.openshift.io" GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme // SchemeGroupVersion generated code relies on this name // Deprecated SchemeGroupVersion = GroupVersion // AddToScheme exists solely to keep the old generators creating valid code // DEPRECATED AddToScheme = schemeBuilder.AddToScheme ) // Resource generated code relies on this being here, but it logically belongs to the group // DEPRECATED func Resource(resource string) schema.GroupResource { return schema.GroupResource{Group: GroupName, Resource: resource} } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &Template{}, &TemplateList{}, &TemplateInstance{}, &TemplateInstanceList{}, &BrokerTemplateInstance{}, &BrokerTemplateInstanceList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_BrokerTemplateInstance = map[string]string{ "": "BrokerTemplateInstance holds the service broker-related state associated with a TemplateInstance. BrokerTemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec describes the state of this BrokerTemplateInstance.", } func (BrokerTemplateInstance) SwaggerDoc() map[string]string { return map_BrokerTemplateInstance } var map_BrokerTemplateInstanceList = map[string]string{ "": "BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of BrokerTemplateInstances", } func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string { return map_BrokerTemplateInstanceList } var map_BrokerTemplateInstanceSpec = map[string]string{ "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.", "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.", "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", } func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { return map_BrokerTemplateInstanceSpec } var map_Parameter = map[string]string{ "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.", "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", "displayName": "Optional: The name that will show in UI instead of parameter 'Name'", "description": "Description of a parameter. Optional.", "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. 
Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value", "from": "From is an input value for the generator. Optional.", "required": "Optional: Indicates the parameter must have a value. Defaults to false.", } func (Parameter) SwaggerDoc() map[string]string { return map_Parameter } var map_Template = map[string]string{ "": "Template contains the inputs needed to produce a Config.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "message": "message is an optional instructional message that will be displayed when this template is instantiated. This field should inform the user how to utilize the newly created resources. Parameter substitution will be performed on the message before being displayed so that generated credentials and other parameters can be included in the output.", "objects": "objects is an array of resources to include in this template. 
If a namespace value is hardcoded in the object, it will be removed during template instantiation, however if the namespace value is, or contains, a ${PARAMETER_REFERENCE}, the resolved value after parameter substitution will be respected and the object will be created in that namespace.", "parameters": "parameters is an optional array of Parameters used during the Template to Config transformation.", "labels": "labels is a optional set of labels that are applied to every object during the Template to Config transformation.", } func (Template) SwaggerDoc() map[string]string { return map_Template } var map_TemplateInstance = map[string]string{ "": "TemplateInstance requests and records the instantiation of a Template. TemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec describes the desired state of this TemplateInstance.", "status": "status describes the current state of this TemplateInstance.", } func (TemplateInstance) SwaggerDoc() map[string]string { return map_TemplateInstance } var map_TemplateInstanceCondition = map[string]string{ "": "TemplateInstanceCondition contains condition information for a TemplateInstance.", "type": "Type of the condition, currently Ready or InstantiateFailure.", "status": "Status of the condition, one of True, False or Unknown.", "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.", "reason": "Reason is a brief machine readable explanation for the condition's last transition.", "message": "Message is a human readable description of the details of the last transition, complementing reason.", } func (TemplateInstanceCondition) SwaggerDoc() map[string]string { return 
map_TemplateInstanceCondition } var map_TemplateInstanceList = map[string]string{ "": "TemplateInstanceList is a list of TemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of Templateinstances", } func (TemplateInstanceList) SwaggerDoc() map[string]string { return map_TemplateInstanceList } var map_TemplateInstanceObject = map[string]string{ "": "TemplateInstanceObject references an object created by a TemplateInstance.", "ref": "ref is a reference to the created object. When used under .spec, only name and namespace are used; these can contain references to parameters which will be substituted following the usual rules.", } func (TemplateInstanceObject) SwaggerDoc() map[string]string { return map_TemplateInstanceObject } var map_TemplateInstanceRequester = map[string]string{ "": "TemplateInstanceRequester holds the identity of an agent requesting a template instantiation.", "username": "username uniquely identifies this user among all active users.", "uid": "uid is a unique value that identifies this user across time; if this user is deleted and another user by the same name is added, they will have different UIDs.", "groups": "groups represent the groups this user is a part of.", "extra": "extra holds additional information provided by the authenticator.", } func (TemplateInstanceRequester) SwaggerDoc() map[string]string { return map_TemplateInstanceRequester } var map_TemplateInstanceSpec = map[string]string{ "": "TemplateInstanceSpec describes the desired state of a TemplateInstance.", "template": "template is a full copy of the template for instantiation.", "secret": "secret is a reference to a Secret object containing the necessary template parameters.", "requester": 
"requester holds the identity of the agent requesting the template instantiation.", } func (TemplateInstanceSpec) SwaggerDoc() map[string]string { return map_TemplateInstanceSpec } var map_TemplateInstanceStatus = map[string]string{ "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", "objects": "Objects references the objects created by the TemplateInstance.", } func (TemplateInstanceStatus) SwaggerDoc() map[string]string { return map_TemplateInstanceStatus } var map_TemplateList = map[string]string{ "": "TemplateList is a list of Template objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of templates", } func (TemplateList) SwaggerDoc() map[string]string { return map_TemplateList } // AUTO-GENERATED FUNCTIONS END HERE
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/doc.go
vendor/github.com/openshift/api/template/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/template/apis/template // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=template.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/generated.pb.go
vendor/github.com/openshift/api/template/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/template/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *BrokerTemplateInstance) Reset() { *m = BrokerTemplateInstance{} } func (*BrokerTemplateInstance) ProtoMessage() {} func (*BrokerTemplateInstance) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{0} } func (m *BrokerTemplateInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BrokerTemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BrokerTemplateInstance) XXX_Merge(src proto.Message) { xxx_messageInfo_BrokerTemplateInstance.Merge(m, src) } func (m *BrokerTemplateInstance) XXX_Size() int { return m.Size() } func (m *BrokerTemplateInstance) XXX_DiscardUnknown() { xxx_messageInfo_BrokerTemplateInstance.DiscardUnknown(m) } var xxx_messageInfo_BrokerTemplateInstance proto.InternalMessageInfo func (m *BrokerTemplateInstanceList) Reset() { *m = BrokerTemplateInstanceList{} } func (*BrokerTemplateInstanceList) ProtoMessage() {} func (*BrokerTemplateInstanceList) Descriptor() ([]byte, []int) { return 
fileDescriptor_8d3ee9f55fa8363e, []int{1} } func (m *BrokerTemplateInstanceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BrokerTemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BrokerTemplateInstanceList) XXX_Merge(src proto.Message) { xxx_messageInfo_BrokerTemplateInstanceList.Merge(m, src) } func (m *BrokerTemplateInstanceList) XXX_Size() int { return m.Size() } func (m *BrokerTemplateInstanceList) XXX_DiscardUnknown() { xxx_messageInfo_BrokerTemplateInstanceList.DiscardUnknown(m) } var xxx_messageInfo_BrokerTemplateInstanceList proto.InternalMessageInfo func (m *BrokerTemplateInstanceSpec) Reset() { *m = BrokerTemplateInstanceSpec{} } func (*BrokerTemplateInstanceSpec) ProtoMessage() {} func (*BrokerTemplateInstanceSpec) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{2} } func (m *BrokerTemplateInstanceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BrokerTemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *BrokerTemplateInstanceSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_BrokerTemplateInstanceSpec.Merge(m, src) } func (m *BrokerTemplateInstanceSpec) XXX_Size() int { return m.Size() } func (m *BrokerTemplateInstanceSpec) XXX_DiscardUnknown() { xxx_messageInfo_BrokerTemplateInstanceSpec.DiscardUnknown(m) } var xxx_messageInfo_BrokerTemplateInstanceSpec proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{3} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ExtraValue) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ExtraValue) XXX_Merge(src proto.Message) { xxx_messageInfo_ExtraValue.Merge(m, src) } func (m *ExtraValue) XXX_Size() int { return m.Size() } func (m *ExtraValue) XXX_DiscardUnknown() { xxx_messageInfo_ExtraValue.DiscardUnknown(m) } var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *Parameter) Reset() { *m = Parameter{} } func (*Parameter) ProtoMessage() {} func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{4} } func (m *Parameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *Parameter) XXX_Merge(src proto.Message) { xxx_messageInfo_Parameter.Merge(m, src) } func (m *Parameter) XXX_Size() int { return m.Size() } func (m *Parameter) XXX_DiscardUnknown() { xxx_messageInfo_Parameter.DiscardUnknown(m) } var xxx_messageInfo_Parameter proto.InternalMessageInfo func (m *Template) Reset() { *m = Template{} } func (*Template) ProtoMessage() {} func (*Template) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{5} } func (m *Template) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *Template) XXX_Merge(src proto.Message) { xxx_messageInfo_Template.Merge(m, src) } func (m *Template) XXX_Size() int { return m.Size() } func (m *Template) XXX_DiscardUnknown() { xxx_messageInfo_Template.DiscardUnknown(m) } var xxx_messageInfo_Template proto.InternalMessageInfo func (m *TemplateInstance) Reset() { *m = TemplateInstance{} } func (*TemplateInstance) ProtoMessage() {} 
func (*TemplateInstance) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{6} } func (m *TemplateInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstance) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstance.Merge(m, src) } func (m *TemplateInstance) XXX_Size() int { return m.Size() } func (m *TemplateInstance) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstance.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstance proto.InternalMessageInfo func (m *TemplateInstanceCondition) Reset() { *m = TemplateInstanceCondition{} } func (*TemplateInstanceCondition) ProtoMessage() {} func (*TemplateInstanceCondition) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{7} } func (m *TemplateInstanceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceCondition) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceCondition.Merge(m, src) } func (m *TemplateInstanceCondition) XXX_Size() int { return m.Size() } func (m *TemplateInstanceCondition) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstanceCondition.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceCondition proto.InternalMessageInfo func (m *TemplateInstanceList) Reset() { *m = TemplateInstanceList{} } func (*TemplateInstanceList) ProtoMessage() {} func (*TemplateInstanceList) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{8} } func (m *TemplateInstanceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceList) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceList) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceList.Merge(m, src) } func (m *TemplateInstanceList) XXX_Size() int { return m.Size() } func (m *TemplateInstanceList) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstanceList.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceList proto.InternalMessageInfo func (m *TemplateInstanceObject) Reset() { *m = TemplateInstanceObject{} } func (*TemplateInstanceObject) ProtoMessage() {} func (*TemplateInstanceObject) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{9} } func (m *TemplateInstanceObject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceObject) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceObject.Merge(m, src) } func (m *TemplateInstanceObject) XXX_Size() int { return m.Size() } func (m *TemplateInstanceObject) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstanceObject.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceObject proto.InternalMessageInfo func (m *TemplateInstanceRequester) Reset() { *m = TemplateInstanceRequester{} } func (*TemplateInstanceRequester) ProtoMessage() {} func (*TemplateInstanceRequester) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{10} } func (m *TemplateInstanceRequester) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceRequester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceRequester) 
XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceRequester.Merge(m, src) } func (m *TemplateInstanceRequester) XXX_Size() int { return m.Size() } func (m *TemplateInstanceRequester) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstanceRequester.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceRequester proto.InternalMessageInfo func (m *TemplateInstanceSpec) Reset() { *m = TemplateInstanceSpec{} } func (*TemplateInstanceSpec) ProtoMessage() {} func (*TemplateInstanceSpec) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{11} } func (m *TemplateInstanceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceSpec.Merge(m, src) } func (m *TemplateInstanceSpec) XXX_Size() int { return m.Size() } func (m *TemplateInstanceSpec) XXX_DiscardUnknown() { xxx_messageInfo_TemplateInstanceSpec.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceSpec proto.InternalMessageInfo func (m *TemplateInstanceStatus) Reset() { *m = TemplateInstanceStatus{} } func (*TemplateInstanceStatus) ProtoMessage() {} func (*TemplateInstanceStatus) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{12} } func (m *TemplateInstanceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateInstanceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateInstanceStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateInstanceStatus.Merge(m, src) } func (m *TemplateInstanceStatus) XXX_Size() int { return m.Size() } func (m *TemplateInstanceStatus) XXX_DiscardUnknown() { 
xxx_messageInfo_TemplateInstanceStatus.DiscardUnknown(m) } var xxx_messageInfo_TemplateInstanceStatus proto.InternalMessageInfo func (m *TemplateList) Reset() { *m = TemplateList{} } func (*TemplateList) ProtoMessage() {} func (*TemplateList) Descriptor() ([]byte, []int) { return fileDescriptor_8d3ee9f55fa8363e, []int{13} } func (m *TemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *TemplateList) XXX_Merge(src proto.Message) { xxx_messageInfo_TemplateList.Merge(m, src) } func (m *TemplateList) XXX_Size() int { return m.Size() } func (m *TemplateList) XXX_DiscardUnknown() { xxx_messageInfo_TemplateList.DiscardUnknown(m) } var xxx_messageInfo_TemplateList proto.InternalMessageInfo func init() { proto.RegisterType((*BrokerTemplateInstance)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstance") proto.RegisterType((*BrokerTemplateInstanceList)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceList") proto.RegisterType((*BrokerTemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceSpec") proto.RegisterType((*ExtraValue)(nil), "github.com.openshift.api.template.v1.ExtraValue") proto.RegisterType((*Parameter)(nil), "github.com.openshift.api.template.v1.Parameter") proto.RegisterType((*Template)(nil), "github.com.openshift.api.template.v1.Template") proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.template.v1.Template.LabelsEntry") proto.RegisterType((*TemplateInstance)(nil), "github.com.openshift.api.template.v1.TemplateInstance") proto.RegisterType((*TemplateInstanceCondition)(nil), "github.com.openshift.api.template.v1.TemplateInstanceCondition") proto.RegisterType((*TemplateInstanceList)(nil), "github.com.openshift.api.template.v1.TemplateInstanceList") 
proto.RegisterType((*TemplateInstanceObject)(nil), "github.com.openshift.api.template.v1.TemplateInstanceObject") proto.RegisterType((*TemplateInstanceRequester)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester") proto.RegisterMapType((map[string]ExtraValue)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester.ExtraEntry") proto.RegisterType((*TemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.TemplateInstanceSpec") proto.RegisterType((*TemplateInstanceStatus)(nil), "github.com.openshift.api.template.v1.TemplateInstanceStatus") proto.RegisterType((*TemplateList)(nil), "github.com.openshift.api.template.v1.TemplateList") } func init() { proto.RegisterFile("github.com/openshift/api/template/v1/generated.proto", fileDescriptor_8d3ee9f55fa8363e) } var fileDescriptor_8d3ee9f55fa8363e = []byte{ // 1246 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x5b, 0x45, 0x17, 0xf6, 0xf5, 0x57, 0xec, 0x71, 0xdb, 0x37, 0x9a, 0xb7, 0xaa, 0x2e, 0x96, 0x6a, 0x5b, 0xb7, 0x15, 0x0a, 0xa8, 0xb9, 0x26, 0x51, 0x28, 0x25, 0x42, 0x02, 0x2e, 0x49, 0xab, 0x94, 0x14, 0xd0, 0x24, 0x45, 0x08, 0xb2, 0x60, 0x7c, 0x3d, 0x76, 0x6e, 0xe3, 0xfb, 0xc1, 0xcc, 0x38, 0xd4, 0xbb, 0x2e, 0xf8, 0x01, 0x2c, 0x59, 0xf2, 0x13, 0x58, 0xb2, 0x42, 0x62, 0x97, 0x65, 0xd9, 0x75, 0x01, 0x16, 0x31, 0x2b, 0xfe, 0x00, 0x48, 0x65, 0x83, 0x66, 0xee, 0xdc, 0x0f, 0x7f, 0x51, 0x27, 0x95, 0xda, 0x9d, 0xef, 0x99, 0xf3, 0x3c, 0x67, 0xce, 0x99, 0x33, 0xcf, 0x1c, 0x83, 0x8d, 0xae, 0xc3, 0x0f, 0xfb, 0x2d, 0xd3, 0xf6, 0xdd, 0xa6, 0x1f, 0x10, 0x8f, 0x1d, 0x3a, 0x1d, 0xde, 0xc4, 0x81, 0xd3, 0xe4, 0xc4, 0x0d, 0x7a, 0x98, 0x93, 0xe6, 0xf1, 0x5a, 0xb3, 0x4b, 0x3c, 0x42, 0x31, 0x27, 0x6d, 0x33, 0xa0, 0x3e, 0xf7, 0xe1, 0xf5, 0x04, 0x65, 0xc6, 0x28, 0x13, 0x07, 0x8e, 0x19, 0xa1, 0xcc, 0xe3, 0xb5, 0xea, 0x6a, 0x8a, 0xbb, 0xeb, 0x77, 0xfd, 0xa6, 0x04, 0xb7, 0xfa, 0x1d, 0xf9, 0x25, 0x3f, 0xe4, 0xaf, 0x90, 0xb4, 0x6a, 
0x1c, 0xdd, 0x62, 0xa6, 0xe3, 0xcb, 0xe0, 0xb6, 0x4f, 0x67, 0x05, 0xae, 0x6e, 0x24, 0x3e, 0x2e, 0xb6, 0x0f, 0x1d, 0x8f, 0xd0, 0x41, 0x33, 0x38, 0xea, 0x0a, 0x03, 0x6b, 0xba, 0x84, 0xe3, 0x59, 0xa8, 0xe6, 0x3c, 0x14, 0xed, 0x7b, 0xdc, 0x71, 0xc9, 0x14, 0xe0, 0xe6, 0xb3, 0x00, 0xcc, 0x3e, 0x24, 0x2e, 0x9e, 0xc4, 0x19, 0x43, 0x0d, 0x5c, 0xb1, 0xa8, 0x7f, 0x44, 0xe8, 0xbe, 0xaa, 0xc3, 0x8e, 0xc7, 0x38, 0xf6, 0x6c, 0x02, 0xbf, 0x04, 0x25, 0xb1, 0xbd, 0x36, 0xe6, 0x58, 0xd7, 0x1a, 0xda, 0x4a, 0x65, 0xfd, 0x0d, 0x33, 0x8c, 0x62, 0xa6, 0xa3, 0x98, 0xc1, 0x51, 0x57, 0x18, 0x98, 0x29, 0xbc, 0xcd, 0xe3, 0x35, 0xf3, 0xe3, 0xd6, 0x03, 0x62, 0xf3, 0x7b, 0x84, 0x63, 0x0b, 0x9e, 0x0c, 0xeb, 0x99, 0xd1, 0xb0, 0x0e, 0x12, 0x1b, 0x8a, 0x59, 0x61, 0x0b, 0xe4, 0x59, 0x40, 0x6c, 0x3d, 0x2b, 0xd9, 0xdf, 0x33, 0x17, 0x39, 0x23, 0x73, 0xf6, 0x6e, 0xf7, 0x02, 0x62, 0x5b, 0x17, 0x54, 0xb4, 0xbc, 0xf8, 0x42, 0x92, 0xdb, 0xf8, 0x4d, 0x03, 0xd5, 0xd9, 0x90, 0x5d, 0x87, 0x71, 0x78, 0x30, 0x95, 0xa4, 0xb9, 0x58, 0x92, 0x02, 0x2d, 0x53, 0x5c, 0x56, 0x41, 0x4b, 0x91, 0x25, 0x95, 0x20, 0x06, 0x05, 0x87, 0x13, 0x97, 0xe9, 0xd9, 0x46, 0x6e, 0xa5, 0xb2, 0xfe, 0xce, 0xf3, 0x64, 0x68, 0x5d, 0x54, 0x81, 0x0a, 0x3b, 0x82, 0x12, 0x85, 0xcc, 0xc6, 0x37, 0xd9, 0x79, 0xf9, 0x89, 0x22, 0x40, 0x07, 0x2c, 0xf3, 0x09, 0xbb, 0xca, 0xf3, 0x5a, 0x2a, 0x4f, 0x53, 0x74, 0x6f, 0x72, 0x74, 0x88, 0x74, 0x08, 0x25, 0x22, 0xa6, 0xae, 0x62, 0x2e, 0x4f, 0x92, 0xa3, 0x29, 0x5a, 0xf8, 0x21, 0x28, 0x32, 0x62, 0x53, 0xc2, 0xd5, 0x79, 0x2e, 0x14, 0xe0, 0x92, 0x0a, 0x50, 0xdc, 0x93, 0x50, 0xa4, 0x28, 0xa0, 0x09, 0x40, 0xcb, 0xf1, 0xda, 0x8e, 0xd7, 0xdd, 0xd9, 0x62, 0x7a, 0xae, 0x91, 0x5b, 0x29, 0x5b, 0x97, 0x44, 0x23, 0x59, 0xb1, 0x15, 0xa5, 0x3c, 0x8c, 0xb7, 0x00, 0xd8, 0x7e, 0xc8, 0x29, 0xfe, 0x14, 0xf7, 0xfa, 0x04, 0xd6, 0xa3, 0xba, 0x6b, 0x12, 0x58, 0x9e, 0xac, 0xda, 0x66, 0xe9, 0xbb, 0xef, 0xeb, 0x99, 0x47, 0xbf, 0x36, 0x32, 0xc6, 0x4f, 0x59, 0x50, 0xfe, 0x04, 0x53, 0xec, 0x12, 0x4e, 0x28, 0x6c, 0x80, 0xbc, 0x87, 0xdd, 0xb0, 
0x44, 0xe5, 0xa4, 0x9f, 0x3e, 0xc2, 0x2e, 0x41, 0x72, 0x05, 0xbe, 0x09, 0x2a, 0x6d, 0x87, 0x05, 0x3d, 0x3c, 0x10, 0x46, 0x99, 0x6a, 0xd9, 0xfa, 0xbf, 0x72, 0xac, 0x6c, 0x25, 0x4b, 0x28, 0xed, 0x27, 0x61, 0x84, 0xd9, 0xd4, 0x09, 0xb8, 0xe3, 0x7b, 0x7a, 0x6e, 0x02, 0x96, 0x2c, 0xa1, 0xb4, 0x1f, 0xbc, 0x06, 0x0a, 0xc7, 0x22, 0x23, 0x3d, 0x2f, 0x01, 0x71, 0x0b, 0xc8, 0x34, 0x51, 0xb8, 0x06, 0x6f, 0x80, 0x52, 0x74, 0xad, 0xf5, 0x82, 0xf4, 0x8b, 0x7b, 0xf2, 0x8e, 0xb2, 0xa3, 0xd8, 0x43, 0xa4, 0xd8, 0xa1, 0xbe, 0xab, 0x17, 0xc7, 0x53, 0xbc, 0x4d, 0x7d, 0x17, 0xc9, 0x15, 0xc1, 0x47, 0xc9, 0x57, 0x7d, 0x87, 0x92, 0xb6, 0xbe, 0xd4, 0xd0, 0x56, 0x4a, 0x09, 0x1f, 0x52, 0x76, 0x14, 0x7b, 0x18, 0xff, 0xe4, 0x40, 0x29, 0xea, 0x8e, 0x17, 0xa0, 0x19, 0xaf, 0x81, 0x25, 0x97, 0x30, 0x86, 0xbb, 0x51, 0xed, 0xff, 0xa7, 0xdc, 0x97, 0xee, 0x85, 0x66, 0x14, 0xad, 0xc3, 0xcf, 0xc0, 0x92, 0x2f, 0x29, 0xc2, 0x06, 0xaa, 0xac, 0xaf, 0xce, 0xdd, 0x8b, 0x52, 0x49, 0x13, 0xe1, 0xaf, 0xb7, 0x1f, 0x72, 0xe2, 0x31, 0xc7, 0xf7, 0x12, 0xe6, 0x70, 0x23, 0x0c, 0x45, 0x74, 0xd0, 0x06, 0x20, 0x88, 0x7a, 0x86, 0xe9, 0x79, 0x49, 0xde, 0x5c, 0xec, 0x72, 0xc7, 0xbd, 0x96, 0xe4, 0x19, 0x9b, 0x18, 0x4a, 0xd1, 0xc2, 0x43, 0x50, 0xec, 0xe1, 0x16, 0xe9, 0x31, 0xbd, 0x20, 0x03, 0x6c, 0x2e, 0x16, 0x20, 0x3a, 0x0b, 0x73, 0x57, 0x82, 0xb7, 0x3d, 0x4e, 0x07, 0xd6, 0x65, 0x15, 0xeb, 0x42, 0x98, 0x4a, 0xb8, 0x84, 0x14, 0x7f, 0xf5, 0x6d, 0x50, 0x49, 0x39, 0xc3, 0x65, 0x90, 0x3b, 0x22, 0x83, 0xf0, 0x0e, 0x20, 0xf1, 0x13, 0x5e, 0x8e, 0xda, 0x50, 0x96, 0x5c, 0xf5, 0xdd, 0x66, 0xf6, 0x96, 0x66, 0xfc, 0x98, 0x05, 0xcb, 0x2f, 0xe1, 0xe5, 0x38, 0x18, 0x7b, 0x39, 0xce, 0x58, 0x99, 0x67, 0xbd, 0x19, 0xb0, 0x0d, 0x8a, 0x8c, 0x63, 0xde, 0x67, 0xf2, 0x9e, 0x2e, 0xac, 0xdb, 0x53, 0xfc, 0x92, 0x23, 0x25, 0x71, 0xf2, 0x1b, 0x29, 0x6e, 0xe3, 0xef, 0x2c, 0x78, 0x65, 0x12, 0xf2, 0x81, 0xef, 0xb5, 0x1d, 0x79, 0xf3, 0xdf, 0x07, 0x79, 0x3e, 0x08, 0x22, 0x25, 0x5a, 0x8d, 0x76, 0xb9, 0x3f, 0x08, 0xc8, 0xd3, 0x61, 0xfd, 0xea, 0x5c, 0xa0, 0x70, 
0x40, 0x12, 0x0a, 0x77, 0xe3, 0x34, 0xc2, 0x9b, 0xb2, 0x31, 0xbe, 0x91, 0xa7, 0xc3, 0xfa, 0x8c, 0x01, 0xc6, 0x8c, 0x99, 0xc6, 0xb7, 0x0b, 0x8f, 0x01, 0xec, 0x61, 0xc6, 0xf7, 0x29, 0xf6, 0x58, 0x18, 0xc9, 0x71, 0x89, 0x2a, 0xd0, 0xeb, 0x8b, 0x1d, 0xaf, 0x40, 0x58, 0x55, 0xb5, 0x0b, 0xb8, 0x3b, 0xc5, 0x86, 0x66, 0x44, 0x80, 0xaf, 0x82, 0x22, 0x25, 0x98, 0xf9, 0x9e, 0xd2, 0xc0, 0xb8, 0x9c, 0x48, 0x5a, 0x91, 0x5a, 0x4d, 0x0b, 0x43, 0xe1, 0xbf, 0x85, 0xc1, 0xf8, 0x45, 0x03, 0x97, 0x5f, 0xc2, 0x34, 0xf0, 0xc5, 0xf8, 0x34, 0x70, 0xf3, 0x7c, 0x5d, 0x35, 0x67, 0x0e, 0x38, 0x00, 0x57, 0x26, 0x3d, 0xc3, 0x9b, 0x03, 0x2d, 0x90, 0xa3, 0xa4, 0x73, 0x96, 0x57, 0xbf, 0xa2, 0x22, 0xe4, 0x10, 0xe9, 0x20, 0x01, 0x36, 0xfe, 0x9c, 0xd1, 0xab, 0xe2, 0x2d, 0x20, 0x4c, 0xbc, 0x9a, 0x37, 0x40, 0xa9, 0xcf, 0x08, 0x4d, 0xbd, 0x9c, 0x71, 0x19, 0xee, 0x2b, 0x3b, 0x8a, 0x3d, 0xe0, 0x55, 0x90, 0xeb, 0x3b, 0x6d, 0xd5, 0x93, 0x71, 0xa8, 0xfb, 0x3b, 0x5b, 0x48, 0xd8, 0xa1, 0x01, 0x8a, 0x5d, 0xea, 0xf7, 0x83, 0xe8, 0xd5, 0x07, 0xe2, 0xac, 0xef, 0x48, 0x0b, 0x52, 0x2b, 0xd0, 0x07, 0x05, 0x22, 0x5e, 0x7b, 0x25, 0xbd, 0x77, 0xcf, 0x57, 0xc9, 0x38, 0x01, 0x53, 0x8e, 0x0e, 0xa1, 0x52, 0xc6, 0xd5, 0x95, 0x36, 0x14, 0xc6, 0xa9, 0x3e, 0x50, 0xe3, 0xc5, 0x3c, 0x81, 0xbc, 0x9d, 0x16, 0x48, 0x21, 0x77, 0x0b, 0x6d, 0x28, 0x99, 0x58, 0xd2, 0x92, 0xfa, 0x43, 0x76, 0xba, 0x3b, 0xe5, 0x2c, 0x77, 0x00, 0x4a, 0x11, 0x3a, 0xee, 0xce, 0x33, 0x25, 0x9e, 0x1c, 0x4b, 0x64, 0x41, 0x31, 0xa3, 0x54, 0x8b, 0xf4, 0xf8, 0xb6, 0x32, 0xab, 0x53, 0x76, 0x7d, 0x1b, 0xf7, 0x26, 0xdb, 0x05, 0xcc, 0x98, 0xdf, 0x7a, 0xa0, 0x4c, 0xa3, 0xf2, 0x2a, 0x91, 0x78, 0xf7, 0x39, 0x4f, 0xc9, 0xba, 0x38, 0x1a, 0xd6, 0xcb, 0xf1, 0x27, 0x4a, 0x02, 0x18, 0x7f, 0x69, 0xd3, 0xdd, 0x1f, 0xca, 0x17, 0x64, 0x00, 0xd8, 0x91, 0xa2, 0x85, 0xf3, 0xe0, 0xb9, 0x77, 0x12, 0x2b, 0x63, 0xf2, 0x38, 0xc5, 0x26, 0x86, 0x52, 0x61, 0x60, 0x37, 0x99, 0x3c, 0xce, 0x34, 0xf9, 0xcf, 0xbe, 0xc1, 0xf3, 0x07, 0x11, 0xe3, 0x67, 0x0d, 0x5c, 0x88, 0x40, 0x2f, 0x40, 0xc1, 0xf6, 
0xc6, 0x15, 0xec, 0xac, 0xed, 0x37, 0x53, 0xb9, 0xac, 0xbb, 0x27, 0xa7, 0xb5, 0xcc, 0xe3, 0xd3, 0x5a, 0xe6, 0xc9, 0x69, 0x2d, 0xf3, 0x68, 0x54, 0xd3, 0x4e, 0x46, 0x35, 0xed, 0xf1, 0xa8, 0xa6, 0x3d, 0x19, 0xd5, 0xb4, 0xdf, 0x47, 0x35, 0xed, 0xdb, 0x3f, 0x6a, 0x99, 0xcf, 0xaf, 0x2f, 0xf2, 0xb7, 0xff, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x61, 0xc4, 0xab, 0x1d, 0x10, 0x00, 0x00, } func (m *BrokerTemplateInstance) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BrokerTemplateInstance) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *BrokerTemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *BrokerTemplateInstanceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BrokerTemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *BrokerTemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *BrokerTemplateInstanceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BrokerTemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *BrokerTemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.BindingIDs) > 0 { for iNdEx := len(m.BindingIDs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.BindingIDs[iNdEx]) copy(dAtA[i:], m.BindingIDs[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingIDs[iNdEx]))) i-- dAtA[i] = 0x1a } } { size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size, err := m.TemplateInstance.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { i -= len(m[iNdEx]) copy(dAtA[i:], m[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *Parameter) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i-- if m.Required { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x38 i -= len(m.From) copy(dAtA[i:], m.From) i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) i-- dAtA[i] = 0x32 i -= len(m.Generate) copy(dAtA[i:], m.Generate) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Generate))) i-- dAtA[i] = 0x2a i -= len(m.Value) copy(dAtA[i:], m.Value) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x22 i -= len(m.Description) copy(dAtA[i:], m.Description) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) i-- dAtA[i] = 0x1a i -= len(m.DisplayName) copy(dAtA[i:], m.DisplayName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) i-- dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *Template) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Template) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.ObjectLabels) > 0 { keysForObjectLabels := make([]string, 0, len(m.ObjectLabels)) for k := range m.ObjectLabels { keysForObjectLabels = append(keysForObjectLabels, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) for iNdEx := len(keysForObjectLabels) - 1; iNdEx >= 0; iNdEx-- { v := m.ObjectLabels[string(keysForObjectLabels[iNdEx])] baseI 
:= i i -= len(v) copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 i -= len(keysForObjectLabels[iNdEx]) copy(dAtA[i:], keysForObjectLabels[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(keysForObjectLabels[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x2a } } if len(m.Parameters) > 0 { for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if len(m.Objects) > 0 { for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *TemplateInstance) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TemplateInstance) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *TemplateInstanceCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TemplateInstanceCondition) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TemplateInstanceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x2a i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) i-- dAtA[i] = 0x22 { size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a i -= len(m.Status) copy(dAtA[i:], m.Status) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) i-- dAtA[i] = 0x12 i -= len(m.Type) copy(dAtA[i:], m.Type) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *TemplateInstanceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/legacy.go
vendor/github.com/openshift/api/template/v1/legacy.go
package v1 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &Template{}, &TemplateList{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("TemplateConfig"), &Template{}) scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("ProcessedTemplate"), &Template{}) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/template/v1/codec.go
vendor/github.com/openshift/api/template/v1/codec.go
package v1 import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "github.com/openshift/api/pkg/serialization" ) var _ runtime.NestedObjectDecoder = &Template{} var _ runtime.NestedObjectEncoder = &Template{} // DecodeNestedObjects decodes the object as a runtime.Unknown with JSON content. func (c *Template) DecodeNestedObjects(d runtime.Decoder) error { for i := range c.Objects { if c.Objects[i].Object != nil { continue } c.Objects[i].Object = &runtime.Unknown{ ContentType: "application/json", Raw: c.Objects[i].Raw, } } return nil } func (c *Template) EncodeNestedObjects(e runtime.Encoder) error { for i := range c.Objects { if err := serialization.EncodeNestedRawExtension(unstructured.UnstructuredJSONScheme, &c.Objects[i]); err != nil { return err } } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/constants.go
vendor/github.com/openshift/api/network/v1/constants.go
package v1 const ( // Pod annotations AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan" // HostSubnet annotations. (Note: should be "hostsubnet.network.openshift.io/", but the incorrect name is now part of the API.) AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet" FixedVNIDHostAnnotation = "pod.network.openshift.io/fixed-vnid-host" NodeUIDAnnotation = "pod.network.openshift.io/node-uid" // NetNamespace annotations MulticastEnabledAnnotation = "netnamespace.network.openshift.io/multicast-enabled" // ChangePodNetworkAnnotation is an annotation on NetNamespace to request change of pod network ChangePodNetworkAnnotation string = "pod.network.openshift.io/multitenant.change-network" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetwork) DeepCopyInto(out *ClusterNetwork) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.ClusterNetworks != nil { in, out := &in.ClusterNetworks, &out.ClusterNetworks *out = make([]ClusterNetworkEntry, len(*in)) copy(*out, *in) } if in.VXLANPort != nil { in, out := &in.VXLANPort, &out.VXLANPort *out = new(uint32) **out = **in } if in.MTU != nil { in, out := &in.MTU, &out.MTU *out = new(uint32) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetwork. func (in *ClusterNetwork) DeepCopy() *ClusterNetwork { if in == nil { return nil } out := new(ClusterNetwork) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterNetwork) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { if in == nil { return nil } out := new(ClusterNetworkEntry) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterNetworkList) DeepCopyInto(out *ClusterNetworkList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterNetwork, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkList. func (in *ClusterNetworkList) DeepCopy() *ClusterNetworkList { if in == nil { return nil } out := new(ClusterNetworkList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *ClusterNetworkList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EgressNetworkPolicy) DeepCopyInto(out *EgressNetworkPolicy) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicy. func (in *EgressNetworkPolicy) DeepCopy() *EgressNetworkPolicy { if in == nil { return nil } out := new(EgressNetworkPolicy) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EgressNetworkPolicy) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EgressNetworkPolicyList) DeepCopyInto(out *EgressNetworkPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]EgressNetworkPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyList.
func (in *EgressNetworkPolicyList) DeepCopy() *EgressNetworkPolicyList {
	if in == nil {
		return nil
	}
	out := new(EgressNetworkPolicyList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EgressNetworkPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// EgressNetworkPolicyPeer holds only string fields, so plain assignment is a
// deep copy.
func (in *EgressNetworkPolicyPeer) DeepCopyInto(out *EgressNetworkPolicyPeer) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyPeer.
func (in *EgressNetworkPolicyPeer) DeepCopy() *EgressNetworkPolicyPeer {
	if in == nil {
		return nil
	}
	out := new(EgressNetworkPolicyPeer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressNetworkPolicyRule) DeepCopyInto(out *EgressNetworkPolicyRule) {
	*out = *in
	// To is a value struct with no reference fields, so assignment suffices.
	out.To = in.To
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyRule.
func (in *EgressNetworkPolicyRule) DeepCopy() *EgressNetworkPolicyRule {
	if in == nil {
		return nil
	}
	out := new(EgressNetworkPolicyRule)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressNetworkPolicySpec) DeepCopyInto(out *EgressNetworkPolicySpec) {
	*out = *in
	if in.Egress != nil {
		// Rule elements are value-only, so a bulk copy into a fresh slice is
		// a deep copy.
		in, out := &in.Egress, &out.Egress
		*out = make([]EgressNetworkPolicyRule, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicySpec.
func (in *EgressNetworkPolicySpec) DeepCopy() *EgressNetworkPolicySpec {
	if in == nil {
		return nil
	}
	out := new(EgressNetworkPolicySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostSubnet) DeepCopyInto(out *HostSubnet) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.EgressIPs != nil {
		in, out := &in.EgressIPs, &out.EgressIPs
		*out = make([]HostSubnetEgressIP, len(*in))
		copy(*out, *in)
	}
	if in.EgressCIDRs != nil {
		in, out := &in.EgressCIDRs, &out.EgressCIDRs
		*out = make([]HostSubnetEgressCIDR, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnet.
func (in *HostSubnet) DeepCopy() *HostSubnet {
	if in == nil {
		return nil
	}
	out := new(HostSubnet)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HostSubnet) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostSubnetList) DeepCopyInto(out *HostSubnetList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HostSubnet, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnetList.
func (in *HostSubnetList) DeepCopy() *HostSubnetList {
	if in == nil {
		return nil
	}
	out := new(HostSubnetList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HostSubnetList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetNamespace) DeepCopyInto(out *NetNamespace) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.EgressIPs != nil {
		in, out := &in.EgressIPs, &out.EgressIPs
		*out = make([]NetNamespaceEgressIP, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespace.
func (in *NetNamespace) DeepCopy() *NetNamespace {
	if in == nil {
		return nil
	}
	out := new(NetNamespace)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetNamespace) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetNamespaceList) DeepCopyInto(out *NetNamespaceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]NetNamespace, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespaceList.
func (in *NetNamespaceList) DeepCopy() *NetNamespaceList {
	if in == nil {
		return nil
	}
	out := new(NetNamespaceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetNamespaceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/types.go
vendor/github.com/openshift/api/network/v1/types.go
// Package v1 contains the openshift-sdn network API types
// (ClusterNetwork, HostSubnet, NetNamespace, EgressNetworkPolicy).
// NOTE(review): this is vendored upstream API surface; the kubebuilder
// markers below are machine-read and feed CRD generation — do not edit them
// without syncing with upstream.
package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// ClusterNetworkDefault is the name of the singleton ClusterNetwork
	// object created by the SDN plugin.
	ClusterNetworkDefault = "default"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterNetwork describes the cluster network. There is normally only one object of this type,
// named "default", which is created by the SDN network plugin based on the master configuration
// when the cluster is brought up for the first time.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=`.network`,description="The primary cluster network CIDR"
// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=`.serviceNetwork`,description="The service network CIDR"
// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=`.pluginName`,description="The Openshift SDN network plug-in in use"
// +openshift:compatibility-gen:level=1
type ClusterNetwork struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Network is a CIDR string specifying the global overlay network's L3 space
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
	Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"`

	// HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods
	// +kubebuilder:validation:Minimum=2
	// +kubebuilder:validation:Maximum=30
	HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"`

	// ServiceNetwork is the CIDR range that Service IP addresses are allocated from
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
	ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"`

	// PluginName is the name of the network plugin being used
	PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"`

	// ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.
	ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"`

	// VXLANPort sets the VXLAN destination port used by the cluster.
	// It is set by the master configuration file on startup and cannot be edited manually.
	// Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789.
	// Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:validation:Optional
	// +optional
	VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"`

	// MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.
	// NOTE(review): the marker allows up to 65536, one above the uint16 MTU
	// ceiling of 65535 — kept as upstream ships it.
	// +kubebuilder:validation:Minimum=576
	// +kubebuilder:validation:Maximum=65536
	// +kubebuilder:validation:Optional
	// +optional
	MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"`
}

// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
type ClusterNetworkEntry struct {
	// CIDR defines the total range of a cluster networks address space.
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
	CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"`

	// HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.
	// +kubebuilder:validation:Minimum=2
	// +kubebuilder:validation:Maximum=30
	HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterNetworkList is a collection of ClusterNetworks
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type ClusterNetworkList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is the list of cluster networks
	Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by
// HostSubnet
// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
type HostSubnetEgressIP string

// HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node
// represented by the HostSubnet
// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
type HostSubnetEgressCIDR string

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the
// same name as the Node object it corresponds to.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=`.host`,description="The name of the node"
// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=`.hostIP`,description="The IP address to be used as a VTEP by other nodes in the overlay network"
// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=`.subnet`,description="The CIDR range of the overlay network assigned to the node for its pods"
// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=`.egressCIDRs`,description="The network egress CIDRs"
// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses"
// +openshift:compatibility-gen:level=1
type HostSubnet struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Host is the name of the node. (This is the same as the object's name, but both fields must be set.)
	// +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
	Host string `json:"host" protobuf:"bytes,2,opt,name=host"`

	// HostIP is the IP address to be used as a VTEP by other nodes in the overlay network
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
	HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"`

	// Subnet is the CIDR range of the overlay network assigned to the node for its pods
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
	Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"`

	// EgressIPs is the list of automatic egress IP addresses currently hosted by this node.
	// If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the
	// master will overwrite the value here with its own allocation of egress IPs.
	// +optional
	EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"`

	// EgressCIDRs is the list of CIDR ranges available for automatically assigning
	// egress IPs to this node from. If this field is set then EgressIPs should be
	// treated as read-only.
	// +optional
	EgressCIDRs []HostSubnetEgressCIDR `json:"egressCIDRs,omitempty" protobuf:"bytes,6,rep,name=egressCIDRs"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// HostSubnetList is a collection of HostSubnets
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type HostSubnetList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is the list of host subnets
	Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming
// from pods in this namespace
// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
type NetNamespaceEgressIP string

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant
// plugin, every Namespace will have a corresponding NetNamespace object with the same name.
// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=`.netid`,description="The network identifier of the network namespace"
// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses"
// +openshift:compatibility-gen:level=1
type NetNamespace struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)
	// +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
	NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"`

	// NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands.
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=16777215
	NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"`

	// EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace.
	// (If empty, external traffic will be masqueraded to Node IPs.)
	// +optional
	EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NetNamespaceList is a collection of NetNamespaces
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type NetNamespaceList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is the list of net namespaces
	Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// EgressNetworkPolicyRuleType indicates whether an EgressNetworkPolicyRule allows or denies traffic
// NOTE(review): in this pattern the anchors bind per-alternative (`^Allow` OR
// `Deny$`), so strings like "Allowed" would also pass validation — kept
// exactly as upstream ships it.
// +kubebuilder:validation:Pattern=`^Allow|Deny$`
type EgressNetworkPolicyRuleType string

const (
	EgressNetworkPolicyRuleAllow EgressNetworkPolicyRuleType = "Allow"
	EgressNetworkPolicyRuleDeny  EgressNetworkPolicyRuleType = "Deny"
)

// EgressNetworkPolicyPeer specifies a target to apply egress network policy to
type EgressNetworkPolicyPeer struct {
	// CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset
	// Ideally we would have liked to use the cidr openapi format for this property.
	// But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs
	// We are therefore using a regex pattern to validate instead.
	// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
	CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"`

	// DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset
	// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$`
	DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"`
}

// EgressNetworkPolicyRule contains a single egress network policy rule
type EgressNetworkPolicyRule struct {
	// type marks this as an "Allow" or "Deny" rule
	Type EgressNetworkPolicyRuleType `json:"type" protobuf:"bytes,1,rep,name=type"`
	// to is the target that traffic is allowed/denied to
	To EgressNetworkPolicyPeer `json:"to" protobuf:"bytes,2,rep,name=to"`
}

// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic
type EgressNetworkPolicySpec struct {
	// egress contains the list of egress policy rules
	Egress []EgressNetworkPolicyRule `json:"egress" protobuf:"bytes,1,rep,name=egress"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using
// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address
// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's
// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy
// is present) then the traffic will be allowed by default.
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type EgressNetworkPolicy struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// spec is the specification of the current egress network policy
	Spec EgressNetworkPolicySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EgressNetworkPolicyList is a collection of EgressNetworkPolicy
//
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
type EgressNetworkPolicyList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata is the standard list's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// items is the list of policies
	Items []EgressNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/register.go
vendor/github.com/openshift/api/network/v1/register.go
package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	// GroupName is the API group served by these types.
	GroupName = "network.openshift.io"
	// GroupVersion identifies the group/version pair registered below.
	GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
	// schemeBuilder collects the registration functions for this package.
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// Install is a function which adds this version to a scheme
	Install = schemeBuilder.AddToScheme

	// SchemeGroupVersion generated code relies on this name
	// Deprecated
	SchemeGroupVersion = GroupVersion

	// AddToScheme exists solely to keep the old generators creating valid code
	// DEPRECATED
	AddToScheme = schemeBuilder.AddToScheme
)

// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
	return schema.GroupResource{Group: GroupName, Resource: resource}
}

// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(GroupVersion,
		&ClusterNetwork{},
		&ClusterNetworkList{},
		&HostSubnet{},
		&HostSubnetList{},
		&NetNamespace{},
		&NetNamespaceList{},
		&EgressNetworkPolicy{},
		&EgressNetworkPolicyList{},
	)
	// Also register the shared meta types (ListOptions, GetOptions, ...) into
	// this group/version so generic clients can use them.
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go
vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go
package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_ClusterNetwork = map[string]string{ "": "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "network": "Network is a CIDR string specifying the global overlay network's L3 space", "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", "pluginName": "PluginName is the name of the network plugin being used", "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. 
Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", } func (ClusterNetwork) SwaggerDoc() map[string]string { return map_ClusterNetwork } var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", "CIDR": "CIDR defines the total range of a cluster networks address space.", "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { return map_ClusterNetworkEntry } var map_ClusterNetworkList = map[string]string{ "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of cluster networks", } func (ClusterNetworkList) SwaggerDoc() map[string]string { return map_ClusterNetworkList } var map_EgressNetworkPolicy = map[string]string{ "": "EgressNetworkPolicy describes the current egress network policy for a Namespace. 
When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the specification of the current egress network policy", } func (EgressNetworkPolicy) SwaggerDoc() map[string]string { return map_EgressNetworkPolicy } var map_EgressNetworkPolicyList = map[string]string{ "": "EgressNetworkPolicyList is a collection of EgressNetworkPolicy\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of policies", } func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { return map_EgressNetworkPolicyList } var map_EgressNetworkPolicyPeer = map[string]string{ "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", "dnsName": "DNSName is the domain name to allow/deny traffic to. 
If this is set, cidrSelector must be unset", } func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { return map_EgressNetworkPolicyPeer } var map_EgressNetworkPolicyRule = map[string]string{ "": "EgressNetworkPolicyRule contains a single egress network policy rule", "type": "type marks this as an \"Allow\" or \"Deny\" rule", "to": "to is the target that traffic is allowed/denied to", } func (EgressNetworkPolicyRule) SwaggerDoc() map[string]string { return map_EgressNetworkPolicyRule } var map_EgressNetworkPolicySpec = map[string]string{ "": "EgressNetworkPolicySpec provides a list of policies on outgoing network traffic", "egress": "egress contains the list of egress policy rules", } func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { return map_EgressNetworkPolicySpec } var map_HostSubnet = map[string]string{ "": "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. 
If this field is set then EgressIPs should be treated as read-only.", } func (HostSubnet) SwaggerDoc() map[string]string { return map_HostSubnet } var map_HostSubnetList = map[string]string{ "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of host subnets", } func (HostSubnetList) SwaggerDoc() map[string]string { return map_HostSubnetList } var map_NetNamespace = map[string]string{ "": "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. 
(If empty, external traffic will be masqueraded to Node IPs.)", } func (NetNamespace) SwaggerDoc() map[string]string { return map_NetNamespace } var map_NetNamespaceList = map[string]string{ "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of net namespaces", } func (NetNamespaceList) SwaggerDoc() map[string]string { return map_NetNamespaceList } // AUTO-GENERATED FUNCTIONS END HERE
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/doc.go
vendor/github.com/openshift/api/network/v1/doc.go
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/origin/pkg/network/apis/network // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true // +groupName=network.openshift.io // Package v1 is the v1 version of the API. package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/generated.pb.go
vendor/github.com/openshift/api/network/v1/generated.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/network/v1/generated.proto package v1 import ( fmt "fmt" io "io" proto "github.com/gogo/protobuf/proto" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ClusterNetwork) Reset() { *m = ClusterNetwork{} } func (*ClusterNetwork) ProtoMessage() {} func (*ClusterNetwork) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{0} } func (m *ClusterNetwork) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ClusterNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ClusterNetwork) XXX_Merge(src proto.Message) { xxx_messageInfo_ClusterNetwork.Merge(m, src) } func (m *ClusterNetwork) XXX_Size() int { return m.Size() } func (m *ClusterNetwork) XXX_DiscardUnknown() { xxx_messageInfo_ClusterNetwork.DiscardUnknown(m) } var xxx_messageInfo_ClusterNetwork proto.InternalMessageInfo func (m *ClusterNetworkEntry) Reset() { *m = ClusterNetworkEntry{} } func (*ClusterNetworkEntry) ProtoMessage() {} func (*ClusterNetworkEntry) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{1} } func (m *ClusterNetworkEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ClusterNetworkEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err 
} return b[:n], nil } func (m *ClusterNetworkEntry) XXX_Merge(src proto.Message) { xxx_messageInfo_ClusterNetworkEntry.Merge(m, src) } func (m *ClusterNetworkEntry) XXX_Size() int { return m.Size() } func (m *ClusterNetworkEntry) XXX_DiscardUnknown() { xxx_messageInfo_ClusterNetworkEntry.DiscardUnknown(m) } var xxx_messageInfo_ClusterNetworkEntry proto.InternalMessageInfo func (m *ClusterNetworkList) Reset() { *m = ClusterNetworkList{} } func (*ClusterNetworkList) ProtoMessage() {} func (*ClusterNetworkList) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{2} } func (m *ClusterNetworkList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ClusterNetworkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *ClusterNetworkList) XXX_Merge(src proto.Message) { xxx_messageInfo_ClusterNetworkList.Merge(m, src) } func (m *ClusterNetworkList) XXX_Size() int { return m.Size() } func (m *ClusterNetworkList) XXX_DiscardUnknown() { xxx_messageInfo_ClusterNetworkList.DiscardUnknown(m) } var xxx_messageInfo_ClusterNetworkList proto.InternalMessageInfo func (m *EgressNetworkPolicy) Reset() { *m = EgressNetworkPolicy{} } func (*EgressNetworkPolicy) ProtoMessage() {} func (*EgressNetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{3} } func (m *EgressNetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EgressNetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *EgressNetworkPolicy) XXX_Merge(src proto.Message) { xxx_messageInfo_EgressNetworkPolicy.Merge(m, src) } func (m *EgressNetworkPolicy) XXX_Size() int { return m.Size() } func (m *EgressNetworkPolicy) XXX_DiscardUnknown() { 
xxx_messageInfo_EgressNetworkPolicy.DiscardUnknown(m) } var xxx_messageInfo_EgressNetworkPolicy proto.InternalMessageInfo func (m *EgressNetworkPolicyList) Reset() { *m = EgressNetworkPolicyList{} } func (*EgressNetworkPolicyList) ProtoMessage() {} func (*EgressNetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{4} } func (m *EgressNetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EgressNetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *EgressNetworkPolicyList) XXX_Merge(src proto.Message) { xxx_messageInfo_EgressNetworkPolicyList.Merge(m, src) } func (m *EgressNetworkPolicyList) XXX_Size() int { return m.Size() } func (m *EgressNetworkPolicyList) XXX_DiscardUnknown() { xxx_messageInfo_EgressNetworkPolicyList.DiscardUnknown(m) } var xxx_messageInfo_EgressNetworkPolicyList proto.InternalMessageInfo func (m *EgressNetworkPolicyPeer) Reset() { *m = EgressNetworkPolicyPeer{} } func (*EgressNetworkPolicyPeer) ProtoMessage() {} func (*EgressNetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{5} } func (m *EgressNetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EgressNetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *EgressNetworkPolicyPeer) XXX_Merge(src proto.Message) { xxx_messageInfo_EgressNetworkPolicyPeer.Merge(m, src) } func (m *EgressNetworkPolicyPeer) XXX_Size() int { return m.Size() } func (m *EgressNetworkPolicyPeer) XXX_DiscardUnknown() { xxx_messageInfo_EgressNetworkPolicyPeer.DiscardUnknown(m) } var xxx_messageInfo_EgressNetworkPolicyPeer proto.InternalMessageInfo func (m *EgressNetworkPolicyRule) Reset() { *m = 
EgressNetworkPolicyRule{} } func (*EgressNetworkPolicyRule) ProtoMessage() {} func (*EgressNetworkPolicyRule) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{6} } func (m *EgressNetworkPolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EgressNetworkPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *EgressNetworkPolicyRule) XXX_Merge(src proto.Message) { xxx_messageInfo_EgressNetworkPolicyRule.Merge(m, src) } func (m *EgressNetworkPolicyRule) XXX_Size() int { return m.Size() } func (m *EgressNetworkPolicyRule) XXX_DiscardUnknown() { xxx_messageInfo_EgressNetworkPolicyRule.DiscardUnknown(m) } var xxx_messageInfo_EgressNetworkPolicyRule proto.InternalMessageInfo func (m *EgressNetworkPolicySpec) Reset() { *m = EgressNetworkPolicySpec{} } func (*EgressNetworkPolicySpec) ProtoMessage() {} func (*EgressNetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{7} } func (m *EgressNetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EgressNetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *EgressNetworkPolicySpec) XXX_Merge(src proto.Message) { xxx_messageInfo_EgressNetworkPolicySpec.Merge(m, src) } func (m *EgressNetworkPolicySpec) XXX_Size() int { return m.Size() } func (m *EgressNetworkPolicySpec) XXX_DiscardUnknown() { xxx_messageInfo_EgressNetworkPolicySpec.DiscardUnknown(m) } var xxx_messageInfo_EgressNetworkPolicySpec proto.InternalMessageInfo func (m *HostSubnet) Reset() { *m = HostSubnet{} } func (*HostSubnet) ProtoMessage() {} func (*HostSubnet) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{8} } func (m *HostSubnet) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) } func (m *HostSubnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *HostSubnet) XXX_Merge(src proto.Message) { xxx_messageInfo_HostSubnet.Merge(m, src) } func (m *HostSubnet) XXX_Size() int { return m.Size() } func (m *HostSubnet) XXX_DiscardUnknown() { xxx_messageInfo_HostSubnet.DiscardUnknown(m) } var xxx_messageInfo_HostSubnet proto.InternalMessageInfo func (m *HostSubnetList) Reset() { *m = HostSubnetList{} } func (*HostSubnetList) ProtoMessage() {} func (*HostSubnetList) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{9} } func (m *HostSubnetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HostSubnetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *HostSubnetList) XXX_Merge(src proto.Message) { xxx_messageInfo_HostSubnetList.Merge(m, src) } func (m *HostSubnetList) XXX_Size() int { return m.Size() } func (m *HostSubnetList) XXX_DiscardUnknown() { xxx_messageInfo_HostSubnetList.DiscardUnknown(m) } var xxx_messageInfo_HostSubnetList proto.InternalMessageInfo func (m *NetNamespace) Reset() { *m = NetNamespace{} } func (*NetNamespace) ProtoMessage() {} func (*NetNamespace) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{10} } func (m *NetNamespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *NetNamespace) XXX_Merge(src proto.Message) { xxx_messageInfo_NetNamespace.Merge(m, src) } func (m *NetNamespace) XXX_Size() int { return m.Size() } func (m *NetNamespace) XXX_DiscardUnknown() { 
xxx_messageInfo_NetNamespace.DiscardUnknown(m) } var xxx_messageInfo_NetNamespace proto.InternalMessageInfo func (m *NetNamespaceList) Reset() { *m = NetNamespaceList{} } func (*NetNamespaceList) ProtoMessage() {} func (*NetNamespaceList) Descriptor() ([]byte, []int) { return fileDescriptor_38d1cb27735fa5d9, []int{11} } func (m *NetNamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NetNamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *NetNamespaceList) XXX_Merge(src proto.Message) { xxx_messageInfo_NetNamespaceList.Merge(m, src) } func (m *NetNamespaceList) XXX_Size() int { return m.Size() } func (m *NetNamespaceList) XXX_DiscardUnknown() { xxx_messageInfo_NetNamespaceList.DiscardUnknown(m) } var xxx_messageInfo_NetNamespaceList proto.InternalMessageInfo func init() { proto.RegisterType((*ClusterNetwork)(nil), "github.com.openshift.api.network.v1.ClusterNetwork") proto.RegisterType((*ClusterNetworkEntry)(nil), "github.com.openshift.api.network.v1.ClusterNetworkEntry") proto.RegisterType((*ClusterNetworkList)(nil), "github.com.openshift.api.network.v1.ClusterNetworkList") proto.RegisterType((*EgressNetworkPolicy)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicy") proto.RegisterType((*EgressNetworkPolicyList)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyList") proto.RegisterType((*EgressNetworkPolicyPeer)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyPeer") proto.RegisterType((*EgressNetworkPolicyRule)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyRule") proto.RegisterType((*EgressNetworkPolicySpec)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicySpec") proto.RegisterType((*HostSubnet)(nil), "github.com.openshift.api.network.v1.HostSubnet") proto.RegisterType((*HostSubnetList)(nil), 
"github.com.openshift.api.network.v1.HostSubnetList") proto.RegisterType((*NetNamespace)(nil), "github.com.openshift.api.network.v1.NetNamespace") proto.RegisterType((*NetNamespaceList)(nil), "github.com.openshift.api.network.v1.NetNamespaceList") } func init() { proto.RegisterFile("github.com/openshift/api/network/v1/generated.proto", fileDescriptor_38d1cb27735fa5d9) } var fileDescriptor_38d1cb27735fa5d9 = []byte{ // 996 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, 0x14, 0xaf, 0xf3, 0xa7, 0x6d, 0x26, 0x6d, 0x5a, 0xcd, 0x56, 0xac, 0x29, 0x92, 0x13, 0xb9, 0x02, 0x82, 0x56, 0xd8, 0xb4, 0x8b, 0x50, 0x0f, 0x08, 0xb4, 0x6e, 0x2b, 0x6d, 0xa4, 0x6e, 0x88, 0x26, 0x65, 0x55, 0x21, 0x40, 0xb8, 0xce, 0xac, 0x63, 0x9a, 0xd8, 0x96, 0x67, 0x12, 0x88, 0x10, 0x7f, 0x2e, 0xdc, 0xf9, 0x00, 0x7c, 0x0c, 0x3e, 0x02, 0x87, 0x1e, 0x38, 0xec, 0x09, 0xf6, 0x14, 0x51, 0x73, 0xe7, 0x03, 0xf4, 0x84, 0x66, 0x3c, 0x8e, 0xed, 0xac, 0x2b, 0xa2, 0x22, 0x72, 0x4a, 0xe6, 0xfd, 0xde, 0xdf, 0xf9, 0xbd, 0xf7, 0xc6, 0xe0, 0xa1, 0xed, 0xd0, 0xfe, 0xe8, 0x42, 0xb3, 0xbc, 0xa1, 0xee, 0xf9, 0xd8, 0x25, 0x7d, 0xe7, 0x19, 0xd5, 0x4d, 0xdf, 0xd1, 0x5d, 0x4c, 0xbf, 0xf2, 0x82, 0x4b, 0x7d, 0xbc, 0xaf, 0xdb, 0xd8, 0xc5, 0x81, 0x49, 0x71, 0x4f, 0xf3, 0x03, 0x8f, 0x7a, 0x70, 0x2f, 0x31, 0xd2, 0x66, 0x46, 0x9a, 0xe9, 0x3b, 0x9a, 0x30, 0xd2, 0xc6, 0xfb, 0xbb, 0x6f, 0xa7, 0x3c, 0xdb, 0x9e, 0xed, 0xe9, 0xdc, 0xf6, 0x62, 0xf4, 0x8c, 0x9f, 0xf8, 0x81, 0xff, 0x8b, 0x7c, 0xee, 0xbe, 0x7b, 0x79, 0x48, 0x34, 0xc7, 0x63, 0xa1, 0x87, 0xa6, 0xd5, 0x77, 0x5c, 0x1c, 0x4c, 0x74, 0xff, 0xd2, 0x66, 0x02, 0xa2, 0x0f, 0x31, 0x35, 0x73, 0x32, 0xd9, 0x7d, 0xef, 0x36, 0xab, 0x60, 0xe4, 0x52, 0x67, 0x88, 0x75, 0x62, 0xf5, 0xf1, 0xd0, 0x9c, 0xb7, 0x53, 0x7f, 0x2e, 0x81, 0xda, 0xd1, 0x60, 0x44, 0x28, 0x0e, 0xda, 0x51, 0xca, 0xf0, 0x0b, 0xb0, 0xce, 0xa2, 0xf4, 0x4c, 0x6a, 0xca, 0x52, 0x43, 0x6a, 0x56, 0x0f, 0xde, 0xd1, 0x22, 0xef, 0x5a, 0xda, 0xbb, 
0xe6, 0x5f, 0xda, 0x4c, 0x40, 0x34, 0xa6, 0xad, 0x8d, 0xf7, 0xb5, 0x8f, 0x2e, 0xbe, 0xc4, 0x16, 0x7d, 0x82, 0xa9, 0x69, 0xc0, 0xab, 0x69, 0x7d, 0x25, 0x9c, 0xd6, 0x41, 0x22, 0x43, 0x33, 0xaf, 0xf0, 0x2d, 0xb0, 0x26, 0xee, 0x47, 0x2e, 0x34, 0xa4, 0x66, 0xc5, 0xd8, 0x12, 0xea, 0x6b, 0x22, 0x07, 0x14, 0xe3, 0xf0, 0x18, 0x6c, 0xf7, 0x3d, 0x42, 0xc9, 0xe8, 0xc2, 0xc5, 0x74, 0x80, 0x5d, 0x9b, 0xf6, 0xe5, 0x62, 0x43, 0x6a, 0x6e, 0x1a, 0xb2, 0xb0, 0xd9, 0x7e, 0xec, 0x11, 0xda, 0xe5, 0xf8, 0x29, 0xc7, 0xd1, 0x4b, 0x16, 0xf0, 0x03, 0x50, 0x23, 0x38, 0x18, 0x3b, 0x16, 0x16, 0x01, 0xe4, 0x12, 0x8f, 0xfb, 0x8a, 0xf0, 0x51, 0xeb, 0x66, 0x50, 0x34, 0xa7, 0x0d, 0x0f, 0x00, 0xf0, 0x07, 0x23, 0xdb, 0x71, 0xdb, 0xe6, 0x10, 0xcb, 0x65, 0x6e, 0x3b, 0x2b, 0xb1, 0x33, 0x43, 0x50, 0x4a, 0x0b, 0x7e, 0x03, 0xb6, 0xac, 0xcc, 0xc5, 0x12, 0x79, 0xb5, 0x51, 0x6c, 0x56, 0x0f, 0x0e, 0xb5, 0x05, 0xba, 0x46, 0xcb, 0x92, 0x72, 0xe2, 0xd2, 0x60, 0x62, 0xdc, 0x17, 0x21, 0xb7, 0xb2, 0x20, 0x41, 0xf3, 0x91, 0xe0, 0x03, 0x50, 0x19, 0x7f, 0x3d, 0x30, 0xdd, 0x8e, 0x17, 0x50, 0x79, 0x8d, 0xdf, 0xd7, 0x66, 0x38, 0xad, 0x57, 0x9e, 0x9e, 0x9f, 0x3e, 0x6a, 0x33, 0x21, 0x4a, 0x70, 0xf8, 0x2a, 0x28, 0x0e, 0xe9, 0x48, 0x5e, 0xe7, 0x6a, 0x6b, 0xe1, 0xb4, 0x5e, 0x7c, 0x72, 0xf6, 0x31, 0x62, 0x32, 0xf5, 0x5b, 0x70, 0x2f, 0x27, 0x11, 0xd8, 0x00, 0x25, 0xcb, 0xe9, 0x05, 0xbc, 0x3d, 0x2a, 0xc6, 0x86, 0x48, 0xab, 0x74, 0xd4, 0x3a, 0x46, 0x88, 0x23, 0x31, 0x6f, 0x69, 0x5e, 0x38, 0xd7, 0xff, 0xca, 0x5b, 0x5a, 0xa2, 0xfe, 0x26, 0x01, 0x98, 0x8d, 0x7f, 0xea, 0x10, 0x0a, 0x3f, 0x7d, 0xa9, 0x43, 0xb5, 0xc5, 0x3a, 0x94, 0x59, 0xf3, 0xfe, 0xdc, 0x16, 0x49, 0xac, 0xc7, 0x92, 0x54, 0x77, 0x9e, 0x83, 0xb2, 0x43, 0xf1, 0x90, 0xc8, 0x05, 0x4e, 0xd7, 0xc3, 0x3b, 0xd0, 0x65, 0x6c, 0x0a, 0xff, 0xe5, 0x16, 0xf3, 0x84, 0x22, 0x87, 0xea, 0x1f, 0x12, 0xb8, 0x77, 0x62, 0x07, 0x98, 0x10, 0xa1, 0xd7, 0xf1, 0x06, 0x8e, 0x35, 0x59, 0xc2, 0xc4, 0x7d, 0x0e, 0x4a, 0xc4, 0xc7, 0x16, 0xa7, 0xa0, 0x7a, 0xf0, 0xfe, 0x42, 0x25, 0xe5, 0x64, 0xda, 0xf5, 
0xb1, 0x95, 0xd0, 0xcd, 0x4e, 0x88, 0xfb, 0x55, 0x7f, 0x97, 0xc0, 0xfd, 0x1c, 0xfd, 0x25, 0xb0, 0xf5, 0x59, 0x96, 0xad, 0xc3, 0xbb, 0x96, 0x76, 0x0b, 0x65, 0xdf, 0xe5, 0xd6, 0xd5, 0xc1, 0x38, 0x80, 0x87, 0x60, 0x83, 0xb5, 0x7a, 0x17, 0x0f, 0xb0, 0x45, 0xbd, 0x78, 0x18, 0x76, 0x84, 0x9b, 0x0d, 0x36, 0x0c, 0x31, 0x86, 0x32, 0x9a, 0x6c, 0xff, 0xf5, 0x5c, 0xc2, 0x77, 0xc9, 0xdc, 0xfe, 0x3b, 0x6e, 0x77, 0xf9, 0x22, 0x89, 0x71, 0xf5, 0x97, 0xfc, 0x8b, 0x45, 0xa3, 0x01, 0x86, 0x1f, 0x82, 0x12, 0x9d, 0xf8, 0x58, 0x04, 0x7e, 0x10, 0xd3, 0x72, 0x36, 0xf1, 0xf1, 0xcd, 0xb4, 0xfe, 0xda, 0x2d, 0x66, 0x0c, 0x46, 0xdc, 0x10, 0x9e, 0x83, 0x02, 0xf5, 0xfe, 0x6b, 0x4f, 0xb0, 0xbb, 0x30, 0x80, 0x08, 0x5e, 0x38, 0xf3, 0x50, 0x81, 0x7a, 0xea, 0xf7, 0xb9, 0x59, 0xb3, 0x86, 0x81, 0x3d, 0xb0, 0x8a, 0x39, 0x24, 0x4b, 0x9c, 0xb1, 0x3b, 0x07, 0x66, 0xc5, 0x18, 0x35, 0x11, 0x78, 0x35, 0x52, 0x40, 0xc2, 0xb7, 0xfa, 0x77, 0x01, 0x80, 0x64, 0xc1, 0x2c, 0x61, 0xc2, 0x1a, 0xa0, 0xc4, 0xd6, 0x97, 0x20, 0x74, 0x36, 0x23, 0x2c, 0x07, 0xc4, 0x11, 0xf8, 0x06, 0x58, 0x65, 0xbf, 0xad, 0x0e, 0x7f, 0xc0, 0x2a, 0x49, 0xea, 0x8f, 0xb9, 0x14, 0x09, 0x94, 0xe9, 0x45, 0x8f, 0x97, 0x78, 0xa4, 0x66, 0x7a, 0x51, 0x2d, 0x48, 0xa0, 0xf0, 0x11, 0xa8, 0x44, 0xc5, 0xb6, 0x3a, 0x44, 0x2e, 0x37, 0x8a, 0xcd, 0x8a, 0xb1, 0xc7, 0x76, 0xfc, 0x49, 0x2c, 0xbc, 0x99, 0xd6, 0x61, 0x72, 0x07, 0xb1, 0x18, 0x25, 0x56, 0xb0, 0x05, 0xaa, 0xd1, 0x81, 0x35, 0x6b, 0xf4, 0x3e, 0x55, 0x8c, 0x37, 0xc3, 0x69, 0xbd, 0x7a, 0x92, 0x88, 0x6f, 0xa6, 0xf5, 0x9d, 0x79, 0x37, 0x7c, 0xd3, 0xa7, 0x6d, 0xd5, 0x5f, 0x25, 0x50, 0x4b, 0x6d, 0xf4, 0xff, 0x7f, 0xf0, 0xcf, 0xb2, 0x83, 0xaf, 0x2f, 0xd4, 0x46, 0x49, 0x86, 0xb7, 0xcc, 0xfb, 0x8f, 0x05, 0xb0, 0xd1, 0xc6, 0x94, 0xcd, 0x1e, 0xf1, 0x4d, 0x0b, 0x2f, 0xed, 0x6b, 0xc8, 0xcd, 0xd9, 0x06, 0x22, 0x11, 0x14, 0xe3, 0x70, 0x0f, 0x94, 0x5d, 0x4c, 0x9d, 0x9e, 0xf8, 0x04, 0x9a, 0x95, 0xd0, 0xc6, 0xb4, 0x75, 0x8c, 0x22, 0x0c, 0x1e, 0xa5, 0xfb, 0xa2, 0xc4, 0x29, 0x7d, 0x7d, 0xbe, 0x2f, 0x76, 0xd2, 0x35, 0xe6, 
0x74, 0x86, 0x7a, 0x25, 0x81, 0xed, 0xb4, 0xce, 0x12, 0x08, 0x7d, 0x9a, 0x25, 0x74, 0x7f, 0x21, 0x42, 0xd3, 0x39, 0xe6, 0x53, 0x6a, 0xb4, 0xae, 0xae, 0x95, 0x95, 0xe7, 0xd7, 0xca, 0xca, 0x8b, 0x6b, 0x65, 0xe5, 0x87, 0x50, 0x91, 0xae, 0x42, 0x45, 0x7a, 0x1e, 0x2a, 0xd2, 0x8b, 0x50, 0x91, 0xfe, 0x0c, 0x15, 0xe9, 0xa7, 0xbf, 0x94, 0x95, 0x4f, 0xf6, 0x16, 0xf8, 0xfe, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x4d, 0xd5, 0x11, 0x25, 0x0c, 0x00, 0x00, } func (m *ClusterNetwork) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ClusterNetwork) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ClusterNetwork) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.MTU != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.MTU)) i-- dAtA[i] = 0x40 } if m.VXLANPort != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.VXLANPort)) i-- dAtA[i] = 0x38 } if len(m.ClusterNetworks) > 0 { for iNdEx := len(m.ClusterNetworks) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.ClusterNetworks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x32 } } i -= len(m.PluginName) copy(dAtA[i:], m.PluginName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.PluginName))) i-- dAtA[i] = 0x2a i -= len(m.ServiceNetwork) copy(dAtA[i:], m.ServiceNetwork) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceNetwork))) i-- dAtA[i] = 0x22 i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) i-- dAtA[i] = 0x18 i -= len(m.Network) copy(dAtA[i:], m.Network) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Network))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *ClusterNetworkEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ClusterNetworkEntry) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ClusterNetworkEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) i-- dAtA[i] = 0x10 i -= len(m.CIDR) copy(dAtA[i:], m.CIDR) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *ClusterNetworkList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ClusterNetworkList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ClusterNetworkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *EgressNetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EgressNetworkPolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m 
*EgressNetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *EgressNetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EgressNetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EgressNetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *EgressNetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EgressNetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EgressNetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i -= len(m.DNSName) copy(dAtA[i:], m.DNSName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSName))) i-- dAtA[i] = 0x12 i -= 
len(m.CIDRSelector) copy(dAtA[i:], m.CIDRSelector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRSelector))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *EgressNetworkPolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EgressNetworkPolicyRule) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EgressNetworkPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 i -= len(m.Type) copy(dAtA[i:], m.Type) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *EgressNetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EgressNetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EgressNetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Egress) > 0 { for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *HostSubnet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HostSubnet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } func (m *HostSubnet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.EgressCIDRs) > 0 { for iNdEx := len(m.EgressCIDRs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.EgressCIDRs[iNdEx]) copy(dAtA[i:], m.EgressCIDRs[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressCIDRs[iNdEx]))) i-- dAtA[i] = 0x32 } } if len(m.EgressIPs) > 0 { for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.EgressIPs[iNdEx]) copy(dAtA[i:], m.EgressIPs[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) i-- dAtA[i] = 0x2a } } i -= len(m.Subnet) copy(dAtA[i:], m.Subnet) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subnet))) i-- dAtA[i] = 0x22 i -= len(m.HostIP) copy(dAtA[i:], m.HostIP) i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) i-- dAtA[i] = 0x1a i -= len(m.Host) copy(dAtA[i:], m.Host) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *HostSubnetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HostSubnetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *HostSubnetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *NetNamespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NetNamespace) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NetNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.EgressIPs) > 0 { for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.EgressIPs[iNdEx]) copy(dAtA[i:], m.EgressIPs[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) i-- dAtA[i] = 0x22 } } i = encodeVarintGenerated(dAtA, i, uint64(m.NetID)) i-- dAtA[i] = 0x18 i -= len(m.NetName) copy(dAtA[i:], m.NetName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NetName))) i-- dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *NetNamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NetNamespaceList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NetNamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Items) > 0 { for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *ClusterNetwork) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = len(m.Network) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.HostSubnetLength)) l = len(m.ServiceNetwork) n += 1 + l + sovGenerated(uint64(l)) l = len(m.PluginName) n += 1 + l + sovGenerated(uint64(l)) if len(m.ClusterNetworks) > 0 { for _, e := range m.ClusterNetworks { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } if m.VXLANPort != nil { n += 1 + sovGenerated(uint64(*m.VXLANPort)) } if m.MTU != nil { n += 1 + sovGenerated(uint64(*m.MTU)) } return n } func (m *ClusterNetworkEntry) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.CIDR) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.HostSubnetLength)) return n } func (m *ClusterNetworkList) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } func (m *EgressNetworkPolicy) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func (m *EgressNetworkPolicyList) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l))
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/api/network/v1/legacy.go
vendor/github.com/openshift/api/network/v1/legacy.go
package v1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme ) func addLegacyKnownTypes(scheme *runtime.Scheme) error { types := []runtime.Object{ &ClusterNetwork{}, &ClusterNetworkList{}, &HostSubnet{}, &HostSubnetList{}, &NetNamespace{}, &NetNamespaceList{}, &EgressNetworkPolicy{}, &EgressNetworkPolicyList{}, } scheme.AddKnownTypes(legacyGroupVersion, types...) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go
vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go
// +build !ignore_autogenerated

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	// The two Time fields delegate to their own DeepCopyInto so any internal
	// state is copied rather than shared with the receiver.
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go
vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go
package v1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Condition represents the state of the operator's
// reconciliation functionality.
// +k8s:deepcopy-gen=true
type Condition struct {
	// Type identifies the aspect of the operator this condition describes.
	Type ConditionType `json:"type" description:"type of condition ie. Available|Progressing|Degraded."`

	// Status is the current truth value of the condition.
	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`

	// Reason is a one-word, machine-friendly explanation for the last transition.
	// +optional
	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`

	// Message is a human-readable elaboration of Reason.
	// +optional
	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`

	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"`

	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"`
}

// ConditionType is the state of the operator's reconciliation functionality.
type ConditionType string

const (
	// ConditionAvailable indicates that the resources maintained by the operator,
	// is functional and available in the cluster.
	ConditionAvailable ConditionType = "Available"

	// ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the
	// operator
	ConditionProgressing ConditionType = "Progressing"

	// ConditionDegraded indicates that the resources maintained by the operator are not functioning completely.
	// An example of a degraded state would be if not all pods in a deployment were running.
	// It may still be available, but it is degraded
	ConditionDegraded ConditionType = "Degraded"

	// ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade.
	// When `False`, the resources maintained by the operator should not be upgraded and the
	// message field should contain a human readable description of what the administrator should do to
	// allow the operator to successfully update the resources maintained by the operator.
	ConditionUpgradeable ConditionType = "Upgradeable"
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go
vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go
package v1

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// SetStatusCondition sets the corresponding condition in conditions to newCondition.
// On a status change LastTransitionTime is reset; LastHeartbeatTime is always refreshed.
func SetStatusCondition(conditions *[]Condition, newCondition Condition) {
	if conditions == nil {
		// NOTE(review): this reassigns only the local pointer, so when the caller
		// passes nil the appended condition is never visible to them; it merely
		// prevents a nil dereference below. Confirm this is the intent upstream.
		conditions = &[]Condition{}
	}
	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		// First time we see this type: stamp both timestamps and append.
		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
		newCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
		*conditions = append(*conditions, newCondition)
		return
	}

	// Only a real status flip moves LastTransitionTime.
	if existingCondition.Status != newCondition.Status {
		existingCondition.Status = newCondition.Status
		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
	}

	existingCondition.Reason = newCondition.Reason
	existingCondition.Message = newCondition.Message
	existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
}

// SetStatusConditionNoHeartbeat sets the corresponding condition in conditions to newCondition
// without setting lastHeartbeatTime.
func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) {
	if conditions == nil {
		// NOTE(review): same local-pointer caveat as SetStatusCondition above.
		conditions = &[]Condition{}
	}
	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
		*conditions = append(*conditions, newCondition)
		return
	}

	if existingCondition.Status != newCondition.Status {
		existingCondition.Status = newCondition.Status
		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
	}

	existingCondition.Reason = newCondition.Reason
	existingCondition.Message = newCondition.Message
}

// RemoveStatusCondition removes the corresponding conditionType from conditions.
func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) { if conditions == nil { return } newConditions := []Condition{} for _, condition := range *conditions { if condition.Type != conditionType { newConditions = append(newConditions, condition) } } *conditions = newConditions } // FindStatusCondition finds the conditionType in conditions. func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { for i := range conditions { if conditions[i].Type == conditionType { return &conditions[i] } } return nil } // IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue` func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool { return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue) } // IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse` func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool { return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse) } // IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown` func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool { return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown) } // IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { for _, condition := range conditions { if condition.Type == conditionType { return condition.Status == status } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go
vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true

// Package v1 provides version v1 of the types and functions necessary to
// manage and inspect a slice of conditions. It is opinionated in the
// condition types provided but leaves it to the user to define additional
// types as necessary.
package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go
vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go
package templateprocessing

import (
	"encoding/json"
	"fmt"
	"reflect"

	"k8s.io/klog/v2"
)

// visitObjectStrings recursively visits all string fields in the object and calls the
// visitor function on them. The visitor function can be used to modify the
// value of the string fields.
func visitObjectStrings(obj interface{}, visitor func(string) (string, bool)) error {
	return visitValue(reflect.ValueOf(obj), visitor)
}

// visitValue walks v recursively, applying visitor to every reachable string.
// The visitor's boolean result reports whether the replacement should stay a
// string; a settable string field that becomes non-string is an error.
func visitValue(v reflect.Value, visitor func(string) (string, bool)) error {
	// you'll never be able to substitute on a nil. Check the kind first or you'll accidentally
	// end up panic-ing
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v.IsNil() {
			return nil
		}
	}

	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		// Dereference and recurse into the pointed-to / boxed value.
		err := visitValue(v.Elem(), visitor)
		if err != nil {
			return err
		}
	case reflect.Slice, reflect.Array:
		// Elements obtained via Index() are rewritten through a settable copy.
		vt := v.Type().Elem()
		for i := 0; i < v.Len(); i++ {
			val, err := visitUnsettableValues(vt, v.Index(i), visitor)
			if err != nil {
				return err
			}
			v.Index(i).Set(val)
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			err := visitValue(v.Field(i), visitor)
			if err != nil {
				return err
			}
		}
	case reflect.Map:
		vt := v.Type().Elem()
		for _, oldKey := range v.MapKeys() {
			// Keys and values are both unaddressable in a map, so each goes
			// through visitUnsettableValues and is written back explicitly.
			newKey, err := visitUnsettableValues(oldKey.Type(), oldKey, visitor)
			if err != nil {
				return err
			}

			oldValue := v.MapIndex(oldKey)
			newValue, err := visitUnsettableValues(vt, oldValue, visitor)
			if err != nil {
				return err
			}
			// Delete the old entry first in case the key itself was rewritten.
			v.SetMapIndex(oldKey, reflect.Value{})
			v.SetMapIndex(newKey, newValue)
		}
	case reflect.String:
		if !v.CanSet() {
			return fmt.Errorf("unable to set String value '%v'", v)
		}
		s, asString := visitor(v.String())
		if !asString {
			return fmt.Errorf("attempted to set String field to non-string value '%v'", s)
		}
		v.SetString(s)
	default:
		klog.V(5).Infof("Ignoring non-parameterizable field type '%s': %v", v.Kind(), v)
		return nil
	}
	return nil
}

// visitUnsettableValues creates a copy of the object you want to modify and
returns the modified result func visitUnsettableValues(typeOf reflect.Type, original reflect.Value, visitor func(string) (string, bool)) (reflect.Value, error) { val := reflect.New(typeOf).Elem() existing := original // if the value type is interface, we must resolve it to a concrete value prior to setting it back. if existing.CanInterface() { existing = reflect.ValueOf(existing.Interface()) } switch existing.Kind() { case reflect.String: s, asString := visitor(existing.String()) if asString { val = reflect.ValueOf(s) } else { b := []byte(s) var data interface{} err := json.Unmarshal(b, &data) if err != nil { // the result of substitution may have been an unquoted string value, // which is an error when decoding in json(only "true", "false", and numeric // values can be unquoted), so try wrapping the value in quotes so it will be // properly converted to a string type during decoding. val = reflect.ValueOf(s) } else { val = reflect.ValueOf(data) } } default: if existing.IsValid() && existing.Kind() != reflect.Invalid { val.Set(existing) } visitValue(val, visitor) } return val, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go
vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go
package templateprocessing

import (
	"fmt"
	"regexp"
	"strings"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"

	appsv1 "github.com/openshift/api/apps/v1"
	templatev1 "github.com/openshift/api/template/v1"
	"github.com/openshift/library-go/pkg/legacyapi/legacygroupification"
	. "github.com/openshift/library-go/pkg/template/generator"
)

// match ${KEY}, KEY will be grouped
var stringParameterExp = regexp.MustCompile(`\$\{([a-zA-Z0-9\_]+?)\}`)

// match ${{KEY}} exact match only, KEY will be grouped
var nonStringParameterExp = regexp.MustCompile(`^\$\{\{([a-zA-Z0-9\_]+)\}\}$`)

// Processor processes the Template into the List with substituted parameters
type Processor struct {
	Generators map[string]Generator
}

// NewProcessor creates new Processor and initializes its set of generators.
func NewProcessor(generators map[string]Generator) *Processor {
	return &Processor{Generators: generators}
}

// Process transforms Template object into List object. It generates
// Parameter values using the defined set of generators first, and then it
// substitutes all Parameter expression occurrences with their corresponding
// values (currently in the containers' Environment variables only).
// All problems are accumulated into the returned ErrorList; processing
// continues past individual object failures.
func (p *Processor) Process(template *templatev1.Template) field.ErrorList {
	templateErrors := field.ErrorList{}

	if errs := p.GenerateParameterValues(template); len(errs) > 0 {
		return append(templateErrors, errs...)
	}

	// Place parameters into a map for efficient lookup
	paramMap := make(map[string]templatev1.Parameter)
	for _, param := range template.Parameters {
		paramMap[param.Name] = param
	}

	// Perform parameter substitution on the template's user message. This can be used to
	// instruct a user on next steps for the template.
	template.Message, _ = p.EvaluateParameterSubstitution(paramMap, template.Message)

	// substitute parameters in ObjectLabels - must be done before the template
	// objects themselves are iterated.
	for k, v := range template.ObjectLabels {
		newk, _ := p.EvaluateParameterSubstitution(paramMap, k)
		v, _ = p.EvaluateParameterSubstitution(paramMap, v)
		template.ObjectLabels[newk] = v

		// Drop the old key when substitution changed it.
		if newk != k {
			delete(template.ObjectLabels, k)
		}
	}

	itemPath := field.NewPath("item")
	for i, item := range template.Objects {
		idxPath := itemPath.Index(i)
		var currObj runtime.Object

		if len(item.Raw) > 0 {
			// TODO: use runtime.DecodeList when it returns ValidationErrorList
			decodedObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, item.Raw)
			if err != nil {
				templateErrors = append(templateErrors, field.Invalid(idxPath.Child("objects"), item, fmt.Sprintf("unable to handle object: %v", err)))
				continue
			}
			currObj = decodedObj
		} else {
			// Deep-copy so substitution never mutates the caller's embedded object.
			currObj = item.Object.DeepCopyObject()
		}

		// If an object definition's metadata includes a hardcoded namespace field, the field will be stripped out of
		// the definition during template instantiation.  Namespace fields that contain a ${PARAMETER_REFERENCE}
		// will be left in place, resolved during parameter substitution, and the object will be created in the
		// referenced namespace.
		stripNamespace(currObj)

		newItem, err := p.SubstituteParameters(paramMap, currObj)
		if err != nil {
			templateErrors = append(templateErrors, field.Invalid(idxPath.Child("parameters"), template.Parameters, err.Error()))
		}

		// this changes oapi GVKs to groupified GVKs so they can be submitted to modern, aggregated servers
		// It is done after substitution in case someone substitutes a kind.
		gvk := currObj.GetObjectKind().GroupVersionKind()
		legacygroupification.OAPIToGroupifiedGVK(&gvk)
		newItem.GetObjectKind().SetGroupVersionKind(gvk)

		if err := addObjectLabels(newItem, template.ObjectLabels); err != nil {
			templateErrors = append(templateErrors, field.Invalid(idxPath.Child("labels"),
				template.ObjectLabels, fmt.Sprintf("label could not be applied: %v", err)))
		}

		template.Objects[i] = runtime.RawExtension{Object: newItem}
	}

	return templateErrors
}

// stripNamespace clears a hardcoded metadata namespace on obj; a namespace
// containing a ${PARAMETER} reference is left alone for later substitution.
func stripNamespace(obj runtime.Object) {
	// Remove namespace from the item unless it contains a ${PARAMETER_REFERENCE}
	if itemMeta, err := meta.Accessor(obj); err == nil && len(itemMeta.GetNamespace()) > 0 && !stringParameterExp.MatchString(itemMeta.GetNamespace()) {
		itemMeta.SetNamespace("")
		return
	}
	// TODO: allow meta.Accessor to handle runtime.Unstructured
	if unstruct, ok := obj.(*unstructured.Unstructured); ok && unstruct.Object != nil {
		if obj, ok := unstruct.Object["metadata"]; ok {
			if m, ok := obj.(map[string]interface{}); ok {
				if ns, ok := m["namespace"]; ok {
					if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) {
						m["namespace"] = ""
					}
				}
			}
			return
		}
		// Fallback: some unstructured payloads carry namespace at the top level.
		if ns, ok := unstruct.Object["namespace"]; ok {
			if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) {
				unstruct.Object["namespace"] = ""
				return
			}
		}
	}
}

// GetParameterByName searches for a Parameter in the Template
// based on its name.
func GetParameterByName(t *templatev1.Template, name string) *templatev1.Parameter {
	for i, param := range t.Parameters {
		if param.Name == name {
			return &(t.Parameters[i])
		}
	}
	return nil
}

// EvaluateParameterSubstitution replaces escaped parameters in a string with values from the
// provided map. Returns the substituted value (if any substitution applied) and a boolean
// indicating if the resulting value should be treated as a string(true) or a non-string
// value(false) for purposes of json encoding.
func (p *Processor) EvaluateParameterSubstitution(params map[string]templatev1.Parameter, in string) (string, bool) { out := in // First check if the value matches the "${{KEY}}" substitution syntax, which // means replace and drop the quotes because the parameter value is to be used // as a non-string value. If we hit a match here, we're done because the // "${{KEY}}" syntax is exact match only, it cannot be used in a value like // "FOO_${{KEY}}_BAR", no substitution will be performed if it is used in that way. for _, match := range nonStringParameterExp.FindAllStringSubmatch(in, -1) { if len(match) > 1 { if paramValue, found := params[match[1]]; found { out = strings.Replace(out, match[0], paramValue.Value, 1) return out, false } } } // If we didn't do a non-string substitution above, do normal string substitution // on the value here if it contains a "${KEY}" reference. This substitution does // allow multiple matches and prefix/postfix, eg "FOO_${KEY1}_${KEY2}_BAR" for _, match := range stringParameterExp.FindAllStringSubmatch(in, -1) { if len(match) > 1 { if paramValue, found := params[match[1]]; found { out = strings.Replace(out, match[0], paramValue.Value, 1) } } } return out, true } // SubstituteParameters loops over all values defined in structured // and unstructured types that are children of item. // // Example of Parameter expression: // - ${PARAMETER_NAME} func (p *Processor) SubstituteParameters(params map[string]templatev1.Parameter, item runtime.Object) (runtime.Object, error) { visitObjectStrings(item, func(in string) (string, bool) { return p.EvaluateParameterSubstitution(params, in) }) return item, nil } // GenerateParameterValues generates Value for each Parameter of the given // Template that has Generate field specified where Value is not already // supplied. 
//
// Examples:
//
// from             | value
// -----------------------------
// "test[0-9]{1}x"  | "test7x"
// "[0-1]{8}"       | "01001100"
// "0x[A-F0-9]{4}"  | "0xB3AF"
// "[a-zA-Z0-9]{8}" | "hW4yQU5i"
//
// If an error occurs, the parameter that caused the error is returned along with the error message.
func (p *Processor) GenerateParameterValues(t *templatev1.Template) field.ErrorList {
	var errs field.ErrorList

	for i := range t.Parameters {
		// Take a pointer so the generated Value sticks on the template.
		param := &t.Parameters[i]
		if len(param.Value) > 0 {
			// Explicitly supplied values always win over generation.
			continue
		}
		templatePath := field.NewPath("template").Child("parameters").Index(i)
		if param.Generate != "" {
			generator, ok := p.Generators[param.Generate]
			if !ok {
				err := fmt.Errorf("Unknown generator name '%v' for parameter %s", param.Generate, param.Name)
				errs = append(errs, field.Invalid(templatePath, param.Generate, err.Error()))
				continue
			}
			if generator == nil {
				err := fmt.Errorf("template.parameters[%v]: Invalid '%v' generator for parameter %s", i, param.Generate, param.Name)
				errs = append(errs, field.Invalid(templatePath, param, err.Error()))
				continue
			}
			value, err := generator.GenerateValue(param.From)
			if err != nil {
				errs = append(errs, field.Invalid(templatePath, param, err.Error()))
				continue
			}
			param.Value, ok = value.(string)
			if !ok {
				err := fmt.Errorf("template.parameters[%v]: Unable to convert the generated value '%#v' to string for parameter %s", i, value, param.Name)
				errs = append(errs, field.Invalid(templatePath, param, err.Error()))
				continue
			}
		}
		// A required parameter with neither an explicit nor a generated value is an error.
		if len(param.Value) == 0 && param.Required {
			err := fmt.Errorf("template.parameters[%v]: parameter %s is required and must be specified", i, param.Name)
			errs = append(errs, field.Required(templatePath, err.Error()))
		}
	}

	return errs
}

// addObjectLabels adds new label(s) to a single runtime.Object, overwriting
// existing labels that have the same key.
func addObjectLabels(obj runtime.Object, labels labels.Set) error { if labels == nil { return nil } accessor, err := meta.Accessor(obj) if err != nil { return err } metaLabels := accessor.GetLabels() if metaLabels == nil { metaLabels = make(map[string]string) } for k, v := range labels { metaLabels[k] = v } accessor.SetLabels(metaLabels) switch objType := obj.(type) { case *appsv1.DeploymentConfig: if err := addDeploymentConfigNestedLabels(objType, labels); err != nil { return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) } } return nil } // addDeploymentConfigNestedLabels adds new label(s) to a nested labels of a single DeploymentConfig object func addDeploymentConfigNestedLabels(obj *appsv1.DeploymentConfig, labels labels.Set) error { if obj.Spec.Template == nil { return nil } if obj.Spec.Template.Labels == nil { obj.Spec.Template.Labels = make(map[string]string) } for k, v := range labels { obj.Spec.Template.Labels[k] = v } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go
vendor/github.com/openshift/library-go/pkg/template/generator/generator.go
package generator // Generator is an interface for generating random values // from an input expression type Generator interface { GenerateValue(expression string) (interface{}, error) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go
vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go
package generator import ( "fmt" "math/rand" "regexp" "strconv" "strings" ) // ExpressionValueGenerator implements Generator interface. It generates // random string based on the input expression. The input expression is // a string, which may contain "[a-zA-Z0-9]{length}" constructs, // defining range and length of the result random characters. // // Examples: // // from | value // ----------------------------- // "test[0-9]{1}x" | "test7x" // "[0-1]{8}" | "01001100" // "0x[A-F0-9]{4}" | "0xB3AF" // "[a-zA-Z0-9]{8}" | "hW4yQU5i" // // TODO: Support more regexp constructs. type ExpressionValueGenerator struct { seed *rand.Rand } const ( Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" Numerals = "0123456789" Symbols = "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:`" ASCII = Alphabet + Numerals + Symbols ) var ( rangeExp = regexp.MustCompile(`([\\]?[a-zA-Z0-9]\-?[a-zA-Z0-9]?)`) generatorsExp = regexp.MustCompile(`\[([a-zA-Z0-9\-\\]+)\](\{([0-9]+)\})`) expressionExp = regexp.MustCompile(`\[(\\w|\\d|\\a|\\A)|([a-zA-Z0-9]\-[a-zA-Z0-9])+\]`) ) // NewExpressionValueGenerator creates new ExpressionValueGenerator. func NewExpressionValueGenerator(seed *rand.Rand) ExpressionValueGenerator { return ExpressionValueGenerator{seed: seed} } // GenerateValue generates random string based on the input expression. // The input expression is a pseudo-regex formatted string. See // ExpressionValueGenerator for more details. func (g ExpressionValueGenerator) GenerateValue(expression string) (interface{}, error) { for { r := generatorsExp.FindStringIndex(expression) if r == nil { break } ranges, length, err := rangesAndLength(expression[r[0]:r[1]]) if err != nil { return "", err } err = replaceWithGenerated( &expression, expression[r[0]:r[1]], findExpressionPos(ranges), length, g.seed, ) if err != nil { return "", err } } return expression, nil } // alphabetSlice produces a string slice that contains all characters within // a specified range. 
func alphabetSlice(from, to byte) (string, error) { leftPos := strings.Index(ASCII, string(from)) rightPos := strings.LastIndex(ASCII, string(to)) if leftPos > rightPos { return "", fmt.Errorf("invalid range specified: %s-%s", string(from), string(to)) } return ASCII[leftPos:rightPos], nil } // replaceWithGenerated replaces all occurrences of the given expression // in the string with random characters of the specified range and length. func replaceWithGenerated(s *string, expression string, ranges [][]byte, length int, seed *rand.Rand) error { var alphabet string for _, r := range ranges { switch string(r[0]) + string(r[1]) { case `\w`: alphabet += Alphabet + Numerals + "_" case `\d`: alphabet += Numerals case `\a`: alphabet += Alphabet + Numerals case `\A`: alphabet += Symbols default: slice, err := alphabetSlice(r[0], r[1]) if err != nil { return err } alphabet += slice } } result := make([]byte, length) alphabet = removeDuplicateChars(alphabet) for i := 0; i < length; i++ { result[i] = alphabet[seed.Intn(len(alphabet))] } *s = strings.Replace(*s, expression, string(result), 1) return nil } // removeDuplicateChars removes the duplicate characters from the data slice func removeDuplicateChars(input string) string { data := []byte(input) length := len(data) - 1 for i := 0; i < length; i++ { for j := i + 1; j <= length; j++ { if data[i] == data[j] { data[j] = data[length] data = data[0:length] length-- j-- } } } return string(data) } // findExpressionPos searches the given string for the valid expressions // and returns their corresponding indexes. func findExpressionPos(s string) [][]byte { matches := rangeExp.FindAllStringIndex(s, -1) result := make([][]byte, len(matches)) for i, r := range matches { result[i] = []byte{s[r[0]], s[r[1]-1]} } return result } // rangesAndLength extracts the expression ranges (eg. [A-Z0-9]) and length // (eg. {3}). This helper function also validates the expression syntax and // its length (must be within 1..255). 
func rangesAndLength(s string) (string, int, error) { expr := s[0:strings.LastIndex(s, "{")] if !expressionExp.MatchString(expr) { return "", 0, fmt.Errorf("malformed expresion syntax: %s", expr) } length, _ := strconv.Atoi(s[strings.LastIndex(s, "{")+1 : len(s)-1]) // TODO: We do need to set a better limit for the number of generated characters. if length > 0 && length <= 255 { return expr, length, nil } return "", 0, fmt.Errorf("range must be within [1-255] characters (%d)", length) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go
vendor/github.com/openshift/library-go/pkg/template/generator/doc.go
// Package generator defines the Generator interface and implements // some random value generators. package generator
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go
vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go
package legacygroupification import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" appsv1 "github.com/openshift/api/apps/v1" authorizationv1 "github.com/openshift/api/authorization/v1" buildv1 "github.com/openshift/api/build/v1" imagev1 "github.com/openshift/api/image/v1" networkv1 "github.com/openshift/api/network/v1" oauthv1 "github.com/openshift/api/oauth/v1" projectv1 "github.com/openshift/api/project/v1" quotav1 "github.com/openshift/api/quota/v1" routev1 "github.com/openshift/api/route/v1" securityv1 "github.com/openshift/api/security/v1" templatev1 "github.com/openshift/api/template/v1" userv1 "github.com/openshift/api/user/v1" ) // deprecated func IsOAPI(gvk schema.GroupVersionKind) bool { if len(gvk.Group) > 0 { return false } _, ok := oapiKindsToGroup[gvk.Kind] return ok } // deprecated func OAPIToGroupifiedGVK(gvk *schema.GroupVersionKind) { if len(gvk.Group) > 0 { return } newGroup, ok := oapiKindsToGroup[gvk.Kind] if !ok { return } gvk.Group = newGroup } // deprecated func OAPIToGroupified(uncast runtime.Object, gvk *schema.GroupVersionKind) { if len(gvk.Group) > 0 { return } switch obj := uncast.(type) { case *unstructured.Unstructured: newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) if len(newGroup) > 0 { gvk.Group = newGroup uncast.GetObjectKind().SetGroupVersionKind(*gvk) } case *unstructured.UnstructuredList: newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) if len(newGroup) > 0 { gvk.Group = newGroup uncast.GetObjectKind().SetGroupVersionKind(*gvk) } case *appsv1.DeploymentConfig, *appsv1.DeploymentConfigList, *appsv1.DeploymentConfigRollback, *appsv1.DeploymentLog, *appsv1.DeploymentRequest: gvk.Group = appsv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *authorizationv1.ClusterRoleBinding, *authorizationv1.ClusterRoleBindingList, *authorizationv1.ClusterRole, *authorizationv1.ClusterRoleList, *authorizationv1.Role, 
*authorizationv1.RoleList, *authorizationv1.RoleBinding, *authorizationv1.RoleBindingList, *authorizationv1.RoleBindingRestriction, *authorizationv1.RoleBindingRestrictionList, *authorizationv1.SubjectRulesReview, *authorizationv1.SelfSubjectRulesReview, *authorizationv1.ResourceAccessReview, *authorizationv1.LocalResourceAccessReview, *authorizationv1.SubjectAccessReview, *authorizationv1.LocalSubjectAccessReview: gvk.Group = authorizationv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *buildv1.BuildConfig, *buildv1.BuildConfigList, *buildv1.Build, *buildv1.BuildList, *buildv1.BuildLog, *buildv1.BuildRequest, *buildv1.BinaryBuildRequestOptions: gvk.Group = buildv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *imagev1.Image, *imagev1.ImageList, *imagev1.ImageSignature, *imagev1.ImageStreamImage, *imagev1.ImageStreamImport, *imagev1.ImageStreamMapping, *imagev1.ImageStream, *imagev1.ImageStreamList, *imagev1.ImageStreamTag: gvk.Group = imagev1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *networkv1.ClusterNetwork, *networkv1.ClusterNetworkList, *networkv1.NetNamespace, *networkv1.NetNamespaceList, *networkv1.HostSubnet, *networkv1.HostSubnetList, *networkv1.EgressNetworkPolicy, *networkv1.EgressNetworkPolicyList: gvk.Group = networkv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *projectv1.Project, *projectv1.ProjectList, *projectv1.ProjectRequest: gvk.Group = projectv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *quotav1.ClusterResourceQuota, *quotav1.ClusterResourceQuotaList, *quotav1.AppliedClusterResourceQuota, *quotav1.AppliedClusterResourceQuotaList: gvk.Group = quotav1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *oauthv1.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeTokenList, *oauthv1.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorizationList, *oauthv1.OAuthClient, *oauthv1.OAuthClientList, *oauthv1.OAuthAccessToken, 
*oauthv1.OAuthAccessTokenList: gvk.Group = oauthv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *routev1.Route, *routev1.RouteList: gvk.Group = routev1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *securityv1.SecurityContextConstraints, *securityv1.SecurityContextConstraintsList, *securityv1.PodSecurityPolicySubjectReview, *securityv1.PodSecurityPolicySelfSubjectReview, *securityv1.PodSecurityPolicyReview: gvk.Group = securityv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *templatev1.Template, *templatev1.TemplateList: gvk.Group = templatev1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) case *userv1.Group, *userv1.GroupList, *userv1.Identity, *userv1.IdentityList, *userv1.UserIdentityMapping, *userv1.User, *userv1.UserList: gvk.Group = userv1.GroupName uncast.GetObjectKind().SetGroupVersionKind(*gvk) } } var oapiKindsToGroup = map[string]string{ "DeploymentConfigRollback": "apps.openshift.io", "DeploymentConfig": "apps.openshift.io", "DeploymentConfigList": "apps.openshift.io", "DeploymentLog": "apps.openshift.io", "DeploymentRequest": "apps.openshift.io", "ClusterRoleBinding": "authorization.openshift.io", "ClusterRoleBindingList": "authorization.openshift.io", "ClusterRole": "authorization.openshift.io", "ClusterRoleList": "authorization.openshift.io", "RoleBindingRestriction": "authorization.openshift.io", "RoleBindingRestrictionList": "authorization.openshift.io", "RoleBinding": "authorization.openshift.io", "RoleBindingList": "authorization.openshift.io", "Role": "authorization.openshift.io", "RoleList": "authorization.openshift.io", "SubjectRulesReview": "authorization.openshift.io", "SelfSubjectRulesReview": "authorization.openshift.io", "ResourceAccessReview": "authorization.openshift.io", "LocalResourceAccessReview": "authorization.openshift.io", "SubjectAccessReview": "authorization.openshift.io", "LocalSubjectAccessReview": "authorization.openshift.io", "BuildConfig": 
"build.openshift.io", "BuildConfigList": "build.openshift.io", "Build": "build.openshift.io", "BuildList": "build.openshift.io", "BinaryBuildRequestOptions": "build.openshift.io", "BuildLog": "build.openshift.io", "BuildRequest": "build.openshift.io", "Image": "image.openshift.io", "ImageList": "image.openshift.io", "ImageSignature": "image.openshift.io", "ImageStreamImage": "image.openshift.io", "ImageStreamImport": "image.openshift.io", "ImageStreamMapping": "image.openshift.io", "ImageStream": "image.openshift.io", "ImageStreamList": "image.openshift.io", "ImageStreamTag": "image.openshift.io", "ImageStreamTagList": "image.openshift.io", "ClusterNetwork": "network.openshift.io", "ClusterNetworkList": "network.openshift.io", "EgressNetworkPolicy": "network.openshift.io", "EgressNetworkPolicyList": "network.openshift.io", "HostSubnet": "network.openshift.io", "HostSubnetList": "network.openshift.io", "NetNamespace": "network.openshift.io", "NetNamespaceList": "network.openshift.io", "OAuthAccessToken": "oauth.openshift.io", "OAuthAccessTokenList": "oauth.openshift.io", "OAuthAuthorizeToken": "oauth.openshift.io", "OAuthAuthorizeTokenList": "oauth.openshift.io", "OAuthClientAuthorization": "oauth.openshift.io", "OAuthClientAuthorizationList": "oauth.openshift.io", "OAuthClient": "oauth.openshift.io", "OAuthClientList": "oauth.openshift.io", "Project": "project.openshift.io", "ProjectList": "project.openshift.io", "ProjectRequest": "project.openshift.io", "ClusterResourceQuota": "quota.openshift.io", "ClusterResourceQuotaList": "quota.openshift.io", "AppliedClusterResourceQuota": "quota.openshift.io", "AppliedClusterResourceQuotaList": "quota.openshift.io", "Route": "route.openshift.io", "RouteList": "route.openshift.io", "SecurityContextConstraints": "security.openshift.io", "SecurityContextConstraintsList": "security.openshift.io", "PodSecurityPolicySubjectReview": "security.openshift.io", "PodSecurityPolicySelfSubjectReview": "security.openshift.io", 
"PodSecurityPolicyReview": "security.openshift.io", "Template": "template.openshift.io", "TemplateList": "template.openshift.io", "Group": "user.openshift.io", "GroupList": "user.openshift.io", "Identity": "user.openshift.io", "IdentityList": "user.openshift.io", "UserIdentityMapping": "user.openshift.io", "User": "user.openshift.io", "UserList": "user.openshift.io", } func fixOAPIGroupKindInTopLevelUnstructured(obj map[string]interface{}) string { kind, ok := obj["kind"] if !ok { return "" } kindStr, ok := kind.(string) if !ok { return "" } newGroup, ok := oapiKindsToGroup[kindStr] if !ok { return "" } apiVersion, ok := obj["apiVersion"] if !ok { return newGroup } apiVersionStr, ok := apiVersion.(string) if !ok { return newGroup } if apiVersionStr != "v1" { return newGroup } obj["apiVersion"] = newGroup + "/v1" return newGroup }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/beorn7/perks/quantile/stream.go
vendor/github.com/beorn7/perks/quantile/stream.go
// Package quantile computes approximate quantiles over an unbounded data // stream within low memory and CPU bounds. // // A small amount of accuracy is traded to achieve the above properties. // // Multiple streams can be merged before calling Query to generate a single set // of results. This is meaningful when the streams represent the same type of // data. See Merge and Samples. // // For more detailed information about the algorithm used, see: // // Effective Computation of Biased Quantiles over Data Streams // // http://www.cs.rutgers.edu/~muthu/bquant.pdf package quantile import ( "math" "sort" ) // Sample holds an observed value and meta information for compression. JSON // tags have been added for convenience. type Sample struct { Value float64 `json:",string"` Width float64 `json:",string"` Delta float64 `json:",string"` } // Samples represents a slice of samples. It implements sort.Interface. type Samples []Sample func (a Samples) Len() int { return len(a) } func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } type invariant func(s *stream, r float64) float64 // NewLowBiased returns an initialized Stream for low-biased quantiles // (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but // error guarantees can still be given even for the lower ranks of the data // distribution. // // The provided epsilon is a relative error, i.e. the true quantile of a value // returned by a query is guaranteed to be within (1±Epsilon)*Quantile. // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error // properties. func NewLowBiased(epsilon float64) *Stream { ƒ := func(s *stream, r float64) float64 { return 2 * epsilon * r } return newStream(ƒ) } // NewHighBiased returns an initialized Stream for high-biased quantiles // (e.g. 
0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but // error guarantees can still be given even for the higher ranks of the data // distribution. // // The provided epsilon is a relative error, i.e. the true quantile of a value // returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error // properties. func NewHighBiased(epsilon float64) *Stream { ƒ := func(s *stream, r float64) float64 { return 2 * epsilon * (s.n - r) } return newStream(ƒ) } // NewTargeted returns an initialized Stream concerned with a particular set of // quantile values that are supplied a priori. Knowing these a priori reduces // space and computation time. The targets map maps the desired quantiles to // their absolute errors, i.e. the true quantile of a value returned by a query // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. func NewTargeted(targetMap map[float64]float64) *Stream { // Convert map to slice to avoid slow iterations on a map. // ƒ is called on the hot path, so converting the map to a slice // beforehand results in significant CPU savings. targets := targetMapToSlice(targetMap) ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 for _, t := range targets { if t.quantile*s.n <= r { f = (2 * t.epsilon * r) / t.quantile } else { f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) } if f < m { m = f } } return m } return newStream(ƒ) } type target struct { quantile float64 epsilon float64 } func targetMapToSlice(targetMap map[float64]float64) []target { targets := make([]target, 0, len(targetMap)) for quantile, epsilon := range targetMap { t := target{ quantile: quantile, epsilon: epsilon, } targets = append(targets, t) } return targets } // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. 
Take care when using across multiple goroutines. type Stream struct { *stream b Samples sorted bool } func newStream(ƒ invariant) *Stream { x := &stream{ƒ: ƒ} return &Stream{x, make(Samples, 0, 500), true} } // Insert inserts v into the stream. func (s *Stream) Insert(v float64) { s.insert(Sample{Value: v, Width: 1}) } func (s *Stream) insert(sample Sample) { s.b = append(s.b, sample) s.sorted = false if len(s.b) == cap(s.b) { s.flush() } } // Query returns the computed qth percentiles value. If s was created with // NewTargeted, and q is not in the set of quantiles provided a priori, Query // will return an unspecified result. func (s *Stream) Query(q float64) float64 { if !s.flushed() { // Fast path when there hasn't been enough data for a flush; // this also yields better accuracy for small sets of data. l := len(s.b) if l == 0 { return 0 } i := int(math.Ceil(float64(l) * q)) if i > 0 { i -= 1 } s.maybeSort() return s.b[i].Value } s.flush() return s.stream.query(q) } // Merge merges samples into the underlying streams samples. This is handy when // merging multiple streams from separate threads, database shards, etc. // // ATTENTION: This method is broken and does not yield correct results. The // underlying algorithm is not capable of merging streams correctly. func (s *Stream) Merge(samples Samples) { sort.Sort(samples) s.stream.merge(samples) } // Reset reinitializes and clears the list reusing the samples buffer memory. func (s *Stream) Reset() { s.stream.reset() s.b = s.b[:0] } // Samples returns stream samples held by s. func (s *Stream) Samples() Samples { if !s.flushed() { return s.b } s.flush() return s.stream.samples() } // Count returns the total number of samples observed in the stream // since initialization. 
func (s *Stream) Count() int { return len(s.b) + s.stream.count() } func (s *Stream) flush() { s.maybeSort() s.stream.merge(s.b) s.b = s.b[:0] } func (s *Stream) maybeSort() { if !s.sorted { s.sorted = true sort.Sort(s.b) } } func (s *Stream) flushed() bool { return len(s.stream.l) > 0 } type stream struct { n float64 l []Sample ƒ invariant } func (s *stream) reset() { s.l = s.l[:0] s.n = 0 } func (s *stream) insert(v float64) { s.merge(Samples{{v, 1, 0}}) } func (s *stream) merge(samples Samples) { // TODO(beorn7): This tries to merge not only individual samples, but // whole summaries. The paper doesn't mention merging summaries at // all. Unittests show that the merging is inaccurate. Find out how to // do merges properly. var r float64 i := 0 for _, sample := range samples { for ; i < len(s.l); i++ { c := s.l[i] if c.Value > sample.Value { // Insert at position i. s.l = append(s.l, Sample{}) copy(s.l[i+1:], s.l[i:]) s.l[i] = Sample{ sample.Value, sample.Width, math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), // TODO(beorn7): How to calculate delta correctly? } i++ goto inserted } r += c.Width } s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) i++ inserted: s.n += sample.Width r += sample.Width } s.compress() } func (s *stream) count() int { return int(s.n) } func (s *stream) query(q float64) float64 { t := math.Ceil(q * s.n) t += math.Ceil(s.ƒ(s, t) / 2) p := s.l[0] var r float64 for _, c := range s.l[1:] { r += p.Width if r+c.Width+c.Delta > t { return p.Value } p = c } return p.Value } func (s *stream) compress() { if len(s.l) < 2 { return } x := s.l[len(s.l)-1] xi := len(s.l) - 1 r := s.n - 1 - x.Width for i := len(s.l) - 2; i >= 0; i-- { c := s.l[i] if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { x.Width += c.Width s.l[xi] = x // Remove element at i. copy(s.l[i:], s.l[i+1:]) s.l = s.l[:len(s.l)-1] xi -= 1 } else { x = c xi = i } r -= c.Width } } func (s *stream) samples() Samples { samples := make(Samples, len(s.l)) copy(samples, s.l) return samples }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/munnerz/goautoneg/autoneg.go
vendor/github.com/munnerz/goautoneg/autoneg.go
/* HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html Copyright (c) 2011, Open Knowledge Foundation Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Open Knowledge Foundation Ltd. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package goautoneg import ( "sort" "strconv" "strings" ) // Structure to represent a clause in an HTTP Accept Header type Accept struct { Type, SubType string Q float64 Params map[string]string } // acceptSlice is defined to implement sort interface. 
type acceptSlice []Accept func (slice acceptSlice) Len() int { return len(slice) } func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true } if ai.Type != "*" && aj.Type == "*" { return true } if ai.SubType != "*" && aj.SubType == "*" { return true } return false } func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func stringTrimSpaceCutset(r rune) bool { return r == ' ' } func nextSplitElement(s, sep string) (item string, remaining string) { if index := strings.Index(s, sep); index != -1 { return s[:index], s[index+1:] } return s, "" } // Parse an Accept Header string returning a sorted list // of clauses func ParseAccept(header string) acceptSlice { partsCount := 0 remaining := header for len(remaining) > 0 { partsCount++ _, remaining = nextSplitElement(remaining, ",") } accept := make(acceptSlice, 0, partsCount) remaining = header var part string for len(remaining) > 0 { part, remaining = nextSplitElement(remaining, ",") part = strings.TrimFunc(part, stringTrimSpaceCutset) a := Accept{ Q: 1.0, } sp, remainingPart := nextSplitElement(part, ";") sp0, spRemaining := nextSplitElement(sp, "/") a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { case len(spRemaining) == 0: if a.Type == "*" { a.SubType = "*" } else { continue } default: var sp1 string sp1, spRemaining = nextSplitElement(spRemaining, "/") if len(spRemaining) > 0 { continue } a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } if len(remainingPart) == 0 { accept = append(accept, a) continue } a.Params = make(map[string]string) for len(remainingPart) > 0 { sp, remainingPart = nextSplitElement(remainingPart, ";") sp0, spRemaining = nextSplitElement(sp, "=") if len(spRemaining) == 0 { continue } var sp1 string sp1, spRemaining = nextSplitElement(spRemaining, "=") if len(spRemaining) != 0 { continue } token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { a.Q, _ = strconv.ParseFloat(sp1, 32) 
} else { a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } sort.Sort(accept) return accept } // Negotiate the most appropriate content_type given the accept header // and a list of alternatives. func Negotiate(header string, alternatives []string) (content_type string) { asp := make([][]string, 0, len(alternatives)) for _, ctype := range alternatives { asp = append(asp, strings.SplitN(ctype, "/", 2)) } for _, clause := range ParseAccept(header) { for i, ctsp := range asp { if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { content_type = alternatives[i] return } if clause.Type == ctsp[0] && clause.SubType == "*" { content_type = alternatives[i] return } if clause.Type == "*" && clause.SubType == "*" { content_type = alternatives[i] return } } } return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/client.go
vendor/github.com/vmware/govmomi/client.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

/*
This package is the root package of the govmomi library.

The library is structured as follows:

# Package vim25

The minimal usable functionality is available through the vim25 package.
It contains subpackages that contain generated types, managed objects, and
all available methods. The vim25 package is entirely independent of the
other packages in the govmomi tree -- it has no dependencies on its peers.
The vim25 package itself contains a client structure that is passed
around throughout the entire library. It abstracts a session and its
immutable state. See the vim25 package for more information.

# Package session

The session package contains an abstraction for the session manager that
allows a user to login and logout. It also provides access to the current
session (i.e. to determine if the user is in fact logged in)

# Package object

The object package contains wrappers for a selection of managed objects.
The constructors of these objects all take a *vim25.Client, which they
pass along to derived objects, if applicable.

# Package govc

The govc package contains the govc CLI. The code in this tree is not
intended to be used as a library. Any functionality that govc contains
that _could_ be used as a library function but isn't, _should_ live in a
root level package.

# Other packages

Other packages, such as "event", "guest", or "license", provide wrappers
for the respective subsystems. They are typically not needed in normal
workflows so are kept outside the object package.
*/
package govmomi

import (
	"context"
	"net/url"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/session"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

// Client pairs a vim25.Client with a session.Manager so callers can both
// authenticate and issue API calls through a single value.
type Client struct {
	*vim25.Client

	SessionManager *session.Manager
}

// NewClient creates a new client from a URL. The client authenticates with the
// server with username/password before returning if the URL contains user information.
func NewClient(ctx context.Context, u *url.URL, insecure bool) (*Client, error) {
	soapClient := soap.NewClient(u, insecure)
	vimClient, err := vim25.NewClient(ctx, soapClient)
	if err != nil {
		return nil, err
	}

	c := &Client{
		Client:         vimClient,
		SessionManager: session.NewManager(vimClient),
	}

	// Only login if the URL contains user information.
	if u.User != nil {
		err = c.Login(ctx, u.User)
		if err != nil {
			return nil, err
		}
	}

	return c, nil
}

// Login dispatches to the SessionManager.
func (c *Client) Login(ctx context.Context, u *url.Userinfo) error {
	return c.SessionManager.Login(ctx, u)
}

// Logout dispatches to the SessionManager.
func (c *Client) Logout(ctx context.Context) error {
	// Close any idle connections after logging out.
	defer c.Client.CloseIdleConnections()
	return c.SessionManager.Logout(ctx)
}

// PropertyCollector returns the session's default property collector.
func (c *Client) PropertyCollector() *property.Collector {
	return property.DefaultCollector(c.Client)
}

// RetrieveOne dispatches to the Retrieve function on the default property collector.
func (c *Client) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, p []string, dst any) error {
	return c.PropertyCollector().RetrieveOne(ctx, obj, p, dst)
}

// Retrieve dispatches to the Retrieve function on the default property collector.
func (c *Client) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, p []string, dst any) error {
	return c.PropertyCollector().Retrieve(ctx, objs, p, dst)
}

// Wait dispatches to property.Wait.
func (c *Client) Wait(ctx context.Context, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error {
	return property.Wait(ctx, c.PropertyCollector(), obj, ps, f)
}

// IsVC returns true if we are connected to a vCenter
func (c *Client) IsVC() bool {
	return c.Client.IsVC()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/client.go
vendor/github.com/vmware/govmomi/vim25/client.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package vim25

import (
	"context"
	"encoding/json"
	"strings"

	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

const (
	// Namespace is the default SOAP namespace for the vim25 API.
	Namespace = "vim25"
	// Version is the default API version requested by this client.
	Version = "9.0.0.0"
	// Path is the SDK endpoint path on the server.
	Path = "/sdk"
)

var (
	// ServiceInstance is the well-known singleton root object of the API.
	ServiceInstance = types.ManagedObjectReference{
		Type:  "ServiceInstance",
		Value: "ServiceInstance",
	}
)

// Client is a tiny wrapper around the vim25/soap Client that stores session
// specific state (i.e. state that only needs to be retrieved once after the
// client has been created). This means the client can be reused after
// serialization without performing additional requests for initialization.
type Client struct {
	*soap.Client

	ServiceContent types.ServiceContent

	// RoundTripper is a separate field such that the client's implementation of
	// the RoundTripper interface can be wrapped by separate implementations for
	// extra functionality (for example, reauthentication on session timeout).
	RoundTripper soap.RoundTripper
}

// NewClient creates and returns a new client with the ServiceContent field
// filled in.
func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) {
	c := Client{
		RoundTripper: rt,
	}

	// Set client if it happens to be a soap.Client
	if sc, ok := rt.(*soap.Client); ok {
		c.Client = sc

		// Default/normalize the namespace and version on the underlying
		// soap.Client when the caller did not set them.
		if c.Namespace == "" {
			c.Namespace = "urn:" + Namespace
		} else if !strings.Contains(c.Namespace, ":") {
			c.Namespace = "urn:" + c.Namespace // ensure valid URI format
		}

		if c.Version == "" {
			c.Version = Version
		}
	}

	var err error
	c.ServiceContent, err = methods.GetServiceContent(ctx, rt)
	if err != nil {
		return nil, err
	}

	return &c, nil
}

// RoundTrip dispatches to the RoundTripper field.
func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error {
	return c.RoundTripper.RoundTrip(ctx, req, res)
}

// marshaledClient is the subset of Client state that survives a JSON
// round trip.
type marshaledClient struct {
	SoapClient     *soap.Client
	ServiceContent types.ServiceContent
}

// MarshalJSON serializes the client's persistent state.
func (c *Client) MarshalJSON() ([]byte, error) {
	m := marshaledClient{
		SoapClient:     c.Client,
		ServiceContent: c.ServiceContent,
	}

	return json.Marshal(m)
}

// UnmarshalJSON restores a client serialized by MarshalJSON. The restored
// soap.Client doubles as the RoundTripper.
func (c *Client) UnmarshalJSON(b []byte) error {
	var m marshaledClient

	err := json.Unmarshal(b, &m)
	if err != nil {
		return err
	}

	*c = Client{
		Client:         m.SoapClient,
		ServiceContent: m.ServiceContent,
		RoundTripper:   m.SoapClient,
	}

	return nil
}

// Valid returns whether or not the client is valid and ready for use.
// This should be called after unmarshalling the client.
func (c *Client) Valid() bool {
	if c == nil {
		return false
	}

	if c.Client == nil {
		return false
	}

	// Use arbitrary pointer field in the service content.
	// Doesn't matter which one, as long as it is populated by default.
	if c.ServiceContent.SessionManager == nil {
		return false
	}

	return true
}

// Path returns vim25.Path (see cache.Client)
func (c *Client) Path() string {
	return Path
}

// IsVC returns true if we are connected to a vCenter
func (c *Client) IsVC() bool {
	return c.ServiceContent.About.ApiType == "VirtualCenter"
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/retry.go
vendor/github.com/vmware/govmomi/vim25/retry.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package vim25

import (
	"context"
	"time"

	"github.com/vmware/govmomi/vim25/soap"
)

// RetryFunc is consulted after a failed round trip. It reports whether to
// retry and, if so, how long to sleep before the next attempt.
type RetryFunc func(err error) (retry bool, delay time.Duration)

// TemporaryNetworkError is deprecated. Use Retry() with RetryTemporaryNetworkError and retryAttempts instead.
func TemporaryNetworkError(n int) RetryFunc {
	return func(err error) (bool, time.Duration) {
		if IsTemporaryNetworkError(err) {
			// Don't retry if we're out of tries.
			// NOTE(review): n is captured by the closure, so the remaining
			// budget is shared across all invocations of the returned func.
			if n--; n <= 0 {
				return false, 0
			}
			return true, 0
		}
		return false, 0
	}
}

// RetryTemporaryNetworkError returns a RetryFunc that returns IsTemporaryNetworkError(err)
func RetryTemporaryNetworkError(err error) (bool, time.Duration) {
	return IsTemporaryNetworkError(err), 0
}

// IsTemporaryNetworkError returns false unless the error implements
// a Temporary() bool method such as url.Error and net.Error.
// Otherwise, returns the value of the Temporary() method.
func IsTemporaryNetworkError(err error) bool {
	t, ok := err.(interface {
		// Temporary is implemented by url.Error and net.Error
		Temporary() bool
	})
	if !ok {
		// Not a Temporary error.
		return false
	}

	return t.Temporary()
}

// retry is a soap.RoundTripper decorator that re-dispatches failed calls.
type retry struct {
	roundTripper soap.RoundTripper

	// fn is a custom function that is called when an error occurs.
	// It returns whether or not to retry, and if so, how long to
	// delay before retrying.
	fn RetryFunc

	// maxRetryAttempts caps the total number of attempts (not just retries).
	maxRetryAttempts int
}

// Retry wraps the specified soap.RoundTripper and invokes the
// specified RetryFunc. The RetryFunc returns whether or not to
// retry the call, and if so, how long to wait before retrying. If
// the result of this function is to not retry, the original error
// is returned from the RoundTrip function.
// The soap.RoundTripper will return the original error if retryAttempts is specified and reached.
func Retry(roundTripper soap.RoundTripper, fn RetryFunc, retryAttempts ...int) soap.RoundTripper {
	r := &retry{
		roundTripper: roundTripper,
		fn:           fn,
		// Default of 1 means a single attempt (no retries) unless
		// retryAttempts overrides it below.
		maxRetryAttempts: 1,
	}

	if len(retryAttempts) == 1 {
		r.maxRetryAttempts = retryAttempts[0]
	}

	return r
}

// RoundTrip dispatches to the wrapped RoundTripper, retrying per the
// configured RetryFunc up to maxRetryAttempts total attempts.
func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error {
	var err error

	for attempt := 0; attempt < r.maxRetryAttempts; attempt++ {
		err = r.roundTripper.RoundTrip(ctx, req, res)
		if err == nil {
			break
		}

		// Invoke retry function to see if another attempt should be made.
		if retry, delay := r.fn(err); retry {
			time.Sleep(delay)
			continue
		}

		break
	}

	return err
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/doc.go
vendor/github.com/vmware/govmomi/vim25/doc.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

/*
Package vim25 provides a minimal client implementation to use with other
packages in the vim25 tree. The code in this package intentionally does not
take any dependencies outside the vim25 tree.

The client implementation in this package embeds the soap.Client structure.
Additionally, it stores the value of the session's ServiceContent object. This
object stores references to a variety of subsystems, such as the root property
collector, the session manager, and the search index. The client is fully
functional after serialization and deserialization, without the need for
additional requests for initialization.
*/
package vim25
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/extra.go
vendor/github.com/vmware/govmomi/vim25/mo/extra.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

// IsManagedEntity is implemented by managed object types that embed
// ManagedEntity, exposing the embedded value. The per-type methods below
// all simply return the embedded ManagedEntity field.
type IsManagedEntity interface {
	GetManagedEntity() ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m ComputeResource) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m Datacenter) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m Datastore) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m DistributedVirtualSwitch) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m DistributedVirtualPortgroup) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m Folder) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m HostSystem) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m Network) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m ResourcePool) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}

// GetManagedEntity returns the embedded ManagedEntity.
func (m VirtualMachine) GetManagedEntity() ManagedEntity {
	return m.ManagedEntity
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/registry.go
vendor/github.com/vmware/govmomi/vim25/mo/registry.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

import (
	"reflect"

	"github.com/vmware/govmomi/vim25/types"
)

// t is the registry mapping managed object type names to their reflect.Type.
// It is populated by init funcs across this package and consulted by
// typeInfoForType, IsManagedObjectType and Value.
var t = map[string]reflect.Type{}

// TODO: 9.0 mo below, not included in the generate mo/mo.go, since the generator still uses older rbvmomi vmodl.db

// DirectPathProfileManager is a hand-written managed object type not yet
// emitted by the generator (see TODO above).
type DirectPathProfileManager struct {
	Self types.ManagedObjectReference `json:"self"`
}

// Reference returns the managed object reference for this instance.
func (m DirectPathProfileManager) Reference() types.ManagedObjectReference {
	return m.Self
}

func init() {
	t["DirectPathProfileManager"] = reflect.TypeOf((*DirectPathProfileManager)(nil)).Elem()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go
vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

// Ancestors returns the entire ancestry tree of a specified managed object.
// The return value includes the root node and the specified object itself.
func Ancestors(ctx context.Context, rt soap.RoundTripper, pc, obj types.ManagedObjectReference) ([]ManagedEntity, error) {
	// Traversal spec: follow the 'parent' chain from obj upward, plus the
	// VirtualMachine 'parentVApp' edge for VMs inside a vApp.
	ospec := types.ObjectSpec{
		Obj: obj,
		SelectSet: []types.BaseSelectionSpec{
			&types.TraversalSpec{
				SelectionSpec: types.SelectionSpec{Name: "traverseParent"},
				Type:          "ManagedEntity",
				Path:          "parent",
				Skip:          types.NewBool(false),
				SelectSet: []types.BaseSelectionSpec{
					&types.SelectionSpec{Name: "traverseParent"},
				},
			},
			&types.TraversalSpec{
				SelectionSpec: types.SelectionSpec{},
				Type:          "VirtualMachine",
				Path:          "parentVApp",
				Skip:          types.NewBool(false),
				SelectSet: []types.BaseSelectionSpec{
					&types.SelectionSpec{Name: "traverseParent"},
				},
			},
		},
		Skip: types.NewBool(false),
	}

	// Only collect the properties needed to stitch the tree together.
	pspec := []types.PropertySpec{
		{
			Type:    "ManagedEntity",
			PathSet: []string{"name", "parent"},
		},
		{
			Type:    "VirtualMachine",
			PathSet: []string{"parentVApp"},
		},
	}

	req := types.RetrieveProperties{
		This: pc,
		SpecSet: []types.PropertyFilterSpec{
			{
				ObjectSet: []types.ObjectSpec{ospec},
				PropSet:   pspec,
			},
		},
	}

	var ifaces []any

	err := RetrievePropertiesForRequest(ctx, rt, req, &ifaces)
	if err != nil {
		return nil, err
	}

	var out []ManagedEntity

	// Build ancestry tree by iteratively finding a new child.
	for len(out) < len(ifaces) {
		var find types.ManagedObjectReference

		if len(out) > 0 {
			find = out[len(out)-1].Self
		}

		// Find entity we're looking for given the last entity in the current tree.
		for _, iface := range ifaces {
			me := iface.(IsManagedEntity).GetManagedEntity()

			if me.Name == "" {
				// The types below have their own 'Name' field, so ManagedEntity.Name (me.Name) is empty.
				// We only hit this case when the 'obj' param is one of these types.
				// In most cases, 'obj' is a Folder so Name isn't collected in this call.
				switch x := iface.(type) {
				case Network:
					me.Name = x.Name
				case DistributedVirtualSwitch:
					me.Name = x.Name
				case DistributedVirtualPortgroup:
					me.Name = x.Name
				case OpaqueNetwork:
					me.Name = x.Name
				default:
					// ManagedEntity always has a Name, if we hit this point we missed a case above.
					panic(fmt.Sprintf("%#v Name is empty", me.Reference()))
				}
			}

			if me.Parent == nil {
				// Special case for VirtualMachine within VirtualApp,
				// unlikely to hit this other than via Finder.Element()
				switch x := iface.(type) {
				case VirtualMachine:
					me.Parent = x.ParentVApp
				}
			}

			// A parent-less entity is the root of the tree.
			if me.Parent == nil {
				out = append(out, me)
				break
			}

			// Otherwise append the entity whose parent is the last one added.
			if *me.Parent == find {
				out = append(out, me)
				break
			}
		}
	}

	return out, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go
vendor/github.com/vmware/govmomi/vim25/mo/type_info.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

import (
	"fmt"
	"reflect"
	"regexp"
	"strconv"
	"strings"
	"sync"

	"github.com/vmware/govmomi/vim25/types"
)

// typeInfo caches the reflection data needed to assign retrieved property
// values onto a managed object struct.
type typeInfo struct {
	typ reflect.Type

	// Field indices of "Self" field.
	self []int

	// Map property names to field indices.
	props map[string][]int
}

var typeInfoLock sync.RWMutex
var typeInfoMap = make(map[string]*typeInfo)

// typeInfoForType returns the (possibly cached) typeInfo for the named
// managed object type. It panics when tname is not in the registry t.
func typeInfoForType(tname string) *typeInfo {
	typeInfoLock.RLock()
	ti, ok := typeInfoMap[tname]
	typeInfoLock.RUnlock()

	if ok {
		return ti
	}

	// Create new typeInfo for type.
	if typ, ok := t[tname]; !ok {
		panic("unknown type: " + tname)
	} else {
		// Multiple routines may race to set it, but the result is the same.
		typeInfoLock.Lock()
		ti = newTypeInfo(typ)
		typeInfoMap[tname] = ti
		typeInfoLock.Unlock()
	}

	return ti
}

// baseType maps an interface type named BaseXYZ to the concrete type XYZ
// via the types registry, returning nil when no concrete type is usable.
func baseType(ftyp reflect.Type) reflect.Type {
	base := strings.TrimPrefix(ftyp.Name(), "Base")
	switch base {
	case "MethodFault":
		return nil
	}
	if kind, ok := types.TypeFunc()(base); ok {
		return kind
	}
	return nil
}

// newTypeInfo builds a typeInfo for typ by walking its fields.
func newTypeInfo(typ reflect.Type) *typeInfo {
	t := typeInfo{
		typ:   typ,
		props: make(map[string][]int),
	}

	t.build(typ, "", []int{})

	return &t
}

var managedObjectRefType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem()

// buildName derives the property name for struct field f from its json tag
// (preferred) or xml tag, prefixed with the parent path fn.
func buildName(fn string, f reflect.StructField) string {
	if fn != "" {
		fn += "."
	}

	motag := f.Tag.Get("json")
	if motag != "" {
		tokens := strings.Split(motag, ",")
		if tokens[0] != "" {
			return fn + tokens[0]
		}
	}

	xmltag := f.Tag.Get("xml")
	if xmltag != "" {
		tokens := strings.Split(xmltag, ",")
		if tokens[0] != "" {
			return fn + tokens[0]
		}
	}

	return ""
}

// build recursively walks typ, recording in t.props the field index path
// for every reachable dotted property name.
func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) {
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}

	if typ.Kind() != reflect.Struct {
		panic("need struct")
	}

	for i := 0; i < typ.NumField(); i++ {
		f := typ.Field(i)
		ftyp := f.Type

		// Copy field indices so they can be passed along.
		fic := make([]int, len(fi)+1)
		copy(fic, fi)
		fic[len(fi)] = i

		// Recurse into embedded field.
		if f.Anonymous {
			t.build(ftyp, fn, fic)
			continue
		}

		// Top level type has a "Self" field.
		if f.Name == "Self" && ftyp == managedObjectRefType {
			t.self = fic
			continue
		}

		fnc := buildName(fn, f)
		if fnc == "" {
			continue
		}

		t.props[fnc] = fic

		// Dereference pointer.
		if ftyp.Kind() == reflect.Ptr {
			ftyp = ftyp.Elem()
		}

		// Slices are not addressable by `foo.bar.qux`.
		if ftyp.Kind() == reflect.Slice {
			continue
		}

		// Skip the managed reference type.
		if ftyp == managedObjectRefType {
			continue
		}

		// Recurse into structs.
		if ftyp.Kind() == reflect.Struct {
			t.build(ftyp, fnc, fic)
		}

		// Base type can only access base fields, for example Datastore.Info
		// is types.BaseDataStore, so we create a new(types.DatastoreInfo)
		// Indexed property path may traverse into array element fields.
		// When interface, use the base type to index fields.
		// For example, BaseVirtualDevice:
		//   config.hardware.device[4000].deviceInfo.label
		if ftyp.Kind() == reflect.Interface {
			if base := baseType(ftyp); base != nil {
				t.build(base, fnc, fic)
			}
		}
	}
}

// nilValue is the zero reflect.Value, used to signal "clear the field".
var nilValue reflect.Value

// assignValue assigns a value 'pv' to the struct pointed to by 'val', given a
// slice of field indices. It recurses into the struct until it finds the field
// specified by the indices. It creates new values for pointer types where
// needed.
func assignValue(val reflect.Value, fi []int, pv reflect.Value, field ...string) {
	// Indexed property path can only use base types
	if val.Kind() == reflect.Interface {
		if val.IsNil() {
			base := baseType(val.Type())
			val.Set(reflect.New(base))
		}
		val = val.Elem()
	}

	// Create new value if necessary.
	if val.Kind() == reflect.Ptr {
		if val.IsNil() {
			val.Set(reflect.New(val.Type().Elem()))
		}

		val = val.Elem()
	}

	rv := val.Field(fi[0])
	fi = fi[1:]
	if len(fi) == 0 {
		// Reached the target field; coerce pv into its type.
		if pv == nilValue {
			pv = reflect.Zero(rv.Type())
			rv.Set(pv)
			return
		}

		rt := rv.Type()
		pt := pv.Type()

		// If type is a pointer, create new instance of type.
		if rt.Kind() == reflect.Ptr {
			rv.Set(reflect.New(rt.Elem()))
			rv = rv.Elem()
			rt = rv.Type()
		}

		// If the target type is a slice, but the source is not, deference any ArrayOfXYZ type
		if rt.Kind() == reflect.Slice && pt.Kind() != reflect.Slice {
			if pt.Kind() == reflect.Ptr {
				pv = pv.Elem()
				pt = pt.Elem()
			}

			m := arrayOfRegexp.FindStringSubmatch(pt.Name())
			if len(m) > 0 {
				pv = pv.FieldByName(m[1]) // ArrayOfXYZ type has single field named XYZ
				pt = pv.Type()

				if !pv.IsValid() {
					panic(fmt.Sprintf("expected %s type to have field %s", m[0], m[1]))
				}
			}
		}

		// If type is an interface, check if pv implements it.
		if rt.Kind() == reflect.Interface && !pt.Implements(rt) {
			// Check if pointer to pv implements it.
			if reflect.PtrTo(pt).Implements(rt) {
				npv := reflect.New(pt)
				npv.Elem().Set(pv)
				pv = npv
				pt = pv.Type()
			} else {
				panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name()))
			}
		} else if rt.Kind() == reflect.Struct && pt.Kind() == reflect.Ptr {
			pv = pv.Elem()
			pt = pv.Type()
		}

		if pt.AssignableTo(rt) {
			rv.Set(pv)
		} else if rt.ConvertibleTo(pt) {
			rv.Set(pv.Convert(rt))
		} else if rt.Kind() == reflect.Slice {
			// Indexed array value
			path := field[0]
			isInterface := rt.Elem().Kind() == reflect.Interface

			if len(path) == 0 {
				// Append item (pv) directly to the array, converting to pointer if interface
				if isInterface {
					npv := reflect.New(pt)
					npv.Elem().Set(pv)
					pv = npv
					pt = pv.Type()
				}
			} else {
				// Construct item to be appended to the array, setting field within to value of pv
				var item reflect.Value
				if isInterface {
					base := baseType(rt.Elem())
					item = reflect.New(base)
				} else {
					item = reflect.New(rt.Elem())
				}

				field := newTypeInfo(item.Type())
				if ix, ok := field.props[path]; ok {
					assignValue(item, ix, pv)
				}

				if rt.Elem().Kind() == reflect.Struct {
					pv = item.Elem()
				} else {
					pv = item
				}
				pt = pv.Type()
			}

			rv.Set(reflect.Append(rv, pv))
		} else {
			panic(fmt.Sprintf("cannot assign %q (%s) to %q (%s)", rt.Name(), rt.Kind(), pt.Name(), pt.Kind()))
		}

		return
	}

	// More indices remain: recurse one level deeper.
	assignValue(rv, fi, pv, field...)
}

var arrayOfRegexp = regexp.MustCompile("ArrayOf(.*)$")

// LoadObjectFromContent loads properties from the 'PropSet' field in the
// specified ObjectContent value into the value it represents, which is
// returned as a reflect.Value.
func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, error) {
	v := reflect.New(t.typ)
	assignValue(v, t.self, reflect.ValueOf(o.Obj))

	for _, p := range o.PropSet {
		var field Field
		field.FromString(p.Name)

		rv, ok := t.props[field.Path]
		if !ok {
			// Property not known for this type; skip silently.
			continue
		}
		assignValue(v, rv, reflect.ValueOf(p.Val), field.Item)
	}

	return v, nil
}

// IsManagedObjectType reports whether kind names a registered managed
// object type.
func IsManagedObjectType(kind string) bool {
	_, ok := t[kind]
	return ok
}

// Value returns a new mo instance of the given ref Type.
func Value(ref types.ManagedObjectReference) (Reference, bool) {
	if rt, ok := t[ref.Type]; ok {
		val := reflect.New(rt)
		if e, ok := val.Interface().(Entity); ok {
			e.Entity().Self = ref
			return val.Elem().Interface().(Reference), true
		}
	}
	return nil, false
}

// Field of a ManagedObject in string form.
type Field struct {
	Path string
	Key  any
	Item string
}

// String renders the field back into its `path["key"].item` wire form.
func (f *Field) String() string {
	if f.Key == nil {
		return f.Path
	}
	var key, item string
	switch f.Key.(type) {
	case string:
		key = fmt.Sprintf("%q", f.Key)
	default:
		key = fmt.Sprintf("%d", f.Key)
	}
	if f.Item != "" {
		item = "." + f.Item
	}
	return fmt.Sprintf("%s[%s]%s", f.Path, key, item)
}

// FromString parses a property path of the form `path`, `path[key]` or
// `path[key].item`, reporting whether spec was well formed.
func (f *Field) FromString(spec string) bool {
	s := strings.SplitN(spec, "[", 2)
	f.Path = s[0]
	f.Key = nil
	f.Item = ""

	if len(s) == 1 {
		return true
	}

	parts := strings.SplitN(s[1], "]", 2)
	if len(parts) != 2 {
		return false
	}

	// Quoted keys stay strings; unquoted keys must parse as int32.
	ix := strings.Trim(parts[0], `"`)
	if ix == parts[0] {
		v, err := strconv.ParseInt(ix, 0, 32)
		if err != nil {
			return false
		}
		f.Key = int32(v)
	} else {
		f.Key = ix
	}

	if parts[1] == "" {
		return true
	}

	if parts[1][0] != '.' {
		return false
	}

	f.Item = parts[1][1:]

	return true
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go
vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

import (
	"context"
	"reflect"

	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

// ignoreMissingProperty reports whether a MissingProperty entry for ref is a
// known-benign case that should not be surfaced as an error.
func ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProperty) bool {
	switch ref.Type {
	case "VirtualMachine":
		switch p.Path {
		case "environmentBrowser":
			// See https://github.com/vmware/govmomi/pull/242
			return true
		case "alarmActionsEnabled":
			// Seen with vApp child VM
			return true
		}
	case "ResourcePool":
		switch p.Path {
		case "resourceConfigSpecDetailed":
			return true
		}
	}

	return false
}

// ObjectContentToType loads an ObjectContent value into the value it
// represents. If the ObjectContent value has a non-empty 'MissingSet' field,
// it returns the first fault it finds there as error. If the 'MissingSet'
// field is empty, it returns a pointer to a reflect.Value. It handles contain
// nested properties, such as 'guest.ipAddress' or 'config.hardware'.
func ObjectContentToType(o types.ObjectContent, ptr ...bool) (any, error) {
	// Expect no properties in the missing set
	for _, p := range o.MissingSet {
		if ignoreMissingProperty(o.Obj, p) {
			continue
		}

		return nil, soap.WrapVimFault(p.Fault.Fault)
	}

	ti := typeInfoForType(o.Obj.Type)
	v, err := ti.LoadFromObjectContent(o)
	if err != nil {
		return nil, err
	}

	// When ptr[0] is true the caller gets a pointer to the struct rather
	// than a copy of its value.
	if len(ptr) == 1 && ptr[0] {
		return v.Interface(), nil
	}
	return v.Elem().Interface(), nil
}

// ApplyPropertyChange converts the response of a call to WaitForUpdates
// and applies it to the given managed object.
func ApplyPropertyChange(obj Reference, changes []types.PropertyChange) {
	t := typeInfoForType(obj.Reference().Type)
	v := reflect.ValueOf(obj)

	for _, p := range changes {
		var field Field
		if !field.FromString(p.Name) {
			panic(p.Name + ": invalid property path")
		}

		rv, ok := t.props[field.Path]
		if !ok {
			panic(field.Path + ": property not found")
		}

		if field.Key == nil { // Key is only used for notifications
			assignValue(v, rv, reflect.ValueOf(p.Val))
		}
	}
}

// LoadObjectContent converts the response of a call to
// RetrieveProperties{Ex} to one or more managed objects.
func LoadObjectContent(content []types.ObjectContent, dst any) error {
	rt := reflect.TypeOf(dst)
	if rt == nil || rt.Kind() != reflect.Ptr {
		panic("need pointer")
	}

	rv := reflect.ValueOf(dst).Elem()
	if !rv.CanSet() {
		panic("cannot set dst")
	}

	// dst must point at either a single struct or a slice of structs.
	isSlice := false
	switch rt.Elem().Kind() {
	case reflect.Struct:
	case reflect.Slice:
		isSlice = true
	default:
		panic("unexpected type")
	}

	if isSlice {
		for _, p := range content {
			v, err := ObjectContentToType(p)
			if err != nil {
				return err
			}

			vt := reflect.TypeOf(v)

			if !rv.Type().AssignableTo(vt) {
				// For example: dst is []ManagedEntity, res is []HostSystem
				if field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous {
					rv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index)))
					continue
				}
			}

			rv.Set(reflect.Append(rv, reflect.ValueOf(v)))
		}
	} else {
		switch len(content) {
		case 0:
		case 1:
			v, err := ObjectContentToType(content[0])
			if err != nil {
				return err
			}

			vt := reflect.TypeOf(v)

			if !rv.Type().AssignableTo(vt) {
				// For example: dst is ComputeResource, res is ClusterComputeResource
				if field, ok := vt.FieldByName(rt.Elem().Name()); ok && field.Anonymous {
					rv.Set(reflect.ValueOf(v).FieldByIndex(field.Index))
					return nil
				}
			}

			rv.Set(reflect.ValueOf(v))
		default:
			// If dst is not a slice, expect to receive 0 or 1 results
			panic("more than 1 result")
		}
	}

	return nil
}

// RetrievePropertiesEx wraps RetrievePropertiesEx and ContinueRetrievePropertiesEx to collect properties in batches.
func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req types.RetrievePropertiesEx) ([]types.ObjectContent, error) {
	rx, err := methods.RetrievePropertiesEx(ctx, r, &req)
	if err != nil {
		return nil, err
	}

	if rx.Returnval == nil {
		return nil, nil
	}

	objects := rx.Returnval.Objects
	token := rx.Returnval.Token

	// A non-empty token means the server has more batches to deliver.
	for token != "" {
		cx, err := methods.ContinueRetrievePropertiesEx(ctx, r, &types.ContinueRetrievePropertiesEx{
			This:  req.This,
			Token: token,
		})
		if err != nil {
			return nil, err
		}

		token = cx.Returnval.Token
		objects = append(objects, cx.Returnval.Objects...)
	}

	return objects, nil
}

// RetrievePropertiesForRequest calls the RetrieveProperties method with the
// specified request and decodes the response struct into the value pointed to
// by dst.
func RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req types.RetrieveProperties, dst any) error {
	objects, err := RetrievePropertiesEx(ctx, r, types.RetrievePropertiesEx{
		This:    req.This,
		SpecSet: req.SpecSet,
	})
	if err != nil {
		return err
	}

	return LoadObjectContent(objects, dst)
}

// RetrieveProperties retrieves the properties of the managed object specified
// as obj and decodes the response struct into the value pointed to by dst.
func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types.ManagedObjectReference, dst any) error {
	req := types.RetrieveProperties{
		This: pc,
		SpecSet: []types.PropertyFilterSpec{
			{
				ObjectSet: []types.ObjectSpec{
					{
						Obj:  obj,
						Skip: types.NewBool(false),
					},
				},
				PropSet: []types.PropertySpec{
					{
						All:  types.NewBool(true),
						Type: obj.Type,
					},
				},
			},
		},
	}

	return RetrievePropertiesForRequest(ctx, r, req, dst)
}

var morType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem()

// References returns all non-nil moref field values in the given struct.
// Only Anonymous struct fields are followed by default. The optional follow
// param will follow any struct fields when true.
func References(s any, follow ...bool) []types.ManagedObjectReference {
	var refs []types.ManagedObjectReference
	rval := reflect.ValueOf(s)
	rtype := rval.Type()

	if rval.Kind() == reflect.Ptr {
		rval = rval.Elem()
		rtype = rval.Type()
	}

	for i := 0; i < rval.NumField(); i++ {
		val := rval.Field(i)
		finfo := rtype.Field(i)

		if finfo.Anonymous {
			refs = append(refs, References(val.Interface(), follow...)...)
			continue
		}
		if finfo.Name == "Self" {
			continue
		}

		ftype := val.Type()

		if ftype.Kind() == reflect.Slice {
			if ftype.Elem() == morType {
				s := val.Interface().([]types.ManagedObjectReference)
				for i := range s {
					refs = append(refs, s[i])
				}
			}
			continue
		}

		if ftype.Kind() == reflect.Ptr {
			if val.IsNil() {
				continue
			}
			val = val.Elem()
			ftype = val.Type()
		}

		if ftype == morType {
			refs = append(refs, val.Interface().(types.ManagedObjectReference))
			continue
		}

		if len(follow) != 0 && follow[0] {
			if ftype.Kind() == reflect.Struct && val.CanSet() {
				refs = append(refs, References(val.Interface(), follow...)...)
			}
		}
	}

	return refs
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/entity.go
vendor/github.com/vmware/govmomi/vim25/mo/entity.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package mo

// Entity is the interface that is implemented by all managed objects
// that extend ManagedEntity.
type Entity interface {
	Reference

	// Entity returns a pointer to the embedded ManagedEntity, giving
	// callers uniform access to the common inventory fields.
	Entity() *ManagedEntity
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/mo.go
vendor/github.com/vmware/govmomi/vim25/mo/mo.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package mo import ( "reflect" "time" "github.com/vmware/govmomi/vim25/types" ) type Alarm struct { ExtensibleManagedObject Info types.AlarmInfo `json:"info"` } func init() { t["Alarm"] = reflect.TypeOf((*Alarm)(nil)).Elem() } type AlarmManager struct { Self types.ManagedObjectReference `json:"self"` DefaultExpression []types.BaseAlarmExpression `json:"defaultExpression"` Description types.AlarmDescription `json:"description"` } func (m AlarmManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["AlarmManager"] = reflect.TypeOf((*AlarmManager)(nil)).Elem() } type AuthorizationManager struct { Self types.ManagedObjectReference `json:"self"` PrivilegeList []types.AuthorizationPrivilege `json:"privilegeList"` RoleList []types.AuthorizationRole `json:"roleList"` Description types.AuthorizationDescription `json:"description"` } func (m AuthorizationManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["AuthorizationManager"] = reflect.TypeOf((*AuthorizationManager)(nil)).Elem() } type CertificateManager struct { Self types.ManagedObjectReference `json:"self"` } func (m CertificateManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["CertificateManager"] = reflect.TypeOf((*CertificateManager)(nil)).Elem() } type ClusterComputeResource struct { ComputeResource Configuration types.ClusterConfigInfo `json:"configuration"` Recommendation []types.ClusterRecommendation `json:"recommendation"` DrsRecommendation []types.ClusterDrsRecommendation `json:"drsRecommendation"` HciConfig *types.ClusterComputeResourceHCIConfigInfo `json:"hciConfig"` MigrationHistory []types.ClusterDrsMigration `json:"migrationHistory"` ActionHistory []types.ClusterActionHistory `json:"actionHistory"` DrsFault []types.ClusterDrsFaults `json:"drsFault"` } func init() { 
t["ClusterComputeResource"] = reflect.TypeOf((*ClusterComputeResource)(nil)).Elem() } type ClusterEVCManager struct { ExtensibleManagedObject ManagedCluster types.ManagedObjectReference `json:"managedCluster"` EvcState types.ClusterEVCManagerEVCState `json:"evcState"` } func init() { t["ClusterEVCManager"] = reflect.TypeOf((*ClusterEVCManager)(nil)).Elem() } type ClusterProfile struct { Profile } func init() { t["ClusterProfile"] = reflect.TypeOf((*ClusterProfile)(nil)).Elem() } type ClusterProfileManager struct { ProfileManager } func init() { t["ClusterProfileManager"] = reflect.TypeOf((*ClusterProfileManager)(nil)).Elem() } type ComputeResource struct { ManagedEntity ResourcePool *types.ManagedObjectReference `json:"resourcePool"` Host []types.ManagedObjectReference `json:"host"` Datastore []types.ManagedObjectReference `json:"datastore"` Network []types.ManagedObjectReference `json:"network"` Summary types.BaseComputeResourceSummary `json:"summary"` EnvironmentBrowser *types.ManagedObjectReference `json:"environmentBrowser"` ConfigurationEx types.BaseComputeResourceConfigInfo `json:"configurationEx"` LifecycleManaged *bool `json:"lifecycleManaged"` } func (m *ComputeResource) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["ComputeResource"] = reflect.TypeOf((*ComputeResource)(nil)).Elem() } type ContainerView struct { ManagedObjectView Container types.ManagedObjectReference `json:"container"` Type []string `json:"type"` Recursive bool `json:"recursive"` } func init() { t["ContainerView"] = reflect.TypeOf((*ContainerView)(nil)).Elem() } type CryptoManager struct { Self types.ManagedObjectReference `json:"self"` Enabled bool `json:"enabled"` } func (m CryptoManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["CryptoManager"] = reflect.TypeOf((*CryptoManager)(nil)).Elem() } type CryptoManagerHost struct { CryptoManager } func init() { t["CryptoManagerHost"] = reflect.TypeOf((*CryptoManagerHost)(nil)).Elem() } 
type CryptoManagerHostKMS struct { CryptoManagerHost } func init() { t["CryptoManagerHostKMS"] = reflect.TypeOf((*CryptoManagerHostKMS)(nil)).Elem() } type CryptoManagerKmip struct { CryptoManager KmipServers []types.KmipClusterInfo `json:"kmipServers"` } func init() { t["CryptoManagerKmip"] = reflect.TypeOf((*CryptoManagerKmip)(nil)).Elem() } type CustomFieldsManager struct { Self types.ManagedObjectReference `json:"self"` Field []types.CustomFieldDef `json:"field"` } func (m CustomFieldsManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["CustomFieldsManager"] = reflect.TypeOf((*CustomFieldsManager)(nil)).Elem() } type CustomizationSpecManager struct { Self types.ManagedObjectReference `json:"self"` Info []types.CustomizationSpecInfo `json:"info"` EncryptionKey types.ByteSlice `json:"encryptionKey"` } func (m CustomizationSpecManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["CustomizationSpecManager"] = reflect.TypeOf((*CustomizationSpecManager)(nil)).Elem() } type Datacenter struct { ManagedEntity VmFolder types.ManagedObjectReference `json:"vmFolder"` HostFolder types.ManagedObjectReference `json:"hostFolder"` DatastoreFolder types.ManagedObjectReference `json:"datastoreFolder"` NetworkFolder types.ManagedObjectReference `json:"networkFolder"` Datastore []types.ManagedObjectReference `json:"datastore"` Network []types.ManagedObjectReference `json:"network"` Configuration types.DatacenterConfigInfo `json:"configuration"` } func (m *Datacenter) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["Datacenter"] = reflect.TypeOf((*Datacenter)(nil)).Elem() } type Datastore struct { ManagedEntity Info types.BaseDatastoreInfo `json:"info"` Summary types.DatastoreSummary `json:"summary"` Host []types.DatastoreHostMount `json:"host"` Vm []types.ManagedObjectReference `json:"vm"` Browser types.ManagedObjectReference `json:"browser"` Capability types.DatastoreCapability `json:"capability"` 
IormConfiguration *types.StorageIORMInfo `json:"iormConfiguration"` } func (m *Datastore) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["Datastore"] = reflect.TypeOf((*Datastore)(nil)).Elem() } type DatastoreNamespaceManager struct { Self types.ManagedObjectReference `json:"self"` } func (m DatastoreNamespaceManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["DatastoreNamespaceManager"] = reflect.TypeOf((*DatastoreNamespaceManager)(nil)).Elem() } type DiagnosticManager struct { Self types.ManagedObjectReference `json:"self"` } func (m DiagnosticManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["DiagnosticManager"] = reflect.TypeOf((*DiagnosticManager)(nil)).Elem() } type DistributedVirtualPortgroup struct { Network Key string `json:"key"` Config types.DVPortgroupConfigInfo `json:"config"` PortKeys []string `json:"portKeys"` } func init() { t["DistributedVirtualPortgroup"] = reflect.TypeOf((*DistributedVirtualPortgroup)(nil)).Elem() } type DistributedVirtualSwitch struct { ManagedEntity Uuid string `json:"uuid"` Capability types.DVSCapability `json:"capability"` Summary types.DVSSummary `json:"summary"` Config types.BaseDVSConfigInfo `json:"config"` NetworkResourcePool []types.DVSNetworkResourcePool `json:"networkResourcePool"` Portgroup []types.ManagedObjectReference `json:"portgroup"` Runtime *types.DVSRuntimeInfo `json:"runtime"` } func (m *DistributedVirtualSwitch) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["DistributedVirtualSwitch"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem() } type DistributedVirtualSwitchManager struct { Self types.ManagedObjectReference `json:"self"` } func (m DistributedVirtualSwitchManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["DistributedVirtualSwitchManager"] = reflect.TypeOf((*DistributedVirtualSwitchManager)(nil)).Elem() } type EnvironmentBrowser struct { Self 
types.ManagedObjectReference `json:"self"` DatastoreBrowser *types.ManagedObjectReference `json:"datastoreBrowser"` } func (m EnvironmentBrowser) Reference() types.ManagedObjectReference { return m.Self } func init() { t["EnvironmentBrowser"] = reflect.TypeOf((*EnvironmentBrowser)(nil)).Elem() } type EventHistoryCollector struct { HistoryCollector LatestPage []types.BaseEvent `json:"latestPage"` } func init() { t["EventHistoryCollector"] = reflect.TypeOf((*EventHistoryCollector)(nil)).Elem() } type EventManager struct { Self types.ManagedObjectReference `json:"self"` Description types.EventDescription `json:"description"` LatestEvent types.BaseEvent `json:"latestEvent"` MaxCollector int32 `json:"maxCollector"` } func (m EventManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["EventManager"] = reflect.TypeOf((*EventManager)(nil)).Elem() } type ExtensibleManagedObject struct { Self types.ManagedObjectReference `json:"self"` Value []types.BaseCustomFieldValue `json:"value"` AvailableField []types.CustomFieldDef `json:"availableField"` } func (m ExtensibleManagedObject) Reference() types.ManagedObjectReference { return m.Self } func init() { t["ExtensibleManagedObject"] = reflect.TypeOf((*ExtensibleManagedObject)(nil)).Elem() } type ExtensionManager struct { Self types.ManagedObjectReference `json:"self"` ExtensionList []types.Extension `json:"extensionList"` } func (m ExtensionManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["ExtensionManager"] = reflect.TypeOf((*ExtensionManager)(nil)).Elem() } type FailoverClusterConfigurator struct { Self types.ManagedObjectReference `json:"self"` DisabledConfigureMethod []string `json:"disabledConfigureMethod"` } func (m FailoverClusterConfigurator) Reference() types.ManagedObjectReference { return m.Self } func init() { t["FailoverClusterConfigurator"] = reflect.TypeOf((*FailoverClusterConfigurator)(nil)).Elem() } type FailoverClusterManager struct { Self 
types.ManagedObjectReference `json:"self"` DisabledClusterMethod []string `json:"disabledClusterMethod"` } func (m FailoverClusterManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["FailoverClusterManager"] = reflect.TypeOf((*FailoverClusterManager)(nil)).Elem() } type FileManager struct { Self types.ManagedObjectReference `json:"self"` } func (m FileManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["FileManager"] = reflect.TypeOf((*FileManager)(nil)).Elem() } type Folder struct { ManagedEntity ChildType []string `json:"childType"` ChildEntity []types.ManagedObjectReference `json:"childEntity"` Namespace *string `json:"namespace"` } func (m *Folder) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["Folder"] = reflect.TypeOf((*Folder)(nil)).Elem() } type GuestAliasManager struct { Self types.ManagedObjectReference `json:"self"` } func (m GuestAliasManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestAliasManager"] = reflect.TypeOf((*GuestAliasManager)(nil)).Elem() } type GuestAuthManager struct { Self types.ManagedObjectReference `json:"self"` } func (m GuestAuthManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestAuthManager"] = reflect.TypeOf((*GuestAuthManager)(nil)).Elem() } type GuestFileManager struct { Self types.ManagedObjectReference `json:"self"` } func (m GuestFileManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestFileManager"] = reflect.TypeOf((*GuestFileManager)(nil)).Elem() } type GuestOperationsManager struct { Self types.ManagedObjectReference `json:"self"` AuthManager *types.ManagedObjectReference `json:"authManager"` FileManager *types.ManagedObjectReference `json:"fileManager"` ProcessManager *types.ManagedObjectReference `json:"processManager"` GuestWindowsRegistryManager *types.ManagedObjectReference `json:"guestWindowsRegistryManager"` AliasManager 
*types.ManagedObjectReference `json:"aliasManager"` } func (m GuestOperationsManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestOperationsManager"] = reflect.TypeOf((*GuestOperationsManager)(nil)).Elem() } type GuestProcessManager struct { Self types.ManagedObjectReference `json:"self"` } func (m GuestProcessManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestProcessManager"] = reflect.TypeOf((*GuestProcessManager)(nil)).Elem() } type GuestWindowsRegistryManager struct { Self types.ManagedObjectReference `json:"self"` } func (m GuestWindowsRegistryManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["GuestWindowsRegistryManager"] = reflect.TypeOf((*GuestWindowsRegistryManager)(nil)).Elem() } type HealthUpdateManager struct { Self types.ManagedObjectReference `json:"self"` } func (m HealthUpdateManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HealthUpdateManager"] = reflect.TypeOf((*HealthUpdateManager)(nil)).Elem() } type HistoryCollector struct { Self types.ManagedObjectReference `json:"self"` Filter types.AnyType `json:"filter"` } func (m HistoryCollector) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HistoryCollector"] = reflect.TypeOf((*HistoryCollector)(nil)).Elem() } type HostAccessManager struct { Self types.ManagedObjectReference `json:"self"` LockdownMode types.HostLockdownMode `json:"lockdownMode"` } func (m HostAccessManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostAccessManager"] = reflect.TypeOf((*HostAccessManager)(nil)).Elem() } type HostActiveDirectoryAuthentication struct { HostDirectoryStore } func init() { t["HostActiveDirectoryAuthentication"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem() } type HostAssignableHardwareManager struct { Self types.ManagedObjectReference `json:"self"` Binding 
[]types.HostAssignableHardwareBinding `json:"binding"` Config types.HostAssignableHardwareConfig `json:"config"` } func (m HostAssignableHardwareManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostAssignableHardwareManager"] = reflect.TypeOf((*HostAssignableHardwareManager)(nil)).Elem() } type HostAuthenticationManager struct { Self types.ManagedObjectReference `json:"self"` Info types.HostAuthenticationManagerInfo `json:"info"` SupportedStore []types.ManagedObjectReference `json:"supportedStore"` } func (m HostAuthenticationManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostAuthenticationManager"] = reflect.TypeOf((*HostAuthenticationManager)(nil)).Elem() } type HostAuthenticationStore struct { Self types.ManagedObjectReference `json:"self"` Info types.BaseHostAuthenticationStoreInfo `json:"info"` } func (m HostAuthenticationStore) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostAuthenticationStore"] = reflect.TypeOf((*HostAuthenticationStore)(nil)).Elem() } type HostAutoStartManager struct { Self types.ManagedObjectReference `json:"self"` Config types.HostAutoStartManagerConfig `json:"config"` } func (m HostAutoStartManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostAutoStartManager"] = reflect.TypeOf((*HostAutoStartManager)(nil)).Elem() } type HostBootDeviceSystem struct { Self types.ManagedObjectReference `json:"self"` } func (m HostBootDeviceSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostBootDeviceSystem"] = reflect.TypeOf((*HostBootDeviceSystem)(nil)).Elem() } type HostCacheConfigurationManager struct { Self types.ManagedObjectReference `json:"self"` CacheConfigurationInfo []types.HostCacheConfigurationInfo `json:"cacheConfigurationInfo"` } func (m HostCacheConfigurationManager) Reference() types.ManagedObjectReference { return m.Self } func init() { 
t["HostCacheConfigurationManager"] = reflect.TypeOf((*HostCacheConfigurationManager)(nil)).Elem() } type HostCertificateManager struct { Self types.ManagedObjectReference `json:"self"` CertificateInfo types.HostCertificateManagerCertificateInfo `json:"certificateInfo"` } func (m HostCertificateManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostCertificateManager"] = reflect.TypeOf((*HostCertificateManager)(nil)).Elem() } type HostCpuSchedulerSystem struct { ExtensibleManagedObject HyperthreadInfo *types.HostHyperThreadScheduleInfo `json:"hyperthreadInfo"` } func init() { t["HostCpuSchedulerSystem"] = reflect.TypeOf((*HostCpuSchedulerSystem)(nil)).Elem() } type HostDatastoreBrowser struct { Self types.ManagedObjectReference `json:"self"` Datastore []types.ManagedObjectReference `json:"datastore"` SupportedType []types.BaseFileQuery `json:"supportedType"` } func (m HostDatastoreBrowser) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostDatastoreBrowser"] = reflect.TypeOf((*HostDatastoreBrowser)(nil)).Elem() } type HostDatastoreSystem struct { Self types.ManagedObjectReference `json:"self"` Datastore []types.ManagedObjectReference `json:"datastore"` Capabilities types.HostDatastoreSystemCapabilities `json:"capabilities"` } func (m HostDatastoreSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostDatastoreSystem"] = reflect.TypeOf((*HostDatastoreSystem)(nil)).Elem() } type HostDateTimeSystem struct { Self types.ManagedObjectReference `json:"self"` DateTimeInfo types.HostDateTimeInfo `json:"dateTimeInfo"` } func (m HostDateTimeSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostDateTimeSystem"] = reflect.TypeOf((*HostDateTimeSystem)(nil)).Elem() } type HostDiagnosticSystem struct { Self types.ManagedObjectReference `json:"self"` ActivePartition *types.HostDiagnosticPartition `json:"activePartition"` } func (m HostDiagnosticSystem) 
Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostDiagnosticSystem"] = reflect.TypeOf((*HostDiagnosticSystem)(nil)).Elem() } type HostDirectoryStore struct { HostAuthenticationStore } func init() { t["HostDirectoryStore"] = reflect.TypeOf((*HostDirectoryStore)(nil)).Elem() } type HostEsxAgentHostManager struct { Self types.ManagedObjectReference `json:"self"` ConfigInfo types.HostEsxAgentHostManagerConfigInfo `json:"configInfo"` } func (m HostEsxAgentHostManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostEsxAgentHostManager"] = reflect.TypeOf((*HostEsxAgentHostManager)(nil)).Elem() } type HostFirewallSystem struct { ExtensibleManagedObject FirewallInfo *types.HostFirewallInfo `json:"firewallInfo"` } func init() { t["HostFirewallSystem"] = reflect.TypeOf((*HostFirewallSystem)(nil)).Elem() } type HostFirmwareSystem struct { Self types.ManagedObjectReference `json:"self"` } func (m HostFirmwareSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostFirmwareSystem"] = reflect.TypeOf((*HostFirmwareSystem)(nil)).Elem() } type HostGraphicsManager struct { ExtensibleManagedObject GraphicsInfo []types.HostGraphicsInfo `json:"graphicsInfo"` GraphicsConfig *types.HostGraphicsConfig `json:"graphicsConfig"` SharedPassthruGpuTypes []string `json:"sharedPassthruGpuTypes"` SharedGpuCapabilities []types.HostSharedGpuCapabilities `json:"sharedGpuCapabilities"` } func init() { t["HostGraphicsManager"] = reflect.TypeOf((*HostGraphicsManager)(nil)).Elem() } type HostHealthStatusSystem struct { Self types.ManagedObjectReference `json:"self"` Runtime types.HealthSystemRuntime `json:"runtime"` } func (m HostHealthStatusSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostHealthStatusSystem"] = reflect.TypeOf((*HostHealthStatusSystem)(nil)).Elem() } type HostImageConfigManager struct { Self types.ManagedObjectReference `json:"self"` } func (m 
HostImageConfigManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostImageConfigManager"] = reflect.TypeOf((*HostImageConfigManager)(nil)).Elem() } type HostKernelModuleSystem struct { Self types.ManagedObjectReference `json:"self"` } func (m HostKernelModuleSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostKernelModuleSystem"] = reflect.TypeOf((*HostKernelModuleSystem)(nil)).Elem() } type HostLocalAccountManager struct { Self types.ManagedObjectReference `json:"self"` } func (m HostLocalAccountManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostLocalAccountManager"] = reflect.TypeOf((*HostLocalAccountManager)(nil)).Elem() } type HostLocalAuthentication struct { HostAuthenticationStore } func init() { t["HostLocalAuthentication"] = reflect.TypeOf((*HostLocalAuthentication)(nil)).Elem() } type HostMemorySystem struct { ExtensibleManagedObject ConsoleReservationInfo *types.ServiceConsoleReservationInfo `json:"consoleReservationInfo"` VirtualMachineReservationInfo *types.VirtualMachineMemoryReservationInfo `json:"virtualMachineReservationInfo"` } func init() { t["HostMemorySystem"] = reflect.TypeOf((*HostMemorySystem)(nil)).Elem() } type HostNetworkSystem struct { ExtensibleManagedObject Capabilities *types.HostNetCapabilities `json:"capabilities"` NetworkInfo *types.HostNetworkInfo `json:"networkInfo"` OffloadCapabilities *types.HostNetOffloadCapabilities `json:"offloadCapabilities"` NetworkConfig *types.HostNetworkConfig `json:"networkConfig"` DnsConfig types.BaseHostDnsConfig `json:"dnsConfig"` IpRouteConfig types.BaseHostIpRouteConfig `json:"ipRouteConfig"` ConsoleIpRouteConfig types.BaseHostIpRouteConfig `json:"consoleIpRouteConfig"` } func init() { t["HostNetworkSystem"] = reflect.TypeOf((*HostNetworkSystem)(nil)).Elem() } type HostNvdimmSystem struct { Self types.ManagedObjectReference `json:"self"` NvdimmSystemInfo types.NvdimmSystemInfo 
`json:"nvdimmSystemInfo"` } func (m HostNvdimmSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostNvdimmSystem"] = reflect.TypeOf((*HostNvdimmSystem)(nil)).Elem() } type HostPatchManager struct { Self types.ManagedObjectReference `json:"self"` } func (m HostPatchManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostPatchManager"] = reflect.TypeOf((*HostPatchManager)(nil)).Elem() } type HostPciPassthruSystem struct { ExtensibleManagedObject PciPassthruInfo []types.BaseHostPciPassthruInfo `json:"pciPassthruInfo"` SriovDevicePoolInfo []types.BaseHostSriovDevicePoolInfo `json:"sriovDevicePoolInfo"` } func init() { t["HostPciPassthruSystem"] = reflect.TypeOf((*HostPciPassthruSystem)(nil)).Elem() } type HostPowerSystem struct { Self types.ManagedObjectReference `json:"self"` Capability types.PowerSystemCapability `json:"capability"` Info types.PowerSystemInfo `json:"info"` } func (m HostPowerSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostPowerSystem"] = reflect.TypeOf((*HostPowerSystem)(nil)).Elem() } type HostProfile struct { Profile ValidationState *string `json:"validationState"` ValidationStateUpdateTime *time.Time `json:"validationStateUpdateTime"` ValidationFailureInfo *types.HostProfileValidationFailureInfo `json:"validationFailureInfo"` ReferenceHost *types.ManagedObjectReference `json:"referenceHost"` } func init() { t["HostProfile"] = reflect.TypeOf((*HostProfile)(nil)).Elem() } type HostProfileManager struct { ProfileManager } func init() { t["HostProfileManager"] = reflect.TypeOf((*HostProfileManager)(nil)).Elem() } type HostServiceSystem struct { ExtensibleManagedObject ServiceInfo types.HostServiceInfo `json:"serviceInfo"` } func init() { t["HostServiceSystem"] = reflect.TypeOf((*HostServiceSystem)(nil)).Elem() } type HostSnmpSystem struct { Self types.ManagedObjectReference `json:"self"` Configuration types.HostSnmpConfigSpec 
`json:"configuration"` Limits types.HostSnmpSystemAgentLimits `json:"limits"` } func (m HostSnmpSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostSnmpSystem"] = reflect.TypeOf((*HostSnmpSystem)(nil)).Elem() } type HostSpecificationManager struct { Self types.ManagedObjectReference `json:"self"` } func (m HostSpecificationManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostSpecificationManager"] = reflect.TypeOf((*HostSpecificationManager)(nil)).Elem() } type HostStorageSystem struct { ExtensibleManagedObject StorageDeviceInfo *types.HostStorageDeviceInfo `json:"storageDeviceInfo"` FileSystemVolumeInfo types.HostFileSystemVolumeInfo `json:"fileSystemVolumeInfo"` SystemFile []string `json:"systemFile"` MultipathStateInfo *types.HostMultipathStateInfo `json:"multipathStateInfo"` } func init() { t["HostStorageSystem"] = reflect.TypeOf((*HostStorageSystem)(nil)).Elem() } type HostSystem struct { ManagedEntity Runtime types.HostRuntimeInfo `json:"runtime"` Summary types.HostListSummary `json:"summary"` Hardware *types.HostHardwareInfo `json:"hardware"` Capability *types.HostCapability `json:"capability"` LicensableResource types.HostLicensableResourceInfo `json:"licensableResource"` RemediationState *types.HostSystemRemediationState `json:"remediationState"` PrecheckRemediationResult *types.ApplyHostProfileConfigurationSpec `json:"precheckRemediationResult"` RemediationResult *types.ApplyHostProfileConfigurationResult `json:"remediationResult"` ComplianceCheckState *types.HostSystemComplianceCheckState `json:"complianceCheckState"` ComplianceCheckResult *types.ComplianceResult `json:"complianceCheckResult"` ConfigManager types.HostConfigManager `json:"configManager"` Config *types.HostConfigInfo `json:"config"` Vm []types.ManagedObjectReference `json:"vm"` Datastore []types.ManagedObjectReference `json:"datastore"` Network []types.ManagedObjectReference `json:"network"` DatastoreBrowser 
types.ManagedObjectReference `json:"datastoreBrowser"` SystemResources *types.HostSystemResourceInfo `json:"systemResources"` AnswerFileValidationState *types.AnswerFileStatusResult `json:"answerFileValidationState"` AnswerFileValidationResult *types.AnswerFileStatusResult `json:"answerFileValidationResult"` } func (m *HostSystem) Entity() *ManagedEntity { return &m.ManagedEntity } func init() { t["HostSystem"] = reflect.TypeOf((*HostSystem)(nil)).Elem() } type HostVFlashManager struct { Self types.ManagedObjectReference `json:"self"` VFlashConfigInfo *types.HostVFlashManagerVFlashConfigInfo `json:"vFlashConfigInfo"` } func (m HostVFlashManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostVFlashManager"] = reflect.TypeOf((*HostVFlashManager)(nil)).Elem() } type HostVMotionSystem struct { ExtensibleManagedObject NetConfig *types.HostVMotionNetConfig `json:"netConfig"` IpConfig *types.HostIpConfig `json:"ipConfig"` } func init() { t["HostVMotionSystem"] = reflect.TypeOf((*HostVMotionSystem)(nil)).Elem() } type HostVStorageObjectManager struct { VStorageObjectManagerBase } func init() { t["HostVStorageObjectManager"] = reflect.TypeOf((*HostVStorageObjectManager)(nil)).Elem() } type HostVirtualNicManager struct { ExtensibleManagedObject Info types.HostVirtualNicManagerInfo `json:"info"` } func init() { t["HostVirtualNicManager"] = reflect.TypeOf((*HostVirtualNicManager)(nil)).Elem() } type HostVsanInternalSystem struct { Self types.ManagedObjectReference `json:"self"` } func (m HostVsanInternalSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostVsanInternalSystem"] = reflect.TypeOf((*HostVsanInternalSystem)(nil)).Elem() } type HostVsanSystem struct { Self types.ManagedObjectReference `json:"self"` Config types.VsanHostConfigInfo `json:"config"` } func (m HostVsanSystem) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HostVsanSystem"] = 
reflect.TypeOf((*HostVsanSystem)(nil)).Elem() } type HttpNfcLease struct { Self types.ManagedObjectReference `json:"self"` InitializeProgress int32 `json:"initializeProgress"` TransferProgress int32 `json:"transferProgress"` Mode string `json:"mode"` Capabilities types.HttpNfcLeaseCapabilities `json:"capabilities"` Info *types.HttpNfcLeaseInfo `json:"info"` State types.HttpNfcLeaseState `json:"state"` Error *types.LocalizedMethodFault `json:"error"` } func (m HttpNfcLease) Reference() types.ManagedObjectReference { return m.Self } func init() { t["HttpNfcLease"] = reflect.TypeOf((*HttpNfcLease)(nil)).Elem() } type InventoryView struct { ManagedObjectView } func init() { t["InventoryView"] = reflect.TypeOf((*InventoryView)(nil)).Elem() } type IoFilterManager struct { Self types.ManagedObjectReference `json:"self"` } func (m IoFilterManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["IoFilterManager"] = reflect.TypeOf((*IoFilterManager)(nil)).Elem() } type IpPoolManager struct { Self types.ManagedObjectReference `json:"self"` } func (m IpPoolManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["IpPoolManager"] = reflect.TypeOf((*IpPoolManager)(nil)).Elem() } type IscsiManager struct { Self types.ManagedObjectReference `json:"self"` } func (m IscsiManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["IscsiManager"] = reflect.TypeOf((*IscsiManager)(nil)).Elem() } type LicenseAssignmentManager struct { Self types.ManagedObjectReference `json:"self"` } func (m LicenseAssignmentManager) Reference() types.ManagedObjectReference { return m.Self } func init() { t["LicenseAssignmentManager"] = reflect.TypeOf((*LicenseAssignmentManager)(nil)).Elem() } type LicenseManager struct { Self types.ManagedObjectReference `json:"self"` Source types.BaseLicenseSource `json:"source"` SourceAvailable bool `json:"sourceAvailable"` Diagnostics *types.LicenseDiagnostics `json:"diagnostics"` 
FeatureInfo []types.LicenseFeatureInfo `json:"featureInfo"` LicensedEdition string `json:"licensedEdition"` Licenses []types.LicenseManagerLicenseInfo `json:"licenses"`
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/helpers.go
vendor/github.com/vmware/govmomi/vim25/mo/helpers.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package mo import ( "fmt" "io" "text/tabwriter" "time" ) // Write implements the cli package's Write(io.Writer) error interface for // emitting objects to the command line. func (l HttpNfcLease) Write(w io.Writer) error { tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0) fmt.Fprintf(tw, "Lease:\t%s\n", l.Reference().String()) fmt.Fprintf(tw, "InitializeProgress:\t%d\n", l.InitializeProgress) fmt.Fprintf(tw, "TransferProgress:\t%d\n", l.TransferProgress) fmt.Fprintf(tw, "Mode:\t%s\n", l.Mode) fmt.Fprintf(tw, "State:\t%s\n", l.State) fmt.Fprintf(tw, "Capabilities:\n") fmt.Fprintf(tw, " CorsSupported:\t%v\n", l.Capabilities.CorsSupported) fmt.Fprintf(tw, " PullModeSupported:\t%v\n", l.Capabilities.PullModeSupported) if info := l.Info; info != nil { fmt.Fprintf(tw, "Info:\n") fmt.Fprintf(tw, " Entity:\t%s\n", info.Entity.String()) timeout := time.Second * time.Duration(info.LeaseTimeout) fmt.Fprintf(tw, " Timeout:\t%s\n", timeout) fmt.Fprintf(tw, " TotalDiskCapacityInKB:\t%d\n", info.TotalDiskCapacityInKB) fmt.Fprintf(tw, " URLs:\n") for i := range info.DeviceUrl { du := info.DeviceUrl[i] fmt.Fprintf(tw, " Datastore:\t%s\n", du.DatastoreKey) fmt.Fprintf(tw, " DeviceKey:\t%s\n", du.Key) isDisk := false if du.Disk != nil { isDisk = *du.Disk } fmt.Fprintf(tw, " IsDisk:\t%v\n", isDisk) fmt.Fprintf(tw, " SSLThumbprint:\t%s\n", du.SslThumbprint) fmt.Fprintf(tw, " Target:\t%s\n", du.TargetId) fmt.Fprintf(tw, " URL:\t%s\n", du.Url) } } if err := l.Error; err != nil { fmt.Fprintf(tw, "Error:\t%s\n", err.LocalizedMessage) } return tw.Flush() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/mo/reference.go
vendor/github.com/vmware/govmomi/vim25/mo/reference.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package mo import "github.com/vmware/govmomi/vim25/types" // Reference is the interface that is implemented by all the managed objects // defined in this package. It specifies that these managed objects have a // function that returns the managed object reference to themselves. type Reference interface { Reference() types.ManagedObjectReference }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go
vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package methods import ( "context" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) type PlaceVmsXClusterBody struct { Req *types.PlaceVmsXCluster `xml:"urn:vim25 PlaceVmsXCluster,omitempty"` Res *types.PlaceVmsXClusterResponse `xml:"PlaceVmsXClusterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *PlaceVmsXClusterBody) Fault() *soap.Fault { return b.Fault_ } func PlaceVmsXCluster(ctx context.Context, r soap.RoundTripper, req *types.PlaceVmsXCluster) (*types.PlaceVmsXClusterResponse, error) { var reqBody, resBody PlaceVmsXClusterBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/methods/methods.go
vendor/github.com/vmware/govmomi/vim25/methods/methods.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package methods import ( "context" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) type AbandonHciWorkflowBody struct { Req *types.AbandonHciWorkflow `xml:"urn:vim25 AbandonHciWorkflow,omitempty"` Res *types.AbandonHciWorkflowResponse `xml:"AbandonHciWorkflowResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AbandonHciWorkflowBody) Fault() *soap.Fault { return b.Fault_ } func AbandonHciWorkflow(ctx context.Context, r soap.RoundTripper, req *types.AbandonHciWorkflow) (*types.AbandonHciWorkflowResponse, error) { var reqBody, resBody AbandonHciWorkflowBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AbdicateDomOwnershipBody struct { Req *types.AbdicateDomOwnership `xml:"urn:vim25 AbdicateDomOwnership,omitempty"` Res *types.AbdicateDomOwnershipResponse `xml:"AbdicateDomOwnershipResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AbdicateDomOwnershipBody) Fault() *soap.Fault { return b.Fault_ } func AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.AbdicateDomOwnership) (*types.AbdicateDomOwnershipResponse, error) { var reqBody, resBody AbdicateDomOwnershipBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AbortCustomization_TaskBody struct { Req *types.AbortCustomization_Task `xml:"urn:vim25 AbortCustomization_Task,omitempty"` Res *types.AbortCustomization_TaskResponse `xml:"AbortCustomization_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AbortCustomization_TaskBody) Fault() *soap.Fault { return b.Fault_ 
} func AbortCustomization_Task(ctx context.Context, r soap.RoundTripper, req *types.AbortCustomization_Task) (*types.AbortCustomization_TaskResponse, error) { var reqBody, resBody AbortCustomization_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcknowledgeAlarmBody struct { Req *types.AcknowledgeAlarm `xml:"urn:vim25 AcknowledgeAlarm,omitempty"` Res *types.AcknowledgeAlarmResponse `xml:"AcknowledgeAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcknowledgeAlarmBody) Fault() *soap.Fault { return b.Fault_ } func AcknowledgeAlarm(ctx context.Context, r soap.RoundTripper, req *types.AcknowledgeAlarm) (*types.AcknowledgeAlarmResponse, error) { var reqBody, resBody AcknowledgeAlarmBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireCimServicesTicketBody struct { Req *types.AcquireCimServicesTicket `xml:"urn:vim25 AcquireCimServicesTicket,omitempty"` Res *types.AcquireCimServicesTicketResponse `xml:"AcquireCimServicesTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireCimServicesTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCimServicesTicket) (*types.AcquireCimServicesTicketResponse, error) { var reqBody, resBody AcquireCimServicesTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireCloneTicketBody struct { Req *types.AcquireCloneTicket `xml:"urn:vim25 AcquireCloneTicket,omitempty"` Res *types.AcquireCloneTicketResponse `xml:"AcquireCloneTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b 
*AcquireCloneTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCloneTicket) (*types.AcquireCloneTicketResponse, error) { var reqBody, resBody AcquireCloneTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireCredentialsInGuestBody struct { Req *types.AcquireCredentialsInGuest `xml:"urn:vim25 AcquireCredentialsInGuest,omitempty"` Res *types.AcquireCredentialsInGuestResponse `xml:"AcquireCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } func AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.AcquireCredentialsInGuest) (*types.AcquireCredentialsInGuestResponse, error) { var reqBody, resBody AcquireCredentialsInGuestBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireGenericServiceTicketBody struct { Req *types.AcquireGenericServiceTicket `xml:"urn:vim25 AcquireGenericServiceTicket,omitempty"` Res *types.AcquireGenericServiceTicketResponse `xml:"AcquireGenericServiceTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireGenericServiceTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireGenericServiceTicket) (*types.AcquireGenericServiceTicketResponse, error) { var reqBody, resBody AcquireGenericServiceTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireLocalTicketBody struct { Req *types.AcquireLocalTicket `xml:"urn:vim25 AcquireLocalTicket,omitempty"` Res 
*types.AcquireLocalTicketResponse `xml:"AcquireLocalTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireLocalTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireLocalTicket) (*types.AcquireLocalTicketResponse, error) { var reqBody, resBody AcquireLocalTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireMksTicketBody struct { Req *types.AcquireMksTicket `xml:"urn:vim25 AcquireMksTicket,omitempty"` Res *types.AcquireMksTicketResponse `xml:"AcquireMksTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireMksTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireMksTicket) (*types.AcquireMksTicketResponse, error) { var reqBody, resBody AcquireMksTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AcquireTicketBody struct { Req *types.AcquireTicket `xml:"urn:vim25 AcquireTicket,omitempty"` Res *types.AcquireTicketResponse `xml:"AcquireTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AcquireTicketBody) Fault() *soap.Fault { return b.Fault_ } func AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireTicket) (*types.AcquireTicketResponse, error) { var reqBody, resBody AcquireTicketBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddAuthorizationRoleBody struct { Req *types.AddAuthorizationRole `xml:"urn:vim25 AddAuthorizationRole,omitempty"` Res *types.AddAuthorizationRoleResponse 
`xml:"AddAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } func AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.AddAuthorizationRole) (*types.AddAuthorizationRoleResponse, error) { var reqBody, resBody AddAuthorizationRoleBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddCustomFieldDefBody struct { Req *types.AddCustomFieldDef `xml:"urn:vim25 AddCustomFieldDef,omitempty"` Res *types.AddCustomFieldDefResponse `xml:"AddCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } func AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddCustomFieldDef) (*types.AddCustomFieldDefResponse, error) { var reqBody, resBody AddCustomFieldDefBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddDVPortgroup_TaskBody struct { Req *types.AddDVPortgroup_Task `xml:"urn:vim25 AddDVPortgroup_Task,omitempty"` Res *types.AddDVPortgroup_TaskResponse `xml:"AddDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ } func AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDVPortgroup_Task) (*types.AddDVPortgroup_TaskResponse, error) { var reqBody, resBody AddDVPortgroup_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddDisks_TaskBody struct { Req *types.AddDisks_Task `xml:"urn:vim25 AddDisks_Task,omitempty"` Res *types.AddDisks_TaskResponse 
`xml:"AddDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } func AddDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDisks_Task) (*types.AddDisks_TaskResponse, error) { var reqBody, resBody AddDisks_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddFilterBody struct { Req *types.AddFilter `xml:"urn:vim25 AddFilter,omitempty"` Res *types.AddFilterResponse `xml:"AddFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddFilterBody) Fault() *soap.Fault { return b.Fault_ } func AddFilter(ctx context.Context, r soap.RoundTripper, req *types.AddFilter) (*types.AddFilterResponse, error) { var reqBody, resBody AddFilterBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddFilterEntitiesBody struct { Req *types.AddFilterEntities `xml:"urn:vim25 AddFilterEntities,omitempty"` Res *types.AddFilterEntitiesResponse `xml:"AddFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ } func AddFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.AddFilterEntities) (*types.AddFilterEntitiesResponse, error) { var reqBody, resBody AddFilterEntitiesBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddGuestAliasBody struct { Req *types.AddGuestAlias `xml:"urn:vim25 AddGuestAlias,omitempty"` Res *types.AddGuestAliasResponse `xml:"AddGuestAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b 
*AddGuestAliasBody) Fault() *soap.Fault { return b.Fault_ } func AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuestAlias) (*types.AddGuestAliasResponse, error) { var reqBody, resBody AddGuestAliasBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddHost_TaskBody struct { Req *types.AddHost_Task `xml:"urn:vim25 AddHost_Task,omitempty"` Res *types.AddHost_TaskResponse `xml:"AddHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } func AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_Task) (*types.AddHost_TaskResponse, error) { var reqBody, resBody AddHost_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddInternetScsiSendTargetsBody struct { Req *types.AddInternetScsiSendTargets `xml:"urn:vim25 AddInternetScsiSendTargets,omitempty"` Res *types.AddInternetScsiSendTargetsResponse `xml:"AddInternetScsiSendTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ } func AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiSendTargets) (*types.AddInternetScsiSendTargetsResponse, error) { var reqBody, resBody AddInternetScsiSendTargetsBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddInternetScsiStaticTargetsBody struct { Req *types.AddInternetScsiStaticTargets `xml:"urn:vim25 AddInternetScsiStaticTargets,omitempty"` Res *types.AddInternetScsiStaticTargetsResponse `xml:"AddInternetScsiStaticTargetsResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ } func AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiStaticTargets) (*types.AddInternetScsiStaticTargetsResponse, error) { var reqBody, resBody AddInternetScsiStaticTargetsBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddKeyBody struct { Req *types.AddKey `xml:"urn:vim25 AddKey,omitempty"` Res *types.AddKeyResponse `xml:"AddKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddKeyBody) Fault() *soap.Fault { return b.Fault_ } func AddKey(ctx context.Context, r soap.RoundTripper, req *types.AddKey) (*types.AddKeyResponse, error) { var reqBody, resBody AddKeyBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddKeysBody struct { Req *types.AddKeys `xml:"urn:vim25 AddKeys,omitempty"` Res *types.AddKeysResponse `xml:"AddKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddKeysBody) Fault() *soap.Fault { return b.Fault_ } func AddKeys(ctx context.Context, r soap.RoundTripper, req *types.AddKeys) (*types.AddKeysResponse, error) { var reqBody, resBody AddKeysBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddLicenseBody struct { Req *types.AddLicense `xml:"urn:vim25 AddLicense,omitempty"` Res *types.AddLicenseResponse `xml:"AddLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddLicenseBody) Fault() *soap.Fault { return b.Fault_ } func AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) 
(*types.AddLicenseResponse, error) { var reqBody, resBody AddLicenseBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddMonitoredEntitiesBody struct { Req *types.AddMonitoredEntities `xml:"urn:vim25 AddMonitoredEntities,omitempty"` Res *types.AddMonitoredEntitiesResponse `xml:"AddMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ } func AddMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.AddMonitoredEntities) (*types.AddMonitoredEntitiesResponse, error) { var reqBody, resBody AddMonitoredEntitiesBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddNetworkResourcePoolBody struct { Req *types.AddNetworkResourcePool `xml:"urn:vim25 AddNetworkResourcePool,omitempty"` Res *types.AddNetworkResourcePoolResponse `xml:"AddNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } func AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.AddNetworkResourcePool) (*types.AddNetworkResourcePoolResponse, error) { var reqBody, resBody AddNetworkResourcePoolBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddPortGroupBody struct { Req *types.AddPortGroup `xml:"urn:vim25 AddPortGroup,omitempty"` Res *types.AddPortGroupResponse `xml:"AddPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddPortGroupBody) Fault() *soap.Fault { return b.Fault_ } func AddPortGroup(ctx context.Context, r soap.RoundTripper, req *types.AddPortGroup) 
(*types.AddPortGroupResponse, error) { var reqBody, resBody AddPortGroupBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddServiceConsoleVirtualNicBody struct { Req *types.AddServiceConsoleVirtualNic `xml:"urn:vim25 AddServiceConsoleVirtualNic,omitempty"` Res *types.AddServiceConsoleVirtualNicResponse `xml:"AddServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } func AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddServiceConsoleVirtualNic) (*types.AddServiceConsoleVirtualNicResponse, error) { var reqBody, resBody AddServiceConsoleVirtualNicBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddStandaloneHost_TaskBody struct { Req *types.AddStandaloneHost_Task `xml:"urn:vim25 AddStandaloneHost_Task,omitempty"` Res *types.AddStandaloneHost_TaskResponse `xml:"AddStandaloneHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddStandaloneHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } func AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddStandaloneHost_Task) (*types.AddStandaloneHost_TaskResponse, error) { var reqBody, resBody AddStandaloneHost_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddVirtualNicBody struct { Req *types.AddVirtualNic `xml:"urn:vim25 AddVirtualNic,omitempty"` Res *types.AddVirtualNicResponse `xml:"AddVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } func 
AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualNic) (*types.AddVirtualNicResponse, error) { var reqBody, resBody AddVirtualNicBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AddVirtualSwitchBody struct { Req *types.AddVirtualSwitch `xml:"urn:vim25 AddVirtualSwitch,omitempty"` Res *types.AddVirtualSwitchResponse `xml:"AddVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AddVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } func AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualSwitch) (*types.AddVirtualSwitchResponse, error) { var reqBody, resBody AddVirtualSwitchBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AllocateIpv4AddressBody struct { Req *types.AllocateIpv4Address `xml:"urn:vim25 AllocateIpv4Address,omitempty"` Res *types.AllocateIpv4AddressResponse `xml:"AllocateIpv4AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AllocateIpv4AddressBody) Fault() *soap.Fault { return b.Fault_ } func AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv4Address) (*types.AllocateIpv4AddressResponse, error) { var reqBody, resBody AllocateIpv4AddressBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AllocateIpv6AddressBody struct { Req *types.AllocateIpv6Address `xml:"urn:vim25 AllocateIpv6Address,omitempty"` Res *types.AllocateIpv6AddressResponse `xml:"AllocateIpv6AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AllocateIpv6AddressBody) Fault() *soap.Fault { return b.Fault_ } func AllocateIpv6Address(ctx 
context.Context, r soap.RoundTripper, req *types.AllocateIpv6Address) (*types.AllocateIpv6AddressResponse, error) { var reqBody, resBody AllocateIpv6AddressBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AnswerVMBody struct { Req *types.AnswerVM `xml:"urn:vim25 AnswerVM,omitempty"` Res *types.AnswerVMResponse `xml:"AnswerVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AnswerVMBody) Fault() *soap.Fault { return b.Fault_ } func AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*types.AnswerVMResponse, error) { var reqBody, resBody AnswerVMBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyEntitiesConfig_TaskBody struct { Req *types.ApplyEntitiesConfig_Task `xml:"urn:vim25 ApplyEntitiesConfig_Task,omitempty"` Res *types.ApplyEntitiesConfig_TaskResponse `xml:"ApplyEntitiesConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *ApplyEntitiesConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } func ApplyEntitiesConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyEntitiesConfig_Task) (*types.ApplyEntitiesConfig_TaskResponse, error) { var reqBody, resBody ApplyEntitiesConfig_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyEvcModeVM_TaskBody struct { Req *types.ApplyEvcModeVM_Task `xml:"urn:vim25 ApplyEvcModeVM_Task,omitempty"` Res *types.ApplyEvcModeVM_TaskResponse `xml:"ApplyEvcModeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *ApplyEvcModeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } func ApplyEvcModeVM_Task(ctx context.Context, r 
soap.RoundTripper, req *types.ApplyEvcModeVM_Task) (*types.ApplyEvcModeVM_TaskResponse, error) { var reqBody, resBody ApplyEvcModeVM_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyHostConfig_TaskBody struct { Req *types.ApplyHostConfig_Task `xml:"urn:vim25 ApplyHostConfig_Task,omitempty"` Res *types.ApplyHostConfig_TaskResponse `xml:"ApplyHostConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *ApplyHostConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } func ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyHostConfig_Task) (*types.ApplyHostConfig_TaskResponse, error) { var reqBody, resBody ApplyHostConfig_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyRecommendationBody struct { Req *types.ApplyRecommendation `xml:"urn:vim25 ApplyRecommendation,omitempty"` Res *types.ApplyRecommendationResponse `xml:"ApplyRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *ApplyRecommendationBody) Fault() *soap.Fault { return b.Fault_ } func ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.ApplyRecommendation) (*types.ApplyRecommendationResponse, error) { var reqBody, resBody ApplyRecommendationBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyStorageDrsRecommendationToPod_TaskBody struct { Req *types.ApplyStorageDrsRecommendationToPod_Task `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty"` Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` } func (b *ApplyStorageDrsRecommendationToPod_TaskBody) Fault() *soap.Fault { return b.Fault_ } func ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendationToPod_Task) (*types.ApplyStorageDrsRecommendationToPod_TaskResponse, error) { var reqBody, resBody ApplyStorageDrsRecommendationToPod_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type ApplyStorageDrsRecommendation_TaskBody struct { Req *types.ApplyStorageDrsRecommendation_Task `xml:"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty"` Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"ApplyStorageDrsRecommendation_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *ApplyStorageDrsRecommendation_TaskBody) Fault() *soap.Fault { return b.Fault_ } func ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendation_Task) (*types.ApplyStorageDrsRecommendation_TaskResponse, error) { var reqBody, resBody ApplyStorageDrsRecommendation_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AreAlarmActionsEnabledBody struct { Req *types.AreAlarmActionsEnabled `xml:"urn:vim25 AreAlarmActionsEnabled,omitempty"` Res *types.AreAlarmActionsEnabledResponse `xml:"AreAlarmActionsEnabledResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AreAlarmActionsEnabledBody) Fault() *soap.Fault { return b.Fault_ } func AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types.AreAlarmActionsEnabled) (*types.AreAlarmActionsEnabledResponse, error) { var reqBody, resBody AreAlarmActionsEnabledBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { 
return nil, err } return resBody.Res, nil } type AssignUserToGroupBody struct { Req *types.AssignUserToGroup `xml:"urn:vim25 AssignUserToGroup,omitempty"` Res *types.AssignUserToGroupResponse `xml:"AssignUserToGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AssignUserToGroupBody) Fault() *soap.Fault { return b.Fault_ } func AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.AssignUserToGroup) (*types.AssignUserToGroupResponse, error) { var reqBody, resBody AssignUserToGroupBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AssociateProfileBody struct { Req *types.AssociateProfile `xml:"urn:vim25 AssociateProfile,omitempty"` Res *types.AssociateProfileResponse `xml:"AssociateProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AssociateProfileBody) Fault() *soap.Fault { return b.Fault_ } func AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.AssociateProfile) (*types.AssociateProfileResponse, error) { var reqBody, resBody AssociateProfileBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type AttachDisk_TaskBody struct { Req *types.AttachDisk_Task `xml:"urn:vim25 AttachDisk_Task,omitempty"` Res *types.AttachDisk_TaskResponse `xml:"AttachDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AttachDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } func AttachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachDisk_Task) (*types.AttachDisk_TaskResponse, error) { var reqBody, resBody AttachDisk_TaskBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { return nil, err } return resBody.Res, nil } type 
AttachScsiLunBody struct { Req *types.AttachScsiLun `xml:"urn:vim25 AttachScsiLun,omitempty"` Res *types.AttachScsiLunResponse `xml:"AttachScsiLunResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } func (b *AttachScsiLunBody) Fault() *soap.Fault { return b.Fault_ } func AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLun) (*types.AttachScsiLunResponse, error) { var reqBody, resBody AttachScsiLunBody reqBody.Req = req if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go
vendor/github.com/vmware/govmomi/vim25/methods/service_content.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package methods import ( "context" "time" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) // copy of vim25.ServiceInstance to avoid import cycle var serviceInstance = types.ManagedObjectReference{ Type: "ServiceInstance", Value: "ServiceInstance", } func GetServiceContent(ctx context.Context, r soap.RoundTripper) (types.ServiceContent, error) { req := types.RetrieveServiceContent{ This: serviceInstance, } res, err := RetrieveServiceContent(ctx, r, &req) if err != nil { return types.ServiceContent{}, err } return res.Returnval, nil } func GetCurrentTime(ctx context.Context, r soap.RoundTripper) (*time.Time, error) { req := types.CurrentTime{ This: serviceInstance, } res, err := CurrentTime(ctx, r, &req) if err != nil { return nil, err } return &res.Returnval, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go
vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package xml import ( "fmt" "reflect" "strings" "sync" ) // typeInfo holds details for the xml representation of a type. type typeInfo struct { xmlname *fieldInfo fields []fieldInfo } // fieldInfo holds details for the xml representation of a single field. type fieldInfo struct { idx []int name string xmlns string flags fieldFlags parents []string } type fieldFlags int const ( fElement fieldFlags = 1 << iota fAttr fCDATA fCharData fInnerXML fComment fAny fOmitEmpty fTypeAttr fMode = fElement | fAttr | fCDATA | fCharData | fInnerXML | fComment | fAny xmlName = "XMLName" ) var tinfoMap sync.Map // map[reflect.Type]*typeInfo var nameType = reflect.TypeFor[Name]() // getTypeInfo returns the typeInfo structure with details necessary // for marshaling and unmarshaling typ. func getTypeInfo(typ reflect.Type) (*typeInfo, error) { if ti, ok := tinfoMap.Load(typ); ok { return ti.(*typeInfo), nil } tinfo := &typeInfo{} if typ.Kind() == reflect.Struct && typ != nameType { n := typ.NumField() for i := 0; i < n; i++ { f := typ.Field(i) if (!f.IsExported() && !f.Anonymous) || f.Tag.Get("xml") == "-" { continue // Private field } // For embedded structs, embed its fields. if f.Anonymous { t := f.Type if t.Kind() == reflect.Pointer { t = t.Elem() } if t.Kind() == reflect.Struct { inner, err := getTypeInfo(t) if err != nil { return nil, err } if tinfo.xmlname == nil { tinfo.xmlname = inner.xmlname } for _, finfo := range inner.fields { finfo.idx = append([]int{i}, finfo.idx...) if err := addFieldInfo(typ, tinfo, &finfo); err != nil { return nil, err } } continue } } finfo, err := structFieldInfo(typ, &f) if err != nil { return nil, err } if f.Name == xmlName { tinfo.xmlname = finfo continue } // Add the field if it doesn't conflict with other fields. 
if err := addFieldInfo(typ, tinfo, finfo); err != nil { return nil, err } } } ti, _ := tinfoMap.LoadOrStore(typ, tinfo) return ti.(*typeInfo), nil } // structFieldInfo builds and returns a fieldInfo for f. func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { finfo := &fieldInfo{idx: f.Index} // Split the tag from the xml namespace if necessary. tag := f.Tag.Get("xml") if ns, t, ok := strings.Cut(tag, " "); ok { finfo.xmlns, tag = ns, t } // Parse flags. tokens := strings.Split(tag, ",") if len(tokens) == 1 { finfo.flags = fElement } else { tag = tokens[0] for _, flag := range tokens[1:] { switch flag { case "attr": finfo.flags |= fAttr case "cdata": finfo.flags |= fCDATA case "chardata": finfo.flags |= fCharData case "innerxml": finfo.flags |= fInnerXML case "comment": finfo.flags |= fComment case "any": finfo.flags |= fAny case "omitempty": finfo.flags |= fOmitEmpty case "typeattr": finfo.flags |= fTypeAttr } } // Validate the flags used. valid := true switch mode := finfo.flags & fMode; mode { case 0: finfo.flags |= fElement case fAttr, fCDATA, fCharData, fInnerXML, fComment, fAny, fAny | fAttr: if f.Name == xmlName || tag != "" && mode != fAttr { valid = false } default: // This will also catch multiple modes in a single field. valid = false } if finfo.flags&fMode == fAny { finfo.flags |= fElement } if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { valid = false } if !valid { return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", f.Name, typ, f.Tag.Get("xml")) } } // Use of xmlns without a name is not allowed. if finfo.xmlns != "" && tag == "" { return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", f.Name, typ, f.Tag.Get("xml")) } if f.Name == xmlName { // The XMLName field records the XML element name. Don't // process it as usual because its name should default to // empty rather than to the field name. 
finfo.name = tag return finfo, nil } if tag == "" { // If the name part of the tag is completely empty, get // default from XMLName of underlying struct if feasible, // or field name otherwise. if xmlname := lookupXMLName(f.Type); xmlname != nil { finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name } else { finfo.name = f.Name } return finfo, nil } // Prepare field name and parents. parents := strings.Split(tag, ">") if parents[0] == "" { parents[0] = f.Name } if parents[len(parents)-1] == "" { return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) } finfo.name = parents[len(parents)-1] if len(parents) > 1 { if (finfo.flags & fElement) == 0 { return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) } finfo.parents = parents[:len(parents)-1] } // If the field type has an XMLName field, the names must match // so that the behavior of both marshaling and unmarshaling // is straightforward and unambiguous. if finfo.flags&fElement != 0 { ftyp := f.Type xmlname := lookupXMLName(ftyp) if xmlname != nil && xmlname.name != finfo.name { return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", finfo.name, typ, f.Name, xmlname.name, ftyp) } } return finfo, nil } // lookupXMLName returns the fieldInfo for typ's XMLName field // in case it exists and has a valid xml field tag, otherwise // it returns nil. func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } if typ.Kind() != reflect.Struct { return nil } for i, n := 0, typ.NumField(); i < n; i++ { f := typ.Field(i) if f.Name != xmlName { continue } finfo, err := structFieldInfo(typ, &f) if err == nil && finfo.name != "" { return finfo } // Also consider errors as a non-existent field tag // and let getTypeInfo itself report the error. 
break } return nil } // addFieldInfo adds finfo to tinfo.fields if there are no // conflicts, or if conflicts arise from previous fields that were // obtained from deeper embedded structures than finfo. In the latter // case, the conflicting entries are dropped. // A conflict occurs when the path (parent + name) to a field is // itself a prefix of another path, or when two paths match exactly. // It is okay for field paths to share a common, shorter prefix. func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { var conflicts []int Loop: // First, figure all conflicts. Most working code will have none. for i := range tinfo.fields { oldf := &tinfo.fields[i] if oldf.flags&fMode != newf.flags&fMode { continue } if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { continue } minl := min(len(newf.parents), len(oldf.parents)) for p := 0; p < minl; p++ { if oldf.parents[p] != newf.parents[p] { continue Loop } } if len(oldf.parents) > len(newf.parents) { if oldf.parents[len(newf.parents)] == newf.name { conflicts = append(conflicts, i) } } else if len(oldf.parents) < len(newf.parents) { if newf.parents[len(oldf.parents)] == oldf.name { conflicts = append(conflicts, i) } } else { if newf.name == oldf.name && newf.xmlns == oldf.xmlns { conflicts = append(conflicts, i) } } } // Without conflicts, add the new field and return. if conflicts == nil { tinfo.fields = append(tinfo.fields, *newf) return nil } // If any conflict is shallower, ignore the new field. // This matches the Go field resolution on embedding. for _, i := range conflicts { if len(tinfo.fields[i].idx) < len(newf.idx) { return nil } } // Otherwise, if any of them is at the same depth level, it's an error. 
for _, i := range conflicts { oldf := &tinfo.fields[i] if len(oldf.idx) == len(newf.idx) { f1 := typ.FieldByIndex(oldf.idx) f2 := typ.FieldByIndex(newf.idx) return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} } } // Otherwise, the new field is shallower, and thus takes precedence, // so drop the conflicting fields from tinfo and append the new one. for c := len(conflicts) - 1; c >= 0; c-- { i := conflicts[c] copy(tinfo.fields[i:], tinfo.fields[i+1:]) tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] } tinfo.fields = append(tinfo.fields, *newf) return nil } // A TagPathError represents an error in the unmarshaling process // caused by the use of field tags with conflicting paths. type TagPathError struct { Struct reflect.Type Field1, Tag1 string Field2, Tag2 string } func (e *TagPathError) Error() string { return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) } const ( initNilPointers = true dontInitNilPointers = false ) // value returns v's field value corresponding to finfo. // It's equivalent to v.FieldByIndex(finfo.idx), but when passed // initNilPointers, it initializes and dereferences pointers as necessary. // When passed dontInitNilPointers and a nil pointer is reached, the function // returns a zero reflect.Value. func (finfo *fieldInfo) value(v reflect.Value, shouldInitNilPointers bool) reflect.Value { for i, x := range finfo.idx { if i > 0 { t := v.Type() if t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Struct { if v.IsNil() { if !shouldInitNilPointers { return reflect.Value{} } v.Set(reflect.New(v.Type().Elem())) } v = v.Elem() } } v = v.Field(x) } return v }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/xml/xml.go
vendor/github.com/vmware/govmomi/vim25/xml/xml.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package xml implements a simple XML 1.0 parser that // understands XML name spaces. package xml // References: // Annotated XML spec: https://www.xml.com/axml/testaxml.htm // XML name spaces: https://www.w3.org/TR/REC-xml-names/ import ( "bufio" "bytes" "errors" "fmt" "io" "reflect" "strconv" "strings" "unicode" "unicode/utf8" ) // A SyntaxError represents a syntax error in the XML input stream. type SyntaxError struct { Msg string Line int } func (e *SyntaxError) Error() string { return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg } // A Name represents an XML name (Local) annotated // with a name space identifier (Space). // In tokens returned by [Decoder.Token], the Space identifier // is given as a canonical URL, not the short prefix used // in the document being parsed. type Name struct { Space, Local string } // An Attr represents an attribute in an XML element (Name=Value). type Attr struct { Name Name Value string } // A Token is an interface holding one of the token types: // [StartElement], [EndElement], [CharData], [Comment], [ProcInst], or [Directive]. type Token any // A StartElement represents an XML start element. type StartElement struct { Name Name Attr []Attr } // Copy creates a new copy of StartElement. func (e StartElement) Copy() StartElement { attrs := make([]Attr, len(e.Attr)) copy(attrs, e.Attr) e.Attr = attrs return e } // End returns the corresponding XML end element. func (e StartElement) End() EndElement { return EndElement{e.Name} } // An EndElement represents an XML end element. type EndElement struct { Name Name } // A CharData represents XML character data (raw text), // in which XML escape sequences have been replaced by // the characters they represent. type CharData []byte // Copy creates a new copy of CharData. 
func (c CharData) Copy() CharData { return CharData(bytes.Clone(c)) } // A Comment represents an XML comment of the form <!--comment-->. // The bytes do not include the <!-- and --> comment markers. type Comment []byte // Copy creates a new copy of Comment. func (c Comment) Copy() Comment { return Comment(bytes.Clone(c)) } // A ProcInst represents an XML processing instruction of the form <?target inst?> type ProcInst struct { Target string Inst []byte } // Copy creates a new copy of ProcInst. func (p ProcInst) Copy() ProcInst { p.Inst = bytes.Clone(p.Inst) return p } // A Directive represents an XML directive of the form <!text>. // The bytes do not include the <! and > markers. type Directive []byte // Copy creates a new copy of Directive. func (d Directive) Copy() Directive { return Directive(bytes.Clone(d)) } // CopyToken returns a copy of a Token. func CopyToken(t Token) Token { switch v := t.(type) { case CharData: return v.Copy() case Comment: return v.Copy() case Directive: return v.Copy() case ProcInst: return v.Copy() case StartElement: return v.Copy() } return t } // A TokenReader is anything that can decode a stream of XML tokens, including a // [Decoder]. // // When Token encounters an error or end-of-file condition after successfully // reading a token, it returns the token. It may return the (non-nil) error from // the same call or return the error (and a nil token) from a subsequent call. // An instance of this general case is that a TokenReader returning a non-nil // token at the end of the token stream may return either io.EOF or a nil error. // The next Read should return nil, [io.EOF]. // // Implementations of Token are discouraged from returning a nil token with a // nil error. Callers should treat a return of nil, nil as indicating that // nothing happened; in particular it does not indicate EOF. type TokenReader interface { Token() (Token, error) } // A Decoder represents an XML parser reading a particular input stream. 
// The parser assumes that its input is encoded in UTF-8. type Decoder struct { // Strict defaults to true, enforcing the requirements // of the XML specification. // If set to false, the parser allows input containing common // mistakes: // * If an element is missing an end tag, the parser invents // end tags as necessary to keep the return values from Token // properly balanced. // * In attribute values and character data, unknown or malformed // character entities (sequences beginning with &) are left alone. // // Setting: // // d.Strict = false // d.AutoClose = xml.HTMLAutoClose // d.Entity = xml.HTMLEntity // // creates a parser that can handle typical HTML. // // Strict mode does not enforce the requirements of the XML name spaces TR. // In particular it does not reject name space tags using undefined prefixes. // Such tags are recorded with the unknown prefix as the name space URL. Strict bool // When Strict == false, AutoClose indicates a set of elements to // consider closed immediately after they are opened, regardless // of whether an end element is present. AutoClose []string // Entity can be used to map non-standard entity names to string replacements. // The parser behaves as if these standard mappings are present in the map, // regardless of the actual map content: // // "lt": "<", // "gt": ">", // "amp": "&", // "apos": "'", // "quot": `"`, Entity map[string]string // CharsetReader, if non-nil, defines a function to generate // charset-conversion readers, converting from the provided // non-UTF-8 charset into UTF-8. If CharsetReader is nil or // returns an error, parsing stops with an error. One of the // CharsetReader's result values must be non-nil. CharsetReader func(charset string, input io.Reader) (io.Reader, error) // DefaultSpace sets the default name space used for unadorned tags, // as if the entire XML stream were wrapped in an element containing // the attribute xmlns="DefaultSpace". 
DefaultSpace string // TypeFunc is used to map type names to actual types. TypeFunc func(string) (reflect.Type, bool) r io.ByteReader t TokenReader buf bytes.Buffer saved *bytes.Buffer stk *stack free *stack needClose bool toClose Name nextToken Token nextByte int ns map[string]string err error line int linestart int64 offset int64 unmarshalDepth int } // NewDecoder creates a new XML parser reading from r. // If r does not implement [io.ByteReader], NewDecoder will // do its own buffering. func NewDecoder(r io.Reader) *Decoder { d := &Decoder{ ns: make(map[string]string), nextByte: -1, line: 1, Strict: true, } d.switchToReader(r) return d } // NewTokenDecoder creates a new XML parser using an underlying token stream. func NewTokenDecoder(t TokenReader) *Decoder { // Is it already a Decoder? if d, ok := t.(*Decoder); ok { return d } d := &Decoder{ ns: make(map[string]string), t: t, nextByte: -1, line: 1, Strict: true, } return d } // Token returns the next XML token in the input stream. // At the end of the input stream, Token returns nil, [io.EOF]. // // Slices of bytes in the returned token data refer to the // parser's internal buffer and remain valid only until the next // call to Token. To acquire a copy of the bytes, call [CopyToken] // or the token's Copy method. // // Token expands self-closing elements such as <br> // into separate start and end elements returned by successive calls. // // Token guarantees that the [StartElement] and [EndElement] // tokens it returns are properly nested and matched: // if Token encounters an unexpected end element // or EOF before all expected end elements, // it will return an error. // // If [Decoder.CharsetReader] is called and returns an error, // the error is wrapped and returned. // // Token implements XML name spaces as described by // https://www.w3.org/TR/REC-xml-names/. Each of the // [Name] structures contained in the Token has the Space // set to the URL identifying its name space when known. 
// If Token encounters an unrecognized name space prefix, // it uses the prefix as the Space rather than report an error. func (d *Decoder) Token() (Token, error) { var t Token var err error if d.stk != nil && d.stk.kind == stkEOF { return nil, io.EOF } if d.nextToken != nil { t = d.nextToken d.nextToken = nil } else { if t, err = d.rawToken(); t == nil && err != nil { if err == io.EOF && d.stk != nil && d.stk.kind != stkEOF { err = d.syntaxError("unexpected EOF") } return nil, err } // We still have a token to process, so clear any // errors (e.g. EOF) and proceed. err = nil } if !d.Strict { if t1, ok := d.autoClose(t); ok { d.nextToken = t t = t1 } } switch t1 := t.(type) { case StartElement: // In XML name spaces, the translations listed in the // attributes apply to the element name and // to the other attribute names, so process // the translations first. for _, a := range t1.Attr { if a.Name.Space == xmlnsPrefix { v, ok := d.ns[a.Name.Local] d.pushNs(a.Name.Local, v, ok) d.ns[a.Name.Local] = a.Value } if a.Name.Space == "" && a.Name.Local == xmlnsPrefix { // Default space for untagged names v, ok := d.ns[""] d.pushNs("", v, ok) d.ns[""] = a.Value } } d.pushElement(t1.Name) d.translate(&t1.Name, true) for i := range t1.Attr { d.translate(&t1.Attr[i].Name, false) } t = t1 case EndElement: if !d.popElement(&t1) { return nil, d.err } t = t1 } return t, err } const ( xmlURL = "http://www.w3.org/XML/1998/namespace" xmlnsPrefix = "xmlns" xmlPrefix = "xml" ) // Apply name space translation to name n. // The default name space (for Space=="") // applies only to element names, not to attribute names. 
func (d *Decoder) translate(n *Name, isElementName bool) { switch { case n.Space == xmlnsPrefix: return case n.Space == "" && !isElementName: return case n.Space == xmlPrefix: n.Space = xmlURL case n.Space == "" && n.Local == xmlnsPrefix: return } if v, ok := d.ns[n.Space]; ok { n.Space = v } else if n.Space == "" { n.Space = d.DefaultSpace } } func (d *Decoder) switchToReader(r io.Reader) { // Get efficient byte at a time reader. // Assume that if reader has its own // ReadByte, it's efficient enough. // Otherwise, use bufio. if rb, ok := r.(io.ByteReader); ok { d.r = rb } else { d.r = bufio.NewReader(r) } } // Parsing state - stack holds old name space translations // and the current set of open elements. The translations to pop when // ending a given tag are *below* it on the stack, which is // more work but forced on us by XML. type stack struct { next *stack kind int name Name ok bool } const ( stkStart = iota stkNs stkEOF ) func (d *Decoder) push(kind int) *stack { s := d.free if s != nil { d.free = s.next } else { s = new(stack) } s.next = d.stk s.kind = kind d.stk = s return s } func (d *Decoder) pop() *stack { s := d.stk if s != nil { d.stk = s.next s.next = d.free d.free = s } return s } // Record that after the current element is finished // (that element is already pushed on the stack) // Token should return EOF until popEOF is called. func (d *Decoder) pushEOF() { // Walk down stack to find Start. // It might not be the top, because there might be stkNs // entries above it. start := d.stk for start.kind != stkStart { start = start.next } // The stkNs entries below a start are associated with that // element too; skip over them. for start.next != nil && start.next.kind == stkNs { start = start.next } s := d.free if s != nil { d.free = s.next } else { s = new(stack) } s.kind = stkEOF s.next = start.next start.next = s } // Undo a pushEOF. // The element must have been finished, so the EOF should be at the top of the stack. 
func (d *Decoder) popEOF() bool { if d.stk == nil || d.stk.kind != stkEOF { return false } d.pop() return true } // Record that we are starting an element with the given name. func (d *Decoder) pushElement(name Name) { s := d.push(stkStart) s.name = name } // Record that we are changing the value of ns[local]. // The old value is url, ok. func (d *Decoder) pushNs(local string, url string, ok bool) { s := d.push(stkNs) s.name.Local = local s.name.Space = url s.ok = ok } // Creates a SyntaxError with the current line number. func (d *Decoder) syntaxError(msg string) error { return &SyntaxError{Msg: msg, Line: d.line} } // Record that we are ending an element with the given name. // The name must match the record at the top of the stack, // which must be a pushElement record. // After popping the element, apply any undo records from // the stack to restore the name translations that existed // before we saw this element. func (d *Decoder) popElement(t *EndElement) bool { s := d.pop() name := t.Name switch { case s == nil || s.kind != stkStart: d.err = d.syntaxError("unexpected end element </" + name.Local + ">") return false case s.name.Local != name.Local: if !d.Strict { d.needClose = true d.toClose = t.Name t.Name = s.name return true } d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">") return false case s.name.Space != name.Space: ns := name.Space if name.Space == "" { ns = `""` } d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + " closed by </" + name.Local + "> in space " + ns) return false } d.translate(&t.Name, true) // Pop stack until a Start or EOF is on the top, undoing the // translations that were associated with the element we just closed. 
for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { s := d.pop() if s.ok { d.ns[s.name.Local] = s.name.Space } else { delete(d.ns, s.name.Local) } } return true } // If the top element on the stack is autoclosing and // t is not the end tag, invent the end tag. func (d *Decoder) autoClose(t Token) (Token, bool) { if d.stk == nil || d.stk.kind != stkStart { return nil, false } for _, s := range d.AutoClose { if strings.EqualFold(s, d.stk.name.Local) { // This one should be auto closed if t doesn't close it. et, ok := t.(EndElement) if !ok || !strings.EqualFold(et.Name.Local, d.stk.name.Local) { return EndElement{d.stk.name}, true } break } } return nil, false } var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") // RawToken is like [Decoder.Token] but does not verify that // start and end elements match and does not translate // name space prefixes to their corresponding URLs. func (d *Decoder) RawToken() (Token, error) { if d.unmarshalDepth > 0 { return nil, errRawToken } return d.rawToken() } func (d *Decoder) rawToken() (Token, error) { if d.t != nil { return d.t.Token() } if d.err != nil { return nil, d.err } if d.needClose { // The last element we read was self-closing and // we returned just the StartElement half. // Return the EndElement half now. d.needClose = false return EndElement{d.toClose}, nil } b, ok := d.getc() if !ok { return nil, d.err } if b != '<' { // Text section. 
d.ungetc(b) data := d.text(-1, false) if data == nil { return nil, d.err } return CharData(data), nil } if b, ok = d.mustgetc(); !ok { return nil, d.err } switch b { case '/': // </: End element var name Name if name, ok = d.nsname(); !ok { if d.err == nil { d.err = d.syntaxError("expected element name after </") } return nil, d.err } d.space() if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != '>' { d.err = d.syntaxError("invalid characters between </" + name.Local + " and >") return nil, d.err } return EndElement{name}, nil case '?': // <?: Processing instruction. var target string if target, ok = d.name(); !ok { if d.err == nil { d.err = d.syntaxError("expected target name after <?") } return nil, d.err } d.space() d.buf.Reset() var b0 byte for { if b, ok = d.mustgetc(); !ok { return nil, d.err } d.buf.WriteByte(b) if b0 == '?' && b == '>' { break } b0 = b } data := d.buf.Bytes() data = data[0 : len(data)-2] // chop ?> if target == "xml" { content := string(data) ver := procInst("version", content) if ver != "" && ver != "1.0" { d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) return nil, d.err } enc := procInst("encoding", content) if enc != "" && enc != "utf-8" && enc != "UTF-8" && !strings.EqualFold(enc, "utf-8") { if d.CharsetReader == nil { d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) return nil, d.err } newr, err := d.CharsetReader(enc, d.r.(io.Reader)) if err != nil { d.err = fmt.Errorf("xml: opening charset %q: %w", enc, err) return nil, d.err } if newr == nil { panic("CharsetReader returned a nil Reader for charset " + enc) } d.switchToReader(newr) } } return ProcInst{target, data}, nil case '!': // <!: Maybe comment, maybe CDATA. if b, ok = d.mustgetc(); !ok { return nil, d.err } switch b { case '-': // <!- // Probably <!-- for a comment. 
if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != '-' { d.err = d.syntaxError("invalid sequence <!- not part of <!--") return nil, d.err } // Look for terminator. d.buf.Reset() var b0, b1 byte for { if b, ok = d.mustgetc(); !ok { return nil, d.err } d.buf.WriteByte(b) if b0 == '-' && b1 == '-' { if b != '>' { d.err = d.syntaxError( `invalid sequence "--" not allowed in comments`) return nil, d.err } break } b0, b1 = b1, b } data := d.buf.Bytes() data = data[0 : len(data)-3] // chop --> return Comment(data), nil case '[': // <![ // Probably <![CDATA[. for i := 0; i < 6; i++ { if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != "CDATA["[i] { d.err = d.syntaxError("invalid <![ sequence") return nil, d.err } } // Have <![CDATA[. Read text until ]]>. data := d.text(-1, true) if data == nil { return nil, d.err } return CharData(data), nil } // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc. // We don't care, but accumulate for caller. Quoted angle // brackets do not count for nesting. d.buf.Reset() d.buf.WriteByte(b) inquote := uint8(0) depth := 0 for { if b, ok = d.mustgetc(); !ok { return nil, d.err } if inquote == 0 && b == '>' && depth == 0 { break } HandleB: d.buf.WriteByte(b) switch { case b == inquote: inquote = 0 case inquote != 0: // in quotes, no special action case b == '\'' || b == '"': inquote = b case b == '>' && inquote == 0: depth-- case b == '<' && inquote == 0: // Look for <!-- to begin comment. s := "!--" for i := 0; i < len(s); i++ { if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != s[i] { for j := 0; j < i; j++ { d.buf.WriteByte(s[j]) } depth++ goto HandleB } } // Remove < that was written above. d.buf.Truncate(d.buf.Len() - 1) // Look for terminator. 
var b0, b1 byte for { if b, ok = d.mustgetc(); !ok { return nil, d.err } if b0 == '-' && b1 == '-' && b == '>' { break } b0, b1 = b1, b } // Replace the comment with a space in the returned Directive // body, so that markup parts that were separated by the comment // (like a "<" and a "!") don't get joined when re-encoding the // Directive, taking new semantic meaning. d.buf.WriteByte(' ') } } return Directive(d.buf.Bytes()), nil } // Must be an open element like <a href="foo"> d.ungetc(b) var ( name Name empty bool attr []Attr ) if name, ok = d.nsname(); !ok { if d.err == nil { d.err = d.syntaxError("expected element name after <") } return nil, d.err } attr = []Attr{} for { d.space() if b, ok = d.mustgetc(); !ok { return nil, d.err } if b == '/' { empty = true if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != '>' { d.err = d.syntaxError("expected /> in element") return nil, d.err } break } if b == '>' { break } d.ungetc(b) a := Attr{} if a.Name, ok = d.nsname(); !ok { if d.err == nil { d.err = d.syntaxError("expected attribute name in element") } return nil, d.err } d.space() if b, ok = d.mustgetc(); !ok { return nil, d.err } if b != '=' { if d.Strict { d.err = d.syntaxError("attribute name without = in element") return nil, d.err } d.ungetc(b) a.Value = a.Name.Local } else { d.space() data := d.attrval() if data == nil { return nil, d.err } a.Value = string(data) } attr = append(attr, a) } if empty { d.needClose = true d.toClose = name } return StartElement{name, attr}, nil } func (d *Decoder) attrval() []byte { b, ok := d.mustgetc() if !ok { return nil } // Handle quoted attribute values if b == '"' || b == '\'' { return d.text(int(b), false) } // Handle unquoted attribute values for strict parsers if d.Strict { d.err = d.syntaxError("unquoted or missing attribute value in element") return nil } // Handle unquoted attribute values for unstrict parsers d.ungetc(b) d.buf.Reset() for { b, ok = d.mustgetc() if !ok { return nil } // 
https://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' { d.buf.WriteByte(b) } else { d.ungetc(b) break } } return d.buf.Bytes() } // Skip spaces if any func (d *Decoder) space() { for { b, ok := d.getc() if !ok { return } switch b { case ' ', '\r', '\n', '\t': default: d.ungetc(b) return } } } // Read a single byte. // If there is no byte to read, return ok==false // and leave the error in d.err. // Maintain line number. func (d *Decoder) getc() (b byte, ok bool) { if d.err != nil { return 0, false } if d.nextByte >= 0 { b = byte(d.nextByte) d.nextByte = -1 } else { b, d.err = d.r.ReadByte() if d.err != nil { return 0, false } if d.saved != nil { d.saved.WriteByte(b) } } if b == '\n' { d.line++ d.linestart = d.offset + 1 } d.offset++ return b, true } // InputOffset returns the input stream byte offset of the current decoder position. // The offset gives the location of the end of the most recently returned token // and the beginning of the next token. func (d *Decoder) InputOffset() int64 { return d.offset } // InputPos returns the line of the current decoder position and the 1 based // input position of the line. The position gives the location of the end of the // most recently returned token. func (d *Decoder) InputPos() (line, column int) { return d.line, int(d.offset-d.linestart) + 1 } // Return saved offset. // If we did ungetc (nextByte >= 0), have to back up one. func (d *Decoder) savedOffset() int { n := d.saved.Len() if d.nextByte >= 0 { n-- } return n } // Must read a single byte. // If there is no byte to read, // set d.err to SyntaxError("unexpected EOF") // and return ok==false func (d *Decoder) mustgetc() (b byte, ok bool) { if b, ok = d.getc(); !ok { if d.err == io.EOF { d.err = d.syntaxError("unexpected EOF") } } return } // Unread a single byte. 
func (d *Decoder) ungetc(b byte) { if b == '\n' { d.line-- } d.nextByte = int(b) d.offset-- } var entity = map[string]rune{ "lt": '<', "gt": '>', "amp": '&', "apos": '\'', "quot": '"', } // Read plain text section (XML calls it character data). // If quote >= 0, we are in a quoted string and need to find the matching quote. // If cdata == true, we are in a <![CDATA[ section and need to find ]]>. // On failure return nil and leave the error in d.err. func (d *Decoder) text(quote int, cdata bool) []byte { var b0, b1 byte var trunc int d.buf.Reset() Input: for { b, ok := d.getc() if !ok { if cdata { if d.err == io.EOF { d.err = d.syntaxError("unexpected EOF in CDATA section") } return nil } break Input } // <![CDATA[ section ends with ]]>. // It is an error for ]]> to appear in ordinary text, // but it is allowed in quoted strings. if quote < 0 && b0 == ']' && b1 == ']' && b == '>' { if cdata { trunc = 2 break Input } d.err = d.syntaxError("unescaped ]]> not in CDATA section") return nil } // Stop reading text if we see a <. if b == '<' && !cdata { if quote >= 0 { d.err = d.syntaxError("unescaped < inside quoted string") return nil } d.ungetc('<') break Input } if quote >= 0 && b == byte(quote) { break Input } if b == '&' && !cdata { // Read escaped character expression up to semicolon. // XML in all its glory allows a document to define and use // its own character names with <!ENTITY ...> directives. // Parsers are required to recognize lt, gt, amp, apos, and quot // even if they have not been declared. 
before := d.buf.Len() d.buf.WriteByte('&') var ok bool var text string var haveText bool if b, ok = d.mustgetc(); !ok { return nil } if b == '#' { d.buf.WriteByte(b) if b, ok = d.mustgetc(); !ok { return nil } base := 10 if b == 'x' { base = 16 d.buf.WriteByte(b) if b, ok = d.mustgetc(); !ok { return nil } } start := d.buf.Len() for '0' <= b && b <= '9' || base == 16 && 'a' <= b && b <= 'f' || base == 16 && 'A' <= b && b <= 'F' { d.buf.WriteByte(b) if b, ok = d.mustgetc(); !ok { return nil } } if b != ';' { d.ungetc(b) } else { s := string(d.buf.Bytes()[start:]) d.buf.WriteByte(';') n, err := strconv.ParseUint(s, base, 64) if err == nil && n <= unicode.MaxRune { text = string(rune(n)) haveText = true } } } else { d.ungetc(b) if !d.readName() { if d.err != nil { return nil } } if b, ok = d.mustgetc(); !ok { return nil } if b != ';' { d.ungetc(b) } else { name := d.buf.Bytes()[before+1:] d.buf.WriteByte(';') if isName(name) { s := string(name) if r, ok := entity[s]; ok { text = string(r) haveText = true } else if d.Entity != nil { text, haveText = d.Entity[s] } } } } if haveText { d.buf.Truncate(before) d.buf.WriteString(text) b0, b1 = 0, 0 continue Input } if !d.Strict { b0, b1 = 0, 0 continue Input } ent := string(d.buf.Bytes()[before:]) if ent[len(ent)-1] != ';' { ent += " (no semicolon)" } d.err = d.syntaxError("invalid character entity " + ent) return nil } // We must rewrite unescaped \r and \r\n into \n. if b == '\r' { d.buf.WriteByte('\n') } else if b1 == '\r' && b == '\n' { // Skip \r\n--we already wrote \n. } else { d.buf.WriteByte(b) } b0, b1 = b1, b } data := d.buf.Bytes() data = data[0 : len(data)-trunc] // Inspect each rune for being a disallowed character. 
buf := data for len(buf) > 0 { r, size := utf8.DecodeRune(buf) if r == utf8.RuneError && size == 1 { d.err = d.syntaxError("invalid UTF-8") return nil } buf = buf[size:] if !isInCharacterRange(r) { d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r)) return nil } } return data } // Decide whether the given rune is in the XML Character Range, per // the Char production of https://www.xml.com/axml/testaxml.htm, // Section 2.2 Characters. func isInCharacterRange(r rune) (inrange bool) { return r == 0x09 || r == 0x0A || r == 0x0D || r >= 0x20 && r <= 0xD7FF || r >= 0xE000 && r <= 0xFFFD || r >= 0x10000 && r <= 0x10FFFF } // Get name space name: name with a : stuck in the middle. // The part before the : is the name space identifier. func (d *Decoder) nsname() (name Name, ok bool) { s, ok := d.name() if !ok { return } if strings.Count(s, ":") > 1 { return name, false } else if space, local, ok := strings.Cut(s, ":"); !ok || space == "" || local == "" { name.Local = s } else { name.Space = space name.Local = local } return name, true } // Get name: /first(first|second)*/ // Do not set d.err if the name is missing (unless unexpected EOF is received): // let the caller provide better context. func (d *Decoder) name() (s string, ok bool) { d.buf.Reset() if !d.readName() { return "", false } // Now we check the characters. b := d.buf.Bytes() if !isName(b) { d.err = d.syntaxError("invalid XML name: " + string(b)) return "", false } return string(b), true } // Read a name and append its bytes to d.buf. // The name is delimited by any single-byte character not valid in names. // All multi-byte characters are accepted; the caller must check their validity. 
func (d *Decoder) readName() (ok bool) { var b byte if b, ok = d.mustgetc(); !ok { return } if b < utf8.RuneSelf && !isNameByte(b) { d.ungetc(b) return false } d.buf.WriteByte(b) for { if b, ok = d.mustgetc(); !ok { return } if b < utf8.RuneSelf && !isNameByte(b) { d.ungetc(b) break } d.buf.WriteByte(b) } return true } func isNameByte(c byte) bool { return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c == ':' || c == '.' || c == '-' } func isName(s []byte) bool { if len(s) == 0 { return false } c, n := utf8.DecodeRune(s) if c == utf8.RuneError && n == 1 { return false } if !unicode.Is(first, c) { return false } for n < len(s) { s = s[n:] c, n = utf8.DecodeRune(s) if c == utf8.RuneError && n == 1 { return false } if !unicode.Is(first, c) && !unicode.Is(second, c) { return false } } return true } func isNameString(s string) bool { if len(s) == 0 { return false } c, n := utf8.DecodeRuneInString(s) if c == utf8.RuneError && n == 1 { return false } if !unicode.Is(first, c) { return false } for n < len(s) { s = s[n:] c, n = utf8.DecodeRuneInString(s) if c == utf8.RuneError && n == 1 { return false } if !unicode.Is(first, c) && !unicode.Is(second, c) { return false } } return true } // These tables were generated by cut and paste from Appendix B of // the XML spec at https://www.xml.com/axml/testaxml.htm // and then reformatting. First corresponds to (Letter | '_' | ':') // and second corresponds to NameChar. 
var first = &unicode.RangeTable{ R16: []unicode.Range16{ {0x003A, 0x003A, 1}, {0x0041, 0x005A, 1}, {0x005F, 0x005F, 1}, {0x0061, 0x007A, 1}, {0x00C0, 0x00D6, 1}, {0x00D8, 0x00F6, 1}, {0x00F8, 0x00FF, 1}, {0x0100, 0x0131, 1}, {0x0134, 0x013E, 1}, {0x0141, 0x0148, 1}, {0x014A, 0x017E, 1}, {0x0180, 0x01C3, 1}, {0x01CD, 0x01F0, 1}, {0x01F4, 0x01F5, 1}, {0x01FA, 0x0217, 1}, {0x0250, 0x02A8, 1}, {0x02BB, 0x02C1, 1}, {0x0386, 0x0386, 1}, {0x0388, 0x038A, 1}, {0x038C, 0x038C, 1}, {0x038E, 0x03A1, 1}, {0x03A3, 0x03CE, 1}, {0x03D0, 0x03D6, 1}, {0x03DA, 0x03E0, 2}, {0x03E2, 0x03F3, 1}, {0x0401, 0x040C, 1}, {0x040E, 0x044F, 1}, {0x0451, 0x045C, 1}, {0x045E, 0x0481, 1}, {0x0490, 0x04C4, 1}, {0x04C7, 0x04C8, 1}, {0x04CB, 0x04CC, 1}, {0x04D0, 0x04EB, 1}, {0x04EE, 0x04F5, 1}, {0x04F8, 0x04F9, 1}, {0x0531, 0x0556, 1}, {0x0559, 0x0559, 1}, {0x0561, 0x0586, 1}, {0x05D0, 0x05EA, 1}, {0x05F0, 0x05F2, 1}, {0x0621, 0x063A, 1}, {0x0641, 0x064A, 1}, {0x0671, 0x06B7, 1}, {0x06BA, 0x06BE, 1}, {0x06C0, 0x06CE, 1}, {0x06D0, 0x06D3, 1}, {0x06D5, 0x06D5, 1}, {0x06E5, 0x06E6, 1}, {0x0905, 0x0939, 1}, {0x093D, 0x093D, 1}, {0x0958, 0x0961, 1}, {0x0985, 0x098C, 1}, {0x098F, 0x0990, 1}, {0x0993, 0x09A8, 1}, {0x09AA, 0x09B0, 1}, {0x09B2, 0x09B2, 1}, {0x09B6, 0x09B9, 1}, {0x09DC, 0x09DD, 1}, {0x09DF, 0x09E1, 1}, {0x09F0, 0x09F1, 1}, {0x0A05, 0x0A0A, 1}, {0x0A0F, 0x0A10, 1}, {0x0A13, 0x0A28, 1}, {0x0A2A, 0x0A30, 1}, {0x0A32, 0x0A33, 1}, {0x0A35, 0x0A36, 1}, {0x0A38, 0x0A39, 1}, {0x0A59, 0x0A5C, 1}, {0x0A5E, 0x0A5E, 1}, {0x0A72, 0x0A74, 1}, {0x0A85, 0x0A8B, 1}, {0x0A8D, 0x0A8D, 1}, {0x0A8F, 0x0A91, 1}, {0x0A93, 0x0AA8, 1}, {0x0AAA, 0x0AB0, 1}, {0x0AB2, 0x0AB3, 1},
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/xml/extras.go
vendor/github.com/vmware/govmomi/vim25/xml/extras.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package xml import ( "reflect" "time" ) var xmlSchemaInstance = Name{Space: "http://www.w3.org/2001/XMLSchema-instance", Local: "type"} var xsiType = Name{Space: "xsi", Local: "type"} var stringToTypeMap = map[string]reflect.Type{ "xsd:boolean": reflect.TypeOf((*bool)(nil)).Elem(), "xsd:byte": reflect.TypeOf((*int8)(nil)).Elem(), "xsd:short": reflect.TypeOf((*int16)(nil)).Elem(), "xsd:int": reflect.TypeOf((*int32)(nil)).Elem(), "xsd:long": reflect.TypeOf((*int64)(nil)).Elem(), "xsd:unsignedByte": reflect.TypeOf((*uint8)(nil)).Elem(), "xsd:unsignedShort": reflect.TypeOf((*uint16)(nil)).Elem(), "xsd:unsignedInt": reflect.TypeOf((*uint32)(nil)).Elem(), "xsd:unsignedLong": reflect.TypeOf((*uint64)(nil)).Elem(), "xsd:float": reflect.TypeOf((*float32)(nil)).Elem(), "xsd:double": reflect.TypeOf((*float64)(nil)).Elem(), "xsd:string": reflect.TypeOf((*string)(nil)).Elem(), "xsd:dateTime": reflect.TypeOf((*time.Time)(nil)).Elem(), "xsd:base64Binary": reflect.TypeOf((*[]byte)(nil)).Elem(), } // Return a reflect.Type for the specified type. Nil if unknown. func stringToType(s string) reflect.Type { return stringToTypeMap[s] } // Return a string for the specified reflect.Type. Panic if unknown. 
func typeToString(typ reflect.Type) string { switch typ.Kind() { case reflect.Bool: return "xsd:boolean" case reflect.Int8: return "xsd:byte" case reflect.Int16: return "xsd:short" case reflect.Int32: return "xsd:int" case reflect.Int, reflect.Int64: return "xsd:long" case reflect.Uint8: return "xsd:unsignedByte" case reflect.Uint16: return "xsd:unsignedShort" case reflect.Uint32: return "xsd:unsignedInt" case reflect.Uint, reflect.Uint64: return "xsd:unsignedLong" case reflect.Float32: return "xsd:float" case reflect.Float64: return "xsd:double" case reflect.String: name := typ.Name() if name == "string" { return "xsd:string" } return name case reflect.Struct: if typ == stringToTypeMap["xsd:dateTime"] { return "xsd:dateTime" } // Expect any other struct to be handled... return typ.Name() case reflect.Slice: if typ.Elem().Kind() == reflect.Uint8 { return "xsd:base64Binary" } case reflect.Array: if typ.Elem().Kind() == reflect.Uint8 { return "xsd:base64Binary" } } panic("don't know what to do for type: " + typ.String()) } // Find reflect.Type for an element's type attribute. func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type { t := "" for _, a := range start.Attr { if a.Name == xmlSchemaInstance || a.Name == xsiType { t = a.Value break } } if t == "" { // No type attribute; fall back to looking up type by interface name. t = val.Type().Name() } // Maybe the type is a basic xsd:* type. typ := stringToType(t) if typ != nil { return typ } // Maybe the type is a custom type. if p.TypeFunc != nil { if typ, ok := p.TypeFunc(t); ok { return typ } } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go
vendor/github.com/vmware/govmomi/vim25/xml/marshal.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package xml import ( "bufio" "bytes" "encoding" "errors" "fmt" "io" "reflect" "strconv" "strings" ) const ( // Header is a generic XML header suitable for use with the output of [Marshal]. // This is not automatically added to any output of this package, // it is provided as a convenience. Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n" ) // Marshal returns the XML encoding of v. // // Marshal handles an array or slice by marshaling each of the elements. // Marshal handles a pointer by marshaling the value it points at or, if the // pointer is nil, by writing nothing. Marshal handles an interface value by // marshaling the value it contains or, if the interface value is nil, by // writing nothing. Marshal handles all other data by writing one or more XML // elements containing the data. // // The name for the XML elements is taken from, in order of preference: // - the tag on the XMLName field, if the data is a struct // - the value of the XMLName field of type [Name] // - the tag of the struct field used to obtain the data // - the name of the struct field used to obtain the data // - the name of the marshaled type // // The XML element for a struct contains marshaled elements for each of the // exported fields of the struct, with these exceptions: // - the XMLName field, described above, is omitted. // - a field with tag "-" is omitted. // - a field with tag "name,attr" becomes an attribute with // the given name in the XML element. // - a field with tag ",attr" becomes an attribute with the // field name in the XML element. // - a field with tag ",chardata" is written as character data, // not as an XML element. // - a field with tag ",cdata" is written as character data // wrapped in one or more <![CDATA[ ... ]]> tags, not as an XML element. 
// - a field with tag ",innerxml" is written verbatim, not subject // to the usual marshaling procedure. // - a field with tag ",comment" is written as an XML comment, not // subject to the usual marshaling procedure. It must not contain // the "--" string within it. // - a field with a tag including the "omitempty" option is omitted // if the field value is empty. The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or // string of length zero. // - an anonymous struct field is handled as if the fields of its // value were part of the outer struct. // - an anonymous struct field of interface type is treated the same as having // that type as its name, rather than being anonymous. // - a field implementing [Marshaler] is written by calling its MarshalXML // method. // - a field implementing [encoding.TextMarshaler] is written by encoding the // result of its MarshalText method as text. // // If a field uses a tag "a>b>c", then the element c will be nested inside // parent elements a and b. Fields that appear next to each other that name // the same parent will be enclosed in one XML element. // // If the XML name for a struct field is defined by both the field tag and the // struct's XMLName field, the names must match. // // See [MarshalIndent] for an example. // // Marshal will return an error if asked to marshal a channel, function, or map. func Marshal(v any) ([]byte, error) { var b bytes.Buffer enc := NewEncoder(&b) if err := enc.Encode(v); err != nil { return nil, err } if err := enc.Close(); err != nil { return nil, err } return b.Bytes(), nil } // Marshaler is the interface implemented by objects that can marshal // themselves into valid XML elements. // // MarshalXML encodes the receiver as zero or more XML elements. // By convention, arrays or slices are typically encoded as a sequence // of elements, one per entry. 
// Using start as the element tag is not required, but doing so // will enable [Unmarshal] to match the XML elements to the correct // struct field. // One common implementation strategy is to construct a separate // value with a layout corresponding to the desired XML and then // to encode it using e.EncodeElement. // Another common strategy is to use repeated calls to e.EncodeToken // to generate the XML output one token at a time. // The sequence of encoded tokens must make up zero or more valid // XML elements. type Marshaler interface { MarshalXML(e *Encoder, start StartElement) error } // MarshalerAttr is the interface implemented by objects that can marshal // themselves into valid XML attributes. // // MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. // Using name as the attribute name is not required, but doing so // will enable [Unmarshal] to match the attribute to the correct // struct field. // If MarshalXMLAttr returns the zero attribute [Attr]{}, no attribute // will be generated in the output. // MarshalXMLAttr is used only for struct fields with the // "attr" option in the field tag. type MarshalerAttr interface { MarshalXMLAttr(name Name) (Attr, error) } // MarshalIndent works like [Marshal], but each XML element begins on a new // indented line that starts with prefix and is followed by one or more // copies of indent according to the nesting depth. func MarshalIndent(v any, prefix, indent string) ([]byte, error) { var b bytes.Buffer enc := NewEncoder(&b) enc.Indent(prefix, indent) if err := enc.Encode(v); err != nil { return nil, err } if err := enc.Close(); err != nil { return nil, err } return b.Bytes(), nil } // An Encoder writes XML data to an output stream. type Encoder struct { p printer } // NewEncoder returns a new encoder that writes to w. 
func NewEncoder(w io.Writer) *Encoder { e := &Encoder{printer{w: bufio.NewWriter(w)}} e.p.encoder = e return e } // Indent sets the encoder to generate XML in which each element // begins on a new indented line that starts with prefix and is followed by // one or more copies of indent according to the nesting depth. func (enc *Encoder) Indent(prefix, indent string) { enc.p.prefix = prefix enc.p.indent = indent } // Encode writes the XML encoding of v to the stream. // // See the documentation for [Marshal] for details about the conversion // of Go values to XML. // // Encode calls [Encoder.Flush] before returning. func (enc *Encoder) Encode(v any) error { err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) if err != nil { return err } return enc.p.w.Flush() } // EncodeElement writes the XML encoding of v to the stream, // using start as the outermost tag in the encoding. // // See the documentation for [Marshal] for details about the conversion // of Go values to XML. // // EncodeElement calls [Encoder.Flush] before returning. func (enc *Encoder) EncodeElement(v any, start StartElement) error { err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) if err != nil { return err } return enc.p.w.Flush() } var ( begComment = []byte("<!--") endComment = []byte("-->") endProcInst = []byte("?>") ) // EncodeToken writes the given XML token to the stream. // It returns an error if [StartElement] and [EndElement] tokens are not properly matched. // // EncodeToken does not call [Encoder.Flush], because usually it is part of a larger operation // such as [Encoder.Encode] or [Encoder.EncodeElement] (or a custom [Marshaler]'s MarshalXML invoked // during those), and those will call Flush when finished. // Callers that create an Encoder and then invoke EncodeToken directly, without // using Encode or EncodeElement, need to call Flush when finished to ensure // that the XML is written to the underlying writer. 
// // EncodeToken allows writing a [ProcInst] with Target set to "xml" only as the first token // in the stream. func (enc *Encoder) EncodeToken(t Token) error { p := &enc.p switch t := t.(type) { case StartElement: if err := p.writeStart(&t); err != nil { return err } case EndElement: if err := p.writeEnd(t.Name); err != nil { return err } case CharData: escapeText(p, t, false) case Comment: if bytes.Contains(t, endComment) { return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") } p.WriteString("<!--") p.Write(t) p.WriteString("-->") return p.cachedWriteError() case ProcInst: // First token to be encoded which is also a ProcInst with target of xml // is the xml declaration. The only ProcInst where target of xml is allowed. if t.Target == "xml" && p.w.Buffered() != 0 { return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") } if !isNameString(t.Target) { return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") } if bytes.Contains(t.Inst, endProcInst) { return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") } p.WriteString("<?") p.WriteString(t.Target) if len(t.Inst) > 0 { p.WriteByte(' ') p.Write(t.Inst) } p.WriteString("?>") case Directive: if !isValidDirective(t) { return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") } p.WriteString("<!") p.Write(t) p.WriteString(">") default: return fmt.Errorf("xml: EncodeToken of invalid token type") } return p.cachedWriteError() } // isValidDirective reports whether dir is a valid directive text, // meaning angle brackets are matched, ignoring comments and strings. 
func isValidDirective(dir Directive) bool { var ( depth int inquote uint8 incomment bool ) for i, c := range dir { switch { case incomment: if c == '>' { if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { incomment = false } } // Just ignore anything in comment case inquote != 0: if c == inquote { inquote = 0 } // Just ignore anything within quotes case c == '\'' || c == '"': inquote = c case c == '<': if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { incomment = true } else { depth++ } case c == '>': if depth == 0 { return false } depth-- } } return depth == 0 && inquote == 0 && !incomment } // Flush flushes any buffered XML to the underlying writer. // See the [Encoder.EncodeToken] documentation for details about when it is necessary. func (enc *Encoder) Flush() error { return enc.p.w.Flush() } // Close the Encoder, indicating that no more data will be written. It flushes // any buffered XML to the underlying writer and returns an error if the // written XML is invalid (e.g. by containing unclosed elements). func (enc *Encoder) Close() error { return enc.p.Close() } type printer struct { w *bufio.Writer encoder *Encoder seq int indent string prefix string depth int indentedIn bool putNewline bool attrNS map[string]string // map prefix -> name space attrPrefix map[string]string // map name space -> prefix prefixes []string tags []Name closed bool err error } // createAttrPrefix finds the name space prefix attribute to use for the given name space, // defining a new prefix if necessary. It returns the prefix. func (p *printer) createAttrPrefix(url string) string { if prefix := p.attrPrefix[url]; prefix != "" { return prefix } // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" // and must be referred to that way. // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", // but users should not be trying to use that one directly - that's our job.) 
if url == xmlURL { return xmlPrefix } // Need to define a new name space. if p.attrPrefix == nil { p.attrPrefix = make(map[string]string) p.attrNS = make(map[string]string) } // Pick a name. We try to use the final element of the path // but fall back to _. prefix := strings.TrimRight(url, "/") if i := strings.LastIndex(prefix, "/"); i >= 0 { prefix = prefix[i+1:] } if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { prefix = "_" } // xmlanything is reserved and any variant of it regardless of // case should be matched, so: // (('X'|'x') ('M'|'m') ('L'|'l')) // See Section 2.3 of https://www.w3.org/TR/REC-xml/ if len(prefix) >= 3 && strings.EqualFold(prefix[:3], "xml") { prefix = "_" + prefix } if p.attrNS[prefix] != "" { // Name is taken. Find a better one. for p.seq++; ; p.seq++ { if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { prefix = id break } } } p.attrPrefix[url] = prefix p.attrNS[prefix] = url p.WriteString(`xmlns:`) p.WriteString(prefix) p.WriteString(`="`) EscapeText(p, []byte(url)) p.WriteString(`" `) p.prefixes = append(p.prefixes, prefix) return prefix } // deleteAttrPrefix removes an attribute name space prefix. func (p *printer) deleteAttrPrefix(prefix string) { delete(p.attrPrefix, p.attrNS[prefix]) delete(p.attrNS, prefix) } func (p *printer) markPrefix() { p.prefixes = append(p.prefixes, "") } func (p *printer) popPrefix() { for len(p.prefixes) > 0 { prefix := p.prefixes[len(p.prefixes)-1] p.prefixes = p.prefixes[:len(p.prefixes)-1] if prefix == "" { break } p.deleteAttrPrefix(prefix) } } var ( marshalerType = reflect.TypeFor[Marshaler]() marshalerAttrType = reflect.TypeFor[MarshalerAttr]() textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]() ) // marshalValue writes one or more XML elements representing val. // If val was obtained from a struct field, finfo must have its details. 
func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { if startTemplate != nil && startTemplate.Name.Local == "" { return fmt.Errorf("xml: EncodeElement of StartElement with missing name") } if !val.IsValid() { return nil } if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { return nil } // Drill into interfaces and pointers. // This can turn into an infinite loop given a cyclic chain, // but it matches the Go 1 behavior. for val.Kind() == reflect.Interface || val.Kind() == reflect.Pointer { if val.IsNil() { return nil } val = val.Elem() } kind := val.Kind() typ := val.Type() // Check for marshaler. if val.CanInterface() && typ.Implements(marshalerType) { return p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(marshalerType) { return p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate)) } } // Check for text marshaler. if val.CanInterface() && typ.Implements(textMarshalerType) { return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate)) } } // Slices and arrays iterate over the elements. They do not have an enclosing tag. if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { for i, n := 0, val.Len(); i < n; i++ { if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { return err } } return nil } tinfo, err := getTypeInfo(typ) if err != nil { return err } // Create start element. // Precedence for the XML element name is: // 0. startTemplate // 1. XMLName field in underlying struct; // 2. 
field name/tag in the struct field; and // 3. type name var start StartElement if startTemplate != nil { start.Name = startTemplate.Name start.Attr = append(start.Attr, startTemplate.Attr...) } else if tinfo.xmlname != nil { xmlname := tinfo.xmlname if xmlname.name != "" { start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name } else { fv := xmlname.value(val, dontInitNilPointers) if v, ok := fv.Interface().(Name); ok && v.Local != "" { start.Name = v } } } if start.Name.Local == "" && finfo != nil { start.Name.Space, start.Name.Local = finfo.xmlns, finfo.name } if start.Name.Local == "" { name := typ.Name() if i := strings.IndexByte(name, '['); i >= 0 { // Truncate generic instantiation name. See issue 48318. name = name[:i] } if name == "" { return &UnsupportedTypeError{typ} } start.Name.Local = name } // Attributes for i := range tinfo.fields { finfo := &tinfo.fields[i] if finfo.flags&fAttr == 0 { continue } fv := finfo.value(val, dontInitNilPointers) if finfo.flags&fOmitEmpty != 0 && (!fv.IsValid() || isEmptyValue(fv)) { continue } if fv.Kind() == reflect.Interface && fv.IsNil() { continue } name := Name{Space: finfo.xmlns, Local: finfo.name} if err := p.marshalAttr(&start, name, fv); err != nil { return err } } // Add type attribute if necessary if finfo != nil && finfo.flags&fTypeAttr != 0 { start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) } // If an empty name was found, namespace is overridden with an empty space if tinfo.xmlname != nil && start.Name.Space == "" && tinfo.xmlname.xmlns == "" && tinfo.xmlname.name == "" && len(p.tags) != 0 && p.tags[len(p.tags)-1].Space != "" { start.Attr = append(start.Attr, Attr{Name{"", xmlnsPrefix}, ""}) } if err := p.writeStart(&start); err != nil { return err } if val.Kind() == reflect.Struct { err = p.marshalStruct(tinfo, val) } else { s, b, err1 := p.marshalSimple(typ, val) if err1 != nil { err = err1 } else if b != nil { EscapeText(p, b) } else { p.EscapeString(s) } } if err != 
nil { return err } if err := p.writeEnd(start.Name); err != nil { return err } return p.cachedWriteError() } // marshalAttr marshals an attribute with the given name and value, adding to start.Attr. func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error { if val.CanInterface() && val.Type().Implements(marshalerAttrType) { attr, err := val.Interface().(MarshalerAttr).MarshalXMLAttr(name) if err != nil { return err } if attr.Name.Local != "" { start.Attr = append(start.Attr, attr) } return nil } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) if err != nil { return err } if attr.Name.Local != "" { start.Attr = append(start.Attr, attr) } return nil } } if val.CanInterface() && val.Type().Implements(textMarshalerType) { text, err := val.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } start.Attr = append(start.Attr, Attr{name, string(text)}) return nil } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } start.Attr = append(start.Attr, Attr{name, string(text)}) return nil } } // Dereference or skip nil pointer, interface values. switch val.Kind() { case reflect.Pointer, reflect.Interface: if val.IsNil() { return nil } val = val.Elem() } // Walk slices. 
if val.Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { n := val.Len() for i := 0; i < n; i++ { if err := p.marshalAttr(start, name, val.Index(i)); err != nil { return err } } return nil } if val.Type() == attrType { start.Attr = append(start.Attr, val.Interface().(Attr)) return nil } s, b, err := p.marshalSimple(val.Type(), val) if err != nil { return err } if b != nil { s = string(b) } start.Attr = append(start.Attr, Attr{name, s}) return nil } // defaultStart returns the default start element to use, // given the reflect type, field info, and start template. func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { var start StartElement // Precedence for the XML element name is as above, // except that we do not look inside structs for the first field. if startTemplate != nil { start.Name = startTemplate.Name start.Attr = append(start.Attr, startTemplate.Attr...) } else if finfo != nil && finfo.name != "" { start.Name.Local = finfo.name start.Name.Space = finfo.xmlns } else if typ.Name() != "" { start.Name.Local = typ.Name() } else { // Must be a pointer to a named type, // since it has the Marshaler methods. start.Name.Local = typ.Elem().Name() } // Add type attribute if necessary if finfo != nil && finfo.flags&fTypeAttr != 0 { start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) } return start } // marshalInterface marshals a Marshaler interface value. func (p *printer) marshalInterface(val Marshaler, start StartElement) error { // Push a marker onto the tag stack so that MarshalXML // cannot close the XML tags that it did not open. p.tags = append(p.tags, Name{}) n := len(p.tags) err := val.MarshalXML(p.encoder, start) if err != nil { return err } // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. 
if len(p.tags) > n { return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) } p.tags = p.tags[:n-1] return nil } // marshalTextInterface marshals a TextMarshaler interface value. func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { if err := p.writeStart(&start); err != nil { return err } text, err := val.MarshalText() if err != nil { return err } EscapeText(p, text) return p.writeEnd(start.Name) } // writeStart writes the given start element. func (p *printer) writeStart(start *StartElement) error { if start.Name.Local == "" { return fmt.Errorf("xml: start tag with no name") } p.tags = append(p.tags, start.Name) p.markPrefix() p.writeIndent(1) p.WriteByte('<') p.WriteString(start.Name.Local) if start.Name.Space != "" { p.WriteString(` xmlns="`) p.EscapeString(start.Name.Space) p.WriteByte('"') } // Attributes for _, attr := range start.Attr { name := attr.Name if name.Local == "" { continue } p.WriteByte(' ') if name.Space != "" { p.WriteString(p.createAttrPrefix(name.Space)) p.WriteByte(':') } p.WriteString(name.Local) p.WriteString(`="`) p.EscapeString(attr.Value) p.WriteByte('"') } p.WriteByte('>') return nil } func (p *printer) writeEnd(name Name) error { if name.Local == "" { return fmt.Errorf("xml: end tag with no name") } if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { return fmt.Errorf("xml: end tag </%s> without start tag", name.Local) } if top := p.tags[len(p.tags)-1]; top != name { if top.Local != name.Local { return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local) } return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) } p.tags = p.tags[:len(p.tags)-1] p.writeIndent(-1) p.WriteByte('<') p.WriteByte('/') p.WriteString(name.Local) p.WriteByte('>') p.popPrefix() return nil } func (p *printer) 
marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return strconv.FormatInt(val.Int(), 10), nil, nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return strconv.FormatUint(val.Uint(), 10), nil, nil case reflect.Float32, reflect.Float64: return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil case reflect.String: return val.String(), nil, nil case reflect.Bool: return strconv.FormatBool(val.Bool()), nil, nil case reflect.Array: if typ.Elem().Kind() != reflect.Uint8 { break } // [...]byte var bytes []byte if val.CanAddr() { bytes = val.Bytes() } else { bytes = make([]byte, val.Len()) reflect.Copy(reflect.ValueOf(bytes), val) } return "", bytes, nil case reflect.Slice: if typ.Elem().Kind() != reflect.Uint8 { break } // []byte return "", val.Bytes(), nil } return "", nil, &UnsupportedTypeError{typ} } var ddBytes = []byte("--") // indirect drills into interfaces and pointers, returning the pointed-at value. // If it encounters a nil interface or pointer, indirect returns that nil value. // This can turn into an infinite loop given a cyclic chain, // but it matches the Go 1 behavior. func indirect(vf reflect.Value) reflect.Value { for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Pointer { if vf.IsNil() { return vf } vf = vf.Elem() } return vf } func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { s := parentStack{p: p} for i := range tinfo.fields { finfo := &tinfo.fields[i] if finfo.flags&fAttr != 0 { continue } vf := finfo.value(val, dontInitNilPointers) if !vf.IsValid() { // The field is behind an anonymous struct field that's // nil. Skip it. 
continue } switch finfo.flags & fMode { case fCDATA, fCharData: emit := EscapeText if finfo.flags&fMode == fCDATA { emit = emitCDATA } if err := s.trim(finfo.parents); err != nil { return err } if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } if err := emit(p, data); err != nil { return err } continue } if vf.CanAddr() { pv := vf.Addr() if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } if err := emit(p, data); err != nil { return err } continue } } var scratch [64]byte vf = indirect(vf) switch vf.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if err := emit(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)); err != nil { return err } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: if err := emit(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)); err != nil { return err } case reflect.Float32, reflect.Float64: if err := emit(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())); err != nil { return err } case reflect.Bool: if err := emit(p, strconv.AppendBool(scratch[:0], vf.Bool())); err != nil { return err } case reflect.String: if err := emit(p, []byte(vf.String())); err != nil { return err } case reflect.Slice: if elem, ok := vf.Interface().([]byte); ok { if err := emit(p, elem); err != nil { return err } } } continue case fComment: if err := s.trim(finfo.parents); err != nil { return err } vf = indirect(vf) k := vf.Kind() if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) } if vf.Len() == 0 { continue } p.writeIndent(0) p.WriteString("<!--") dashDash := false dashLast := false switch k { case reflect.String: s := 
vf.String() dashDash = strings.Contains(s, "--") dashLast = s[len(s)-1] == '-' if !dashDash { p.WriteString(s) } case reflect.Slice: b := vf.Bytes() dashDash = bytes.Contains(b, ddBytes) dashLast = b[len(b)-1] == '-' if !dashDash { p.Write(b) } default: panic("can't happen") } if dashDash { return fmt.Errorf(`xml: comments must not contain "--"`) } if dashLast { // "--->" is invalid grammar. Make it "- -->" p.WriteByte(' ') } p.WriteString("-->") continue case fInnerXML: vf = indirect(vf) iface := vf.Interface() switch raw := iface.(type) { case []byte: p.Write(raw) continue case string: p.WriteString(raw) continue } case fElement, fElement | fAny: if err := s.trim(finfo.parents); err != nil { return err } if len(finfo.parents) > len(s.stack) { if vf.Kind() != reflect.Pointer && vf.Kind() != reflect.Interface || !vf.IsNil() { if err := s.push(finfo.parents[len(s.stack):]); err != nil { return err } } } } if err := p.marshalValue(vf, finfo, nil); err != nil { return err } } s.trim(nil) return p.cachedWriteError() } // Write implements io.Writer func (p *printer) Write(b []byte) (n int, err error) { if p.closed && p.err == nil { p.err = errors.New("use of closed Encoder") } if p.err == nil { n, p.err = p.w.Write(b) } return n, p.err } // WriteString implements io.StringWriter func (p *printer) WriteString(s string) (n int, err error) { if p.closed && p.err == nil { p.err = errors.New("use of closed Encoder") } if p.err == nil { n, p.err = p.w.WriteString(s) } return n, p.err } // WriteByte implements io.ByteWriter func (p *printer) WriteByte(c byte) error { if p.closed && p.err == nil { p.err = errors.New("use of closed Encoder") } if p.err == nil { p.err = p.w.WriteByte(c) } return p.err } // Close the Encoder, indicating that no more data will be written. It flushes // any buffered XML to the underlying writer and returns an error if the // written XML is invalid (e.g. by containing unclosed elements). 
func (p *printer) Close() error { if p.closed { return nil } p.closed = true if err := p.w.Flush(); err != nil { return err } if len(p.tags) > 0 { return fmt.Errorf("unclosed tag <%s>", p.tags[len(p.tags)-1].Local) } return nil } // return the bufio Writer's cached write error func (p *printer) cachedWriteError() error { _, err := p.Write(nil) return err } func (p *printer) writeIndent(depthDelta int) { if len(p.prefix) == 0 && len(p.indent) == 0 { return } if depthDelta < 0 { p.depth-- if p.indentedIn { p.indentedIn = false return } p.indentedIn = false } if p.putNewline { p.WriteByte('\n') } else { p.putNewline = true } if len(p.prefix) > 0 { p.WriteString(p.prefix) } if len(p.indent) > 0 { for i := 0; i < p.depth; i++ { p.WriteString(p.indent) } } if depthDelta > 0 { p.depth++ p.indentedIn = true } } type parentStack struct { p *printer stack []string } // trim updates the XML context to match the longest common prefix of the stack // and the given parents. A closing tag will be written for every parent // popped. Passing a zero slice or nil will close all the elements. func (s *parentStack) trim(parents []string) error { split := 0 for ; split < len(parents) && split < len(s.stack); split++ { if parents[split] != s.stack[split] { break } } for i := len(s.stack) - 1; i >= split; i-- { if err := s.p.writeEnd(Name{Local: s.stack[i]}); err != nil { return err } } s.stack = s.stack[:split] return nil } // push adds parent elements to the stack and writes open tags. func (s *parentStack) push(parents []string) error { for i := 0; i < len(parents); i++ { if err := s.p.writeStart(&StartElement{Name: Name{Local: parents[i]}}); err != nil { return err } } s.stack = append(s.stack, parents...) return nil } // UnsupportedTypeError is returned when [Marshal] encounters a type // that cannot be converted into XML. 
type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "xml: unsupported type: " + e.Type.String() } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Interface, reflect.Pointer: return v.IsZero() } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/xml/read.go
vendor/github.com/vmware/govmomi/vim25/xml/read.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package xml import ( "bytes" "encoding" "errors" "fmt" "reflect" "runtime" "strconv" "strings" ) // BUG(rsc): Mapping between XML elements and data structures is inherently flawed: // an XML element is an order-dependent collection of anonymous // values, while a data structure is an order-independent collection // of named values. // See [encoding/json] for a textual representation more suitable // to data structures. // Unmarshal parses the XML-encoded data and stores the result in // the value pointed to by v, which must be an arbitrary struct, // slice, or string. Well-formed data that does not fit into v is // discarded. // // Because Unmarshal uses the reflect package, it can only assign // to exported (upper case) fields. Unmarshal uses a case-sensitive // comparison to match XML element names to tag values and struct // field names. // // Unmarshal maps an XML element to a struct using the following rules. // In the rules, the tag of a field refers to the value associated with the // key 'xml' in the struct field's tag (see the example above). // // - If the struct has a field of type []byte or string with tag // ",innerxml", Unmarshal accumulates the raw XML nested inside the // element in that field. The rest of the rules still apply. // // - If the struct has a field named XMLName of type Name, // Unmarshal records the element name in that field. // // - If the XMLName field has an associated tag of the form // "name" or "namespace-URL name", the XML element must have // the given name (and, optionally, name space) or else Unmarshal // returns an error. 
// // - If the XML element has an attribute whose name matches a // struct field name with an associated tag containing ",attr" or // the explicit name in a struct field tag of the form "name,attr", // Unmarshal records the attribute value in that field. // // - If the XML element has an attribute not handled by the previous // rule and the struct has a field with an associated tag containing // ",any,attr", Unmarshal records the attribute value in the first // such field. // // - If the XML element contains character data, that data is // accumulated in the first struct field that has tag ",chardata". // The struct field may have type []byte or string. // If there is no such field, the character data is discarded. // // - If the XML element contains comments, they are accumulated in // the first struct field that has tag ",comment". The struct // field may have type []byte or string. If there is no such // field, the comments are discarded. // // - If the XML element contains a sub-element whose name matches // the prefix of a tag formatted as "a" or "a>b>c", unmarshal // will descend into the XML structure looking for elements with the // given names, and will map the innermost elements to that struct // field. A tag starting with ">" is equivalent to one starting // with the field name followed by ">". // // - If the XML element contains a sub-element whose name matches // a struct field's XMLName tag and the struct field has no // explicit name tag as per the previous rule, unmarshal maps // the sub-element to that struct field. // // - If the XML element contains a sub-element whose name matches a // field without any mode flags (",attr", ",chardata", etc), Unmarshal // maps the sub-element to that struct field. // // - If the XML element contains a sub-element that hasn't matched any // of the above rules and the struct has a field with tag ",any", // unmarshal maps the sub-element to that struct field. 
// // - An anonymous struct field is handled as if the fields of its // value were part of the outer struct. // // - A struct field with tag "-" is never unmarshaled into. // // If Unmarshal encounters a field type that implements the Unmarshaler // interface, Unmarshal calls its UnmarshalXML method to produce the value from // the XML element. Otherwise, if the value implements // [encoding.TextUnmarshaler], Unmarshal calls that value's UnmarshalText method. // // Unmarshal maps an XML element to a string or []byte by saving the // concatenation of that element's character data in the string or // []byte. The saved []byte is never nil. // // Unmarshal maps an attribute value to a string or []byte by saving // the value in the string or slice. // // Unmarshal maps an attribute value to an [Attr] by saving the attribute, // including its name, in the Attr. // // Unmarshal maps an XML element or attribute value to a slice by // extending the length of the slice and mapping the element or attribute // to the newly created value. // // Unmarshal maps an XML element or attribute value to a bool by // setting it to the boolean value represented by the string. Whitespace // is trimmed and ignored. // // Unmarshal maps an XML element or attribute value to an integer or // floating-point field by setting the field to the result of // interpreting the string value in decimal. There is no check for // overflow. Whitespace is trimmed and ignored. // // Unmarshal maps an XML element to a Name by recording the element // name. // // Unmarshal maps an XML element to a pointer by setting the pointer // to a freshly allocated value and then mapping the element to that value. // // A missing element or empty attribute value will be unmarshaled as a zero value. // If the field is a slice, a zero value will be appended to the field. Otherwise, the // field will be set to its zero value. 
func Unmarshal(data []byte, v any) error { return NewDecoder(bytes.NewReader(data)).Decode(v) } // Decode works like [Unmarshal], except it reads the decoder // stream to find the start element. func (d *Decoder) Decode(v any) error { return d.DecodeElement(v, nil) } // DecodeElement works like [Unmarshal] except that it takes // a pointer to the start XML element to decode into v. // It is useful when a client reads some raw XML tokens itself // but also wants to defer to [Unmarshal] for some elements. func (d *Decoder) DecodeElement(v any, start *StartElement) error { val := reflect.ValueOf(v) if val.Kind() != reflect.Pointer { return errors.New("non-pointer passed to Unmarshal") } if val.IsNil() { return errors.New("nil pointer passed to Unmarshal") } return d.unmarshal(val.Elem(), start, 0) } // An UnmarshalError represents an error in the unmarshaling process. type UnmarshalError string func (e UnmarshalError) Error() string { return string(e) } // Unmarshaler is the interface implemented by objects that can unmarshal // an XML element description of themselves. // // UnmarshalXML decodes a single XML element // beginning with the given start element. // If it returns an error, the outer call to Unmarshal stops and // returns that error. // UnmarshalXML must consume exactly one XML element. // One common implementation strategy is to unmarshal into // a separate value with a layout matching the expected XML // using d.DecodeElement, and then to copy the data from // that value into the receiver. // Another common strategy is to use d.Token to process the // XML object one token at a time. // UnmarshalXML may not use d.RawToken. type Unmarshaler interface { UnmarshalXML(d *Decoder, start StartElement) error } // UnmarshalerAttr is the interface implemented by objects that can unmarshal // an XML attribute description of themselves. // // UnmarshalXMLAttr decodes a single XML attribute. 
// If it returns an error, the outer call to [Unmarshal] stops and // returns that error. // UnmarshalXMLAttr is used only for struct fields with the // "attr" option in the field tag. type UnmarshalerAttr interface { UnmarshalXMLAttr(attr Attr) error } // receiverType returns the receiver type to use in an expression like "%s.MethodName". func receiverType(val any) string { t := reflect.TypeOf(val) if t.Name() != "" { return t.String() } return "(" + t.String() + ")" } // unmarshalInterface unmarshals a single XML element into val. // start is the opening tag of the element. func (d *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { // Record that decoder must stop at end tag corresponding to start. d.pushEOF() d.unmarshalDepth++ err := val.UnmarshalXML(d, *start) d.unmarshalDepth-- if err != nil { d.popEOF() return err } if !d.popEOF() { return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) } return nil } // unmarshalTextInterface unmarshals a single XML element into val. // The chardata contained in the element (but not its children) // is passed to the text unmarshaler. func (d *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler) error { var buf []byte depth := 1 for depth > 0 { t, err := d.Token() if err != nil { return err } switch t := t.(type) { case CharData: if depth == 1 { buf = append(buf, t...) } case StartElement: depth++ case EndElement: depth-- } } return val.UnmarshalText(buf) } // unmarshalAttr unmarshals a single XML attribute into val. func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { if val.Kind() == reflect.Pointer { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. 
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) } } // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) } } if val.Type().Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { // Slice of element values. // Grow slice. n := val.Len() val.Grow(1) val.SetLen(n + 1) // Recur to read element into slice. if err := d.unmarshalAttr(val.Index(n), attr); err != nil { val.SetLen(n) return err } return nil } if val.Type() == attrType { val.Set(reflect.ValueOf(attr)) return nil } return copyValue(val, []byte(attr.Value)) } var ( attrType = reflect.TypeFor[Attr]() unmarshalerType = reflect.TypeFor[Unmarshaler]() unmarshalerAttrType = reflect.TypeFor[UnmarshalerAttr]() textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() ) const ( maxUnmarshalDepth = 10000 maxUnmarshalDepthWasm = 5000 // go.dev/issue/56498 ) var errUnmarshalDepth = errors.New("exceeded max depth") // Unmarshal a single XML element into val. func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error { if depth >= maxUnmarshalDepth || runtime.GOARCH == "wasm" && depth >= maxUnmarshalDepthWasm { return errUnmarshalDepth } // Find start element if we need it. 
if start == nil { for { tok, err := d.Token() if err != nil { return err } if t, ok := tok.(StartElement); ok { start = &t break } } } // Try to figure out type for empty interface values. if val.Kind() == reflect.Interface && val.IsNil() { typ := d.typeForElement(val, start) if typ != nil { pval := reflect.New(typ).Elem() err := d.unmarshal(pval, start, depth) if err != nil { return err } for i := 0; i < 2; i++ { if typ.Implements(val.Type()) { val.Set(pval) return nil } typ = reflect.PtrTo(typ) pval = pval.Addr() } val.Set(pval) return nil } } // Load value from interface, but only if the result will be // usefully addressable. if val.Kind() == reflect.Interface && !val.IsNil() { e := val.Elem() if e.Kind() == reflect.Pointer && !e.IsNil() { val = e } } if val.Kind() == reflect.Pointer { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } if val.CanInterface() && val.Type().Implements(unmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. return d.unmarshalInterface(val.Interface().(Unmarshaler), start) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) } } if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) } } var ( data []byte saveData reflect.Value comment []byte saveComment reflect.Value saveXML reflect.Value saveXMLIndex int saveXMLData []byte saveAny reflect.Value sv reflect.Value tinfo *typeInfo err error ) switch v := val; v.Kind() { default: return errors.New("unknown type " + v.Type().String()) case reflect.Interface: // TODO: For now, simply ignore the field. 
In the near // future we may choose to unmarshal the start // element on it, if not nil. return d.Skip() case reflect.Slice: typ := v.Type() if typ.Elem().Kind() == reflect.Uint8 { // []byte saveData = v break } // Slice of element values. // Grow slice. n := v.Len() v.Grow(1) v.SetLen(n + 1) // Recur to read element into slice. if err := d.unmarshal(v.Index(n), start, depth+1); err != nil { v.SetLen(n) return err } return nil case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: saveData = v case reflect.Struct: typ := v.Type() if typ == nameType { v.Set(reflect.ValueOf(start.Name)) break } sv = v tinfo, err = getTypeInfo(typ) if err != nil { return err } // Validate and assign element name. if tinfo.xmlname != nil { finfo := tinfo.xmlname if finfo.name != "" && finfo.name != start.Name.Local { return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") } if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " if start.Name.Space == "" { e += "no name space" } else { e += start.Name.Space } return UnmarshalError(e) } fv := finfo.value(sv, initNilPointers) if _, ok := fv.Interface().(Name); ok { fv.Set(reflect.ValueOf(start.Name)) } } // Assign attributes. for _, a := range start.Attr { handled := false any := -1 for i := range tinfo.fields { finfo := &tinfo.fields[i] switch finfo.flags & fMode { case fAttr: strv := finfo.value(sv, initNilPointers) if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { needTypeAttr := (finfo.flags & fTypeAttr) != 0 // HACK: avoid using xsi:type value for a "type" attribute, such as ManagedObjectReference.Type for example. 
if needTypeAttr || (a.Name != xmlSchemaInstance && a.Name != xsiType) { if err := d.unmarshalAttr(strv, a); err != nil { return err } } handled = true } case fAny | fAttr: if any == -1 { any = i } } } if !handled && any >= 0 { finfo := &tinfo.fields[any] strv := finfo.value(sv, initNilPointers) if err := d.unmarshalAttr(strv, a); err != nil { return err } } } // Determine whether we need to save character data or comments. for i := range tinfo.fields { finfo := &tinfo.fields[i] switch finfo.flags & fMode { case fCDATA, fCharData: if !saveData.IsValid() { saveData = finfo.value(sv, initNilPointers) } case fComment: if !saveComment.IsValid() { saveComment = finfo.value(sv, initNilPointers) } case fAny, fAny | fElement: if !saveAny.IsValid() { saveAny = finfo.value(sv, initNilPointers) } case fInnerXML: if !saveXML.IsValid() { saveXML = finfo.value(sv, initNilPointers) if d.saved == nil { saveXMLIndex = 0 d.saved = new(bytes.Buffer) } else { saveXMLIndex = d.savedOffset() } } } } } // Find end element. // Process sub-elements along the way. Loop: for { var savedOffset int if saveXML.IsValid() { savedOffset = d.savedOffset() } tok, err := d.Token() if err != nil { return err } switch t := tok.(type) { case StartElement: consumed := false if sv.IsValid() { // unmarshalPath can call unmarshal, so we need to pass the depth through so that // we can continue to enforce the maximum recursion limit. consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true if err := d.unmarshal(saveAny, &t, depth+1); err != nil { return err } } } if !consumed { if err := d.Skip(); err != nil { return err } } case EndElement: if saveXML.IsValid() { saveXMLData = d.saved.Bytes()[saveXMLIndex:savedOffset] if saveXMLIndex == 0 { d.saved = nil } } break Loop case CharData: if saveData.IsValid() { data = append(data, t...) } case Comment: if saveComment.IsValid() { comment = append(comment, t...) 
} } } if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return err } saveData = reflect.Value{} } if saveData.IsValid() && saveData.CanAddr() { pv := saveData.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return err } saveData = reflect.Value{} } } if err := copyValue(saveData, data); err != nil { return err } switch t := saveComment; t.Kind() { case reflect.String: t.SetString(string(comment)) case reflect.Slice: t.Set(reflect.ValueOf(comment)) } switch t := saveXML; t.Kind() { case reflect.String: t.SetString(string(saveXMLData)) case reflect.Slice: if t.Type().Elem().Kind() == reflect.Uint8 { t.Set(reflect.ValueOf(saveXMLData)) } } return nil } func copyValue(dst reflect.Value, src []byte) (err error) { dst0 := dst if dst.Kind() == reflect.Pointer { if dst.IsNil() { dst.Set(reflect.New(dst.Type().Elem())) } dst = dst.Elem() } // Save accumulated data. switch dst.Kind() { case reflect.Invalid: // Probably a comment. default: return errors.New("cannot unmarshal into " + dst0.Type().String()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if len(src) == 0 { dst.SetInt(0) return nil } itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } dst.SetInt(itmp) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: var utmp uint64 if len(src) > 0 && src[0] == '-' { // Negative value for unsigned field. // Assume it was serialized following two's complement. itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) if err != nil { return err } // Reinterpret value based on type width. 
switch dst.Type().Bits() { case 8: utmp = uint64(uint8(itmp)) case 16: utmp = uint64(uint16(itmp)) case 32: utmp = uint64(uint32(itmp)) case 64: utmp = uint64(uint64(itmp)) } } else { if len(src) == 0 { dst.SetUint(0) return nil } utmp, err = strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } } dst.SetUint(utmp) case reflect.Float32, reflect.Float64: if len(src) == 0 { dst.SetFloat(0) return nil } ftmp, err := strconv.ParseFloat(strings.TrimSpace(string(src)), dst.Type().Bits()) if err != nil { return err } dst.SetFloat(ftmp) case reflect.Bool: if len(src) == 0 { dst.SetBool(false) return nil } value, err := strconv.ParseBool(strings.TrimSpace(string(src))) if err != nil { return err } dst.SetBool(value) case reflect.String: dst.SetString(string(src)) case reflect.Slice: if len(src) == 0 { // non-nil to flag presence src = []byte{} } dst.SetBytes(src) } return nil } // unmarshalPath walks down an XML structure looking for wanted // paths, and calls unmarshal on them. // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is uninteresting for sv's fields. func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { finfo := &tinfo.fields[i] if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { continue } for j := range parents { if parents[j] != finfo.parents[j] { continue Loop } } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. 
Break and recurse // since it's not ok for one field path to be itself // the prefix for another field path. recurse = true // We can reuse the same slice as long as we // don't try to append to it. parents = finfo.parents[:len(parents)+1] break } } if !recurse { // We have no business with this element. return false, nil } // The element is not a perfect match for any field, but one // or more fields have the path to this element as a parent // prefix. Recurse and attempt to match these. for { var tok Token tok, err = d.Token() if err != nil { return true, err } switch t := tok.(type) { case StartElement: // the recursion depth of unmarshalPath is limited to the path length specified // by the struct field tag, so we don't increment the depth here. consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth) if err != nil { return true, err } if !consumed2 { if err := d.Skip(); err != nil { return true, err } } case EndElement: return true, nil } } } // Skip reads tokens until it has consumed the end element // matching the most recent start element already consumed, // skipping nested structures. // It returns nil if it finds an end element matching the start // element; otherwise it returns an error describing the problem. func (d *Decoder) Skip() error { var depth int64 for { tok, err := d.Token() if err != nil { return err } switch tok.(type) { case StartElement: depth++ case EndElement: if depth == 0 { return nil } depth-- } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/soap/client.go
vendor/github.com/vmware/govmomi/vim25/soap/client.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package soap import ( "bufio" "bytes" "context" "crypto/sha1" "crypto/sha256" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" "io" "log" "net" "net/http" "net/http/cookiejar" "net/url" "os" "path" "path/filepath" "reflect" "regexp" "runtime" "strings" "sync" "github.com/vmware/govmomi/internal/version" "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vim25/xml" ) type HasFault interface { Fault() *Fault } type RoundTripper interface { RoundTrip(ctx context.Context, req, res HasFault) error } const ( SessionCookieName = "vmware_soap_session" ) // defaultUserAgent is the default user agent string, e.g. // "govc govmomi/0.28.0 (go1.18.3;linux;amd64)" var defaultUserAgent = fmt.Sprintf( "%s %s/%s (%s)", execName(), version.ClientName, version.ClientVersion, strings.Join([]string{runtime.Version(), runtime.GOOS, runtime.GOARCH}, ";"), ) type Client struct { http.Client u *url.URL k bool // Named after curl's -k flag d *debugContainer t *http.Transport hostsMu sync.Mutex hosts map[string]string Namespace string `json:"namespace"` // Vim namespace Version string `json:"version"` // Vim version Types types.Func `json:"types"` UserAgent string `json:"userAgent"` // Cookie returns a value for the SOAP Header.Cookie. // This SOAP request header is used for authentication by // API endpoints such as pbm, vslm and sms. // When nil, no SOAP Header.Cookie is set. 
Cookie func() *HeaderElement insecureCookies bool useJSON bool } var schemeMatch = regexp.MustCompile(`^\w+://`) type errInvalidCACertificate struct { File string } func (e errInvalidCACertificate) Error() string { return fmt.Sprintf( "invalid certificate '%s', cannot be used as a trusted CA certificate", e.File, ) } // ParseURL is wrapper around url.Parse, where Scheme defaults to "https" and Path defaults to "/sdk" func ParseURL(s string) (*url.URL, error) { var err error var u *url.URL if s != "" { // Default the scheme to https if !schemeMatch.MatchString(s) { s = "https://" + s } s := strings.TrimSuffix(s, "/") u, err = url.Parse(s) if err != nil { return nil, err } // Default the path to /sdk if u.Path == "" { u.Path = "/sdk" } if u.User == nil { u.User = url.UserPassword("", "") } } return u, nil } // Go's ForceAttemptHTTP2 default is true, we disable by default. // This undocumented env var can be used to enable. var http2 = os.Getenv("GOVMOMI_HTTP2") == "true" func NewClient(u *url.URL, insecure bool) *Client { var t *http.Transport if d, ok := http.DefaultTransport.(*http.Transport); ok { // Inherit the same defaults explicitly set in http.DefaultTransport, // unless otherwise noted. t = &http.Transport{ Proxy: d.Proxy, DialContext: d.DialContext, ForceAttemptHTTP2: http2, // false by default in govmomi MaxIdleConns: d.MaxIdleConns, IdleConnTimeout: d.IdleConnTimeout, TLSHandshakeTimeout: d.TLSHandshakeTimeout, ExpectContinueTimeout: d.ExpectContinueTimeout, } } else { t = new(http.Transport) } t.TLSClientConfig = &tls.Config{ InsecureSkipVerify: insecure, } c := newClientWithTransport(u, insecure, t) // Always set DialTLS and DialTLSContext, even if InsecureSkipVerify=true, // because of how certificate verification has been delegated to the host's // PKI framework in Go 1.18. 
Please see the following links for more info: // // * https://tip.golang.org/doc/go1.18 (search for "Certificate.Verify") // * https://github.com/square/certigo/issues/264 t.DialTLSContext = c.dialTLSContext return c } func newClientWithTransport(u *url.URL, insecure bool, t *http.Transport) *Client { c := Client{ u: u, k: insecure, d: newDebug(), t: t, Types: types.TypeFunc(), } c.hosts = make(map[string]string) c.Client.Transport = c.t c.Client.Jar, _ = cookiejar.New(nil) // Remove user information from a copy of the URL c.u = c.URL() c.u.User = nil if c.u.Scheme == "http" { c.insecureCookies = os.Getenv("GOVMOMI_INSECURE_COOKIES") == "true" } return &c } func (c *Client) DefaultTransport() *http.Transport { return c.t } // NewServiceClient creates a NewClient with the given URL.Path and namespace. func (c *Client) NewServiceClient(path string, namespace string) *Client { return c.newServiceClientWithTransport(path, namespace, c.t) } func sessionCookie(jar http.CookieJar, u *url.URL) *HeaderElement { for _, cookie := range jar.Cookies(u) { if cookie.Name == SessionCookieName { return &HeaderElement{Value: cookie.Value} } } return nil } // SessionCookie returns a SessionCookie with value of the vmware_soap_session http.Cookie. func (c *Client) SessionCookie() *HeaderElement { u := c.URL() if cookie := sessionCookie(c.Jar, u); cookie != nil { return cookie } // Default "/sdk" Path would match above, // but saw a case of Path == "sdk", where above returns nil. // The jar entry Path is normally "/", so fallback to that. 
u.Path = "/" return sessionCookie(c.Jar, u) } func (c *Client) newServiceClientWithTransport(path string, namespace string, t *http.Transport) *Client { vc := c.URL() u, err := url.Parse(path) if err != nil { log.Panicf("url.Parse(%q): %s", path, err) } if u.Host == "" { u.Scheme = vc.Scheme u.Host = vc.Host } client := newClientWithTransport(u, c.k, t) client.Namespace = "urn:" + namespace // Copy the trusted thumbprints c.hostsMu.Lock() for k, v := range c.hosts { client.hosts[k] = v } c.hostsMu.Unlock() // Copy the cookies client.Client.Jar.SetCookies(u, c.Client.Jar.Cookies(u)) // Copy any query params (e.g. GOVMOMI_TUNNEL_PROXY_PORT used in testing) client.u.RawQuery = vc.RawQuery client.UserAgent = c.UserAgent vimTypes := c.Types client.Types = func(name string) (reflect.Type, bool) { kind, ok := vimTypes(name) if ok { return kind, ok } // vim25/xml typeToString() does not have an option to include namespace prefix. // Workaround this by re-trying the lookup with the namespace prefix. return vimTypes(namespace + ":" + name) } return client } // UseJSON changes the protocol between SOAP and JSON. Starting with vCenter // 8.0.1 JSON over HTTP can be used. Note this method has no locking and clients // should be careful to not interfere with concurrent use of the client // instance. func (c *Client) UseJSON(useJSON bool) { c.useJSON = useJSON } // SetRootCAs defines the set of PEM-encoded file locations of root certificate // authorities the client uses when verifying server certificates instead of the // TLS defaults which uses the host's root CA set. Multiple PEM file locations // can be specified using the OS-specific PathListSeparator. 
// // See: http.Client.Transport.TLSClientConfig.RootCAs and // https://pkg.go.dev/os#PathListSeparator func (c *Client) SetRootCAs(pemPaths string) error { pool := x509.NewCertPool() for _, name := range filepath.SplitList(pemPaths) { pem, err := os.ReadFile(filepath.Clean(name)) if err != nil { return err } if ok := pool.AppendCertsFromPEM(pem); !ok { return errInvalidCACertificate{ File: name, } } } c.t.TLSClientConfig.RootCAs = pool return nil } // Add default https port if missing func hostAddr(addr string) string { _, port := splitHostPort(addr) if port == "" { return addr + ":443" } return addr } // SetThumbprint sets the known certificate thumbprint for the given host. // A custom DialTLS function is used to support thumbprint based verification. // We first try tls.Dial with the default tls.Config, only falling back to thumbprint verification // if it fails with an x509.UnknownAuthorityError or x509.HostnameError // // See: http.Client.Transport.DialTLS func (c *Client) SetThumbprint(host string, thumbprint string) { host = hostAddr(host) c.hostsMu.Lock() if thumbprint == "" { delete(c.hosts, host) } else { c.hosts[host] = thumbprint } c.hostsMu.Unlock() } // Thumbprint returns the certificate thumbprint for the given host if known to this client. func (c *Client) Thumbprint(host string) string { host = hostAddr(host) c.hostsMu.Lock() defer c.hostsMu.Unlock() return c.hosts[host] } // KnownThumbprint checks whether the provided thumbprint is known to this client. func (c *Client) KnownThumbprint(tp string) bool { c.hostsMu.Lock() defer c.hostsMu.Unlock() for _, v := range c.hosts { if v == tp { return true } } return false } // LoadThumbprints from file with the give name. // If name is empty or name does not exist this function will return nil. 
func (c *Client) LoadThumbprints(file string) error { if file == "" { return nil } for _, name := range filepath.SplitList(file) { err := c.loadThumbprints(name) if err != nil { return err } } return nil } func (c *Client) loadThumbprints(name string) error { f, err := os.Open(filepath.Clean(name)) if err != nil { if os.IsNotExist(err) { return nil } return err } scanner := bufio.NewScanner(f) for scanner.Scan() { e := strings.SplitN(scanner.Text(), " ", 2) if len(e) != 2 { continue } c.SetThumbprint(e[0], e[1]) } _ = f.Close() return scanner.Err() } // ThumbprintSHA1 returns the thumbprint of the given cert in the same format used by the SDK and Client.SetThumbprint. // // See: SSLVerifyFault.Thumbprint, SessionManagerGenericServiceTicket.Thumbprint, HostConnectSpec.SslThumbprint func ThumbprintSHA1(cert *x509.Certificate) string { sum := sha1.Sum(cert.Raw) hex := make([]string, len(sum)) for i, b := range sum { hex[i] = fmt.Sprintf("%02X", b) } return strings.Join(hex, ":") } // ThumbprintSHA256 returns the sha256 thumbprint of the given cert. func ThumbprintSHA256(cert *x509.Certificate) string { sum := sha256.Sum256(cert.Raw) hex := make([]string, len(sum)) for i, b := range sum { hex[i] = fmt.Sprintf("%02X", b) } return strings.Join(hex, ":") } func thumbprintMatches(thumbprint string, cert *x509.Certificate) bool { return thumbprint == ThumbprintSHA256(cert) || thumbprint == ThumbprintSHA1(cert) } func (c *Client) dialTLSContext( ctx context.Context, network, addr string) (net.Conn, error) { // Would be nice if there was a tls.Config.Verify func, // see tls.clientHandshakeState.doFullHandshake conn, err := tls.Dial(network, addr, c.t.TLSClientConfig) if err == nil { return conn, nil } // Allow a thumbprint verification attempt if the error indicates // the failure was due to lack of trust. 
if !IsCertificateUntrusted(err) { return nil, err } thumbprint := c.Thumbprint(addr) if thumbprint == "" { return nil, err } config := &tls.Config{InsecureSkipVerify: true} conn, err = tls.Dial(network, addr, config) if err != nil { return nil, err } cert := conn.ConnectionState().PeerCertificates[0] if thumbprintMatches(thumbprint, cert) { return conn, nil } _ = conn.Close() return nil, fmt.Errorf("host %q thumbprint does not match %q", addr, thumbprint) } // splitHostPort is similar to net.SplitHostPort, // but rather than return error if there isn't a ':port', // return an empty string for the port. func splitHostPort(host string) (string, string) { ix := strings.LastIndex(host, ":") if ix <= strings.LastIndex(host, "]") { return host, "" } name := host[:ix] port := host[ix+1:] return name, port } const sdkTunnel = "sdkTunnel:8089" // Certificate returns the current TLS certificate. func (c *Client) Certificate() *tls.Certificate { certs := c.t.TLSClientConfig.Certificates if len(certs) == 0 { return nil } return &certs[0] } // SetCertificate st a certificate for TLS use. 
func (c *Client) SetCertificate(cert tls.Certificate) { t := c.Client.Transport.(*http.Transport) // Extension or HoK certificate t.TLSClientConfig.Certificates = []tls.Certificate{cert} } // UseServiceVersion sets Client.Version to the current version of the service endpoint via /sdk/vimServiceVersions.xml func (c *Client) UseServiceVersion(kind ...string) error { ns := "vim" if len(kind) != 0 { ns = kind[0] } u := c.URL() u.Path = path.Join("/sdk", ns+"ServiceVersions.xml") res, err := c.Get(u.String()) if err != nil { return err } if res.StatusCode != http.StatusOK { return fmt.Errorf("http.Get(%s): %s", u.Path, res.Status) } v := struct { Namespace *string `xml:"namespace>name"` Version *string `xml:"namespace>version"` }{ &c.Namespace, &c.Version, } err = xml.NewDecoder(res.Body).Decode(&v) _ = res.Body.Close() if err != nil { return fmt.Errorf("xml.Decode(%s): %s", u.Path, err) } return nil } // Tunnel returns a Client configured to proxy requests through vCenter's http port 80, // to the SDK tunnel virtual host. Use of the SDK tunnel is required by LoginExtensionByCertificate() // and optional for other methods. func (c *Client) Tunnel() *Client { tunnel := c.newServiceClientWithTransport(c.u.Path, c.Namespace, c.DefaultTransport().Clone()) t := tunnel.Client.Transport.(*http.Transport) // Proxy to vCenter host on port 80 host := tunnel.u.Hostname() // Should be no reason to change the default port other than testing key := "GOVMOMI_TUNNEL_PROXY_PORT" port := tunnel.URL().Query().Get(key) if port == "" { port = os.Getenv(key) } if port != "" { host += ":" + port } t.Proxy = http.ProxyURL(&url.URL{ Scheme: "http", Host: host, }) // Rewrite url Host to use the sdk tunnel, required for a certificate request. 
tunnel.u.Host = sdkTunnel return tunnel } // URL returns the URL to which the client is configured func (c *Client) URL() *url.URL { urlCopy := *c.u return &urlCopy } type marshaledClient struct { Cookies []*http.Cookie `json:"cookies"` URL *url.URL `json:"url"` Insecure bool `json:"insecure"` Version string `json:"version"` UseJSON bool `json:"useJSON"` } // MarshalJSON writes the Client configuration to JSON. func (c *Client) MarshalJSON() ([]byte, error) { m := marshaledClient{ Cookies: c.Jar.Cookies(c.u), URL: c.u, Insecure: c.k, Version: c.Version, UseJSON: c.useJSON, } return json.Marshal(m) } // UnmarshalJSON rads Client configuration from JSON. func (c *Client) UnmarshalJSON(b []byte) error { var m marshaledClient err := json.Unmarshal(b, &m) if err != nil { return err } *c = *NewClient(m.URL, m.Insecure) c.Version = m.Version c.Jar.SetCookies(m.URL, m.Cookies) c.useJSON = m.UseJSON return nil } func (c *Client) setInsecureCookies(res *http.Response) { cookies := res.Cookies() if len(cookies) != 0 { for _, cookie := range cookies { cookie.Secure = false } c.Jar.SetCookies(c.u, cookies) } } // Do is equivalent to http.Client.Do and takes care of API specifics including // logging, user-agent header, handling cookies, measuring responsiveness of the // API func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Response) error) error { if ctx == nil { ctx = context.Background() } // Create debugging context for this round trip d := c.d.newRoundTrip() if d.enabled() { defer d.done() } // use default if c.UserAgent == "" { c.UserAgent = defaultUserAgent } req.Header.Set(`User-Agent`, c.UserAgent) ext := "" if d.enabled() { ext = d.debugRequest(req) } res, err := c.Client.Do(req.WithContext(ctx)) if err != nil { return err } if d.enabled() { d.debugResponse(res, ext) } if c.insecureCookies { c.setInsecureCookies(res) } defer res.Body.Close() return f(res) } // Signer can be implemented by soap.Header.Security to sign requests. 
// If the soap.Header.Security field is set to an implementation of Signer via WithHeader(), // then Client.RoundTrip will call Sign() to marshal the SOAP request. type Signer interface { Sign(Envelope) ([]byte, error) } type headerContext struct{} // WithHeader can be used to modify the outgoing request soap.Header fields. func (c *Client) WithHeader(ctx context.Context, header Header) context.Context { return context.WithValue(ctx, headerContext{}, header) } type statusError struct { res *http.Response } // Temporary returns true for HTTP response codes that can be retried // See vim25.IsTemporaryNetworkError func (e *statusError) Temporary() bool { switch e.res.StatusCode { case http.StatusBadGateway: return true } return false } func (e *statusError) Error() string { return e.res.Status } func newStatusError(res *http.Response) error { return &url.Error{ Op: res.Request.Method, URL: res.Request.URL.Path, Err: &statusError{res}, } } // RoundTrip executes an API request to VMOMI server. func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error { if !c.useJSON { return c.soapRoundTrip(ctx, reqBody, resBody) } return c.jsonRoundTrip(ctx, reqBody, resBody) } func (c *Client) soapRoundTrip(ctx context.Context, reqBody, resBody HasFault) error { var err error var b []byte reqEnv := Envelope{Body: reqBody} resEnv := Envelope{Body: resBody} h, ok := ctx.Value(headerContext{}).(Header) if !ok { h = Header{} } // We added support for OperationID before soap.Header was exported. 
if id, ok := ctx.Value(types.ID{}).(string); ok { h.ID = id } if c.Cookie != nil { h.Cookie = c.Cookie() } if h.Cookie != nil || h.ID != "" || h.Security != nil { reqEnv.Header = &h // XML marshal header only if a field is set } if signer, ok := h.Security.(Signer); ok { b, err = signer.Sign(reqEnv) if err != nil { return err } } else { b, err = xml.Marshal(reqEnv) if err != nil { panic(err) } } rawReqBody := io.MultiReader(strings.NewReader(xml.Header), bytes.NewReader(b)) req, err := http.NewRequest("POST", c.u.String(), rawReqBody) if err != nil { panic(err) } req.Header.Set(`Content-Type`, `text/xml; charset="utf-8"`) action := h.Action if action == "" { action = fmt.Sprintf("%s/%s", c.Namespace, c.Version) } req.Header.Set(`SOAPAction`, action) return c.Do(ctx, req, func(res *http.Response) error { switch res.StatusCode { case http.StatusOK: // OK case http.StatusInternalServerError: // Error, but typically includes a body explaining the error default: return newStatusError(res) } dec := xml.NewDecoder(res.Body) dec.TypeFunc = c.Types err = dec.Decode(&resEnv) if err != nil { return err } if f := resBody.Fault(); f != nil { return WrapSoapFault(f) } return err }) } func (c *Client) CloseIdleConnections() { c.t.CloseIdleConnections() } // ParseURL wraps url.Parse to rewrite the URL.Host field // In the case of VM guest uploads or NFC lease URLs, a Host // field with a value of "*" is rewritten to the Client's URL.Host. 
func (c *Client) ParseURL(urlStr string) (*url.URL, error) { u, err := url.Parse(urlStr) if err != nil { return nil, err } host, _ := splitHostPort(u.Host) if host == "*" { // Also use Client's port, to support port forwarding u.Host = c.URL().Host } return u, nil } type Upload struct { Type string Method string ContentLength int64 Headers map[string]string Ticket *http.Cookie Progress progress.Sinker Close bool } var DefaultUpload = Upload{ Type: "application/octet-stream", Method: "PUT", } // Upload PUTs the local file to the given URL func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *Upload) error { var err error if param.Progress != nil { pr := progress.NewReader(ctx, param.Progress, f, param.ContentLength) f = pr // Mark progress reader as done when returning from this function. defer func() { pr.Done(err) }() } req, err := http.NewRequest(param.Method, u.String(), f) if err != nil { return err } req = req.WithContext(ctx) req.Close = param.Close req.ContentLength = param.ContentLength req.Header.Set("Content-Type", param.Type) for k, v := range param.Headers { req.Header.Add(k, v) } if param.Ticket != nil { req.AddCookie(param.Ticket) } res, err := c.Client.Do(req) if err != nil { return err } defer res.Body.Close() switch res.StatusCode { case http.StatusOK: case http.StatusCreated: default: err = errors.New(res.Status) } return err } // UploadFile PUTs the local file to the given URL func (c *Client) UploadFile(ctx context.Context, file string, u *url.URL, param *Upload) error { if param == nil { p := DefaultUpload // Copy since we set ContentLength param = &p } s, err := os.Stat(file) if err != nil { return err } f, err := os.Open(filepath.Clean(file)) if err != nil { return err } defer f.Close() param.ContentLength = s.Size() return c.Upload(ctx, f, u, param) } type Download struct { Method string Headers map[string]string Ticket *http.Cookie Progress progress.Sinker Writer io.Writer Close bool } var DefaultDownload = Download{ 
Method: "GET", } // DownloadRequest wraps http.Client.Do, returning the http.Response without checking its StatusCode func (c *Client) DownloadRequest(ctx context.Context, u *url.URL, param *Download) (*http.Response, error) { req, err := http.NewRequest(param.Method, u.String(), nil) if err != nil { return nil, err } req = req.WithContext(ctx) req.Close = param.Close for k, v := range param.Headers { req.Header.Add(k, v) } if param.Ticket != nil { req.AddCookie(param.Ticket) } return c.Client.Do(req) } // Download GETs the remote file from the given URL func (c *Client) Download(ctx context.Context, u *url.URL, param *Download) (io.ReadCloser, int64, error) { res, err := c.DownloadRequest(ctx, u, param) if err != nil { return nil, 0, err } switch res.StatusCode { case http.StatusOK: default: err = fmt.Errorf("download(%s): %s", u, res.Status) } if err != nil { return nil, 0, err } r := res.Body return r, res.ContentLength, nil } func (c *Client) WriteFile(ctx context.Context, file string, src io.Reader, size int64, s progress.Sinker, w io.Writer) error { var err error r := src fh, err := os.Create(file) if err != nil { return err } if s != nil { pr := progress.NewReader(ctx, s, src, size) r = pr // Mark progress reader as done when returning from this function. 
defer func() { pr.Done(err) }() } if w == nil { w = fh } else { w = io.MultiWriter(w, fh) } _, err = io.Copy(w, r) cerr := fh.Close() if err == nil { err = cerr } return err } // DownloadFile GETs the given URL to a local file func (c *Client) DownloadFile(ctx context.Context, file string, u *url.URL, param *Download) error { var err error if param == nil { param = &DefaultDownload } rc, contentLength, err := c.Download(ctx, u, param) if err != nil { return err } return c.WriteFile(ctx, file, rc, contentLength, param.Progress, param.Writer) } // execName gets the name of the executable for the current process func execName() string { name, err := os.Executable() if err != nil { return "N/A" } return strings.TrimSuffix(filepath.Base(name), ".exe") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/soap/error.go
vendor/github.com/vmware/govmomi/vim25/soap/error.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package soap import ( "crypto/x509" "encoding/json" "errors" "fmt" "reflect" "strings" "github.com/vmware/govmomi/vim25/types" ) type soapFaultError struct { fault *Fault } func (s soapFaultError) Error() string { msg := s.fault.String if msg == "" { if s.fault.Detail.Fault == nil { msg = "unknown fault" } else { msg = reflect.TypeOf(s.fault.Detail.Fault).Name() } } return fmt.Sprintf("%s: %s", s.fault.Code, msg) } func (s soapFaultError) MarshalJSON() ([]byte, error) { out := struct { Fault *Fault }{ Fault: s.fault, } return json.Marshal(out) } func (s soapFaultError) Fault() types.BaseMethodFault { if s.fault != nil { fault := s.fault.Detail.Fault if fault == nil { return nil } if f, ok := fault.(types.BaseMethodFault); ok { return f } if val := reflect.ValueOf(fault); val.Kind() != reflect.Pointer { ptrVal := reflect.New(val.Type()) ptrVal.Elem().Set(val) if f, ok := ptrVal.Interface().(types.BaseMethodFault); ok { return f } } } return nil } type vimFaultError struct { fault types.BaseMethodFault } func (v vimFaultError) Error() string { typ := reflect.TypeOf(v.fault) for typ.Kind() == reflect.Ptr { typ = typ.Elem() } return typ.Name() } func (v vimFaultError) Fault() types.BaseMethodFault { return v.fault } func WrapSoapFault(f *Fault) error { return soapFaultError{f} } func IsSoapFault(err error) bool { _, ok := err.(soapFaultError) return ok } func ToSoapFault(err error) *Fault { return err.(soapFaultError).fault } func WrapVimFault(v types.BaseMethodFault) error { return vimFaultError{v} } func IsVimFault(err error) bool { _, ok := err.(vimFaultError) return ok } func ToVimFault(err error) types.BaseMethodFault { return err.(vimFaultError).fault } func IsCertificateUntrusted(err error) bool { // golang 1.20 introduce a new type to wrap 509 errors. 
So instead of // casting the type, now we check the error chain contains the // x509 error or not. if errors.As(err, &x509.UnknownAuthorityError{}) { return true } if errors.As(err, &x509.HostnameError{}) { return true } // The err variable may not be a special type of x509 or HTTP // error that can be validated by a type assertion. The err variable is // in fact be an *errors.errorString. msgs := []string{ "certificate is not trusted", "certificate signed by unknown authority", } for _, msg := range msgs { if strings.HasSuffix(err.Error(), msg) { return true } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/soap/json_client.go
vendor/github.com/vmware/govmomi/vim25/soap/json_client.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package soap import ( "bytes" "context" "errors" "fmt" "io" "mime" "net/http" "reflect" "strings" "github.com/vmware/govmomi/vim25/xml" "github.com/vmware/govmomi/vim25/types" ) const ( sessionHeader = "vmware-api-session-id" ) var ( // errInvalidResponse is used during unmarshaling when the response content // does not match expectations e.g. unexpected HTTP status code or MIME // type. errInvalidResponse error = errors.New("Invalid response") // errInputError is used as root error when the request is malformed. errInputError error = errors.New("Invalid input error") ) // Handles round trip using json HTTP func (c *Client) jsonRoundTrip(ctx context.Context, req, res HasFault) error { this, method, params, err := unpackSOAPRequest(req) if err != nil { return fmt.Errorf("Cannot unpack the request. %w", err) } return c.invoke(ctx, this, method, params, res) } // Invoke calls a managed object method func (c *Client) invoke(ctx context.Context, this types.ManagedObjectReference, method string, params any, res HasFault) error { buffer := bytes.Buffer{} if params != nil { marshaller := types.NewJSONEncoder(&buffer) err := marshaller.Encode(params) if err != nil { return fmt.Errorf("Encoding request to JSON failed. %w", err) } } uri := c.getPathForName(this, method) req, err := http.NewRequest(http.MethodPost, uri, &buffer) if err != nil { return err } if c.Cookie != nil { if cookie := c.Cookie(); cookie != nil { req.Header.Add(sessionHeader, cookie.Value) } } result, err := getSOAPResultPtr(res) if err != nil { return fmt.Errorf("Cannot get pointer to the result structure. %w", err) } return c.Do(ctx, req, c.responseUnmarshaler(&result)) } // responseUnmarshaler create unmarshaler function for VMOMI JSON request. The // unmarshaler checks for errors and tries to load the response body in the // result structure. 
It is assumed that result is pointer to a data structure or // interface{}. func (c *Client) responseUnmarshaler(result any) func(resp *http.Response) error { return func(resp *http.Response) error { if resp.StatusCode == http.StatusNoContent || (!isError(resp.StatusCode) && resp.ContentLength == 0) { return nil } if e := checkJSONContentType(resp); e != nil { return e } if resp.StatusCode == 500 { bodyBytes, e := io.ReadAll(resp.Body) if e != nil { return e } var serverErr any dec := types.NewJSONDecoder(bytes.NewReader(bodyBytes)) e = dec.Decode(&serverErr) if e != nil { return e } var faultStringStruct struct { FaultString string `json:"faultstring,omitempty"` } dec = types.NewJSONDecoder(bytes.NewReader(bodyBytes)) e = dec.Decode(&faultStringStruct) if e != nil { return e } f := &Fault{ XMLName: xml.Name{ Space: c.Namespace, Local: reflect.TypeOf(serverErr).Name() + "Fault", }, String: faultStringStruct.FaultString, Code: "ServerFaultCode", } f.Detail.Fault = serverErr return WrapSoapFault(f) } if isError(resp.StatusCode) { return fmt.Errorf("Unexpected HTTP error code: %v. %w", resp.StatusCode, errInvalidResponse) } dec := types.NewJSONDecoder(resp.Body) e := dec.Decode(result) if e != nil { return e } c.checkForSessionHeader(resp) return nil } } func isError(statusCode int) bool { return statusCode < http.StatusOK || statusCode >= http.StatusMultipleChoices } // checkForSessionHeader checks if we have new session id. // This is a hack that intercepts the session id header and then repeats it. // It is very similar to cookie store but only for the special vCenter // session header. func (c *Client) checkForSessionHeader(resp *http.Response) { sessionKey := resp.Header.Get(sessionHeader) if sessionKey != "" { c.Cookie = func() *HeaderElement { return &HeaderElement{Value: sessionKey} } } } // Checks if the payload of an HTTP response has the JSON MIME type. 
func checkJSONContentType(resp *http.Response) error { contentType := resp.Header.Get("content-type") mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { return fmt.Errorf("error parsing content-type: %v, error %w", contentType, err) } if mediaType != "application/json" { return fmt.Errorf("content-type is not application/json: %v. %w", contentType, errInvalidResponse) } return nil } func (c *Client) getPathForName(this types.ManagedObjectReference, name string) string { const urnPrefix = "urn:" ns := c.Namespace if strings.HasPrefix(ns, urnPrefix) { ns = ns[len(urnPrefix):] } return fmt.Sprintf("%v/%v/%v/%v/%v/%v", c.u, ns, c.Version, this.Type, this.Value, name) } // unpackSOAPRequest converts SOAP request into this value, method nam and // parameters using reflection. The input is a one of the *Body structures // defined in methods.go. It is expected to have "Req" field that is a non-null // pointer to a struct. The struct simple type name is the method name. The // struct "This" member is the this MoRef value. func unpackSOAPRequest(req HasFault) (this types.ManagedObjectReference, method string, params any, err error) { reqBodyPtr := reflect.ValueOf(req) if reqBodyPtr.Kind() != reflect.Ptr { err = fmt.Errorf("Expected pointer to request body as input. %w", errInputError) return } reqBody := reqBodyPtr.Elem() if reqBody.Kind() != reflect.Struct { err = fmt.Errorf("Expected Request body to be structure. %w", errInputError) return } methodRequestPtr := reqBody.FieldByName("Req") if methodRequestPtr.Kind() != reflect.Ptr { err = fmt.Errorf("Expected method request body field to be pointer to struct. %w", errInputError) return } methodRequest := methodRequestPtr.Elem() if methodRequest.Kind() != reflect.Struct { err = fmt.Errorf("Expected method request body to be structure. 
%w", errInputError) return } thisValue := methodRequest.FieldByName("This") if thisValue.Kind() != reflect.Struct { err = fmt.Errorf("Expected This field in the method request body to be structure. %w", errInputError) return } var ok bool if this, ok = thisValue.Interface().(types.ManagedObjectReference); !ok { err = fmt.Errorf("Expected This field to be MoRef. %w", errInputError) return } method = methodRequest.Type().Name() params = methodRequestPtr.Interface() return } // getSOAPResultPtr extract a pointer to the result data structure using go // reflection from a SOAP data structure used for marshalling. func getSOAPResultPtr(result HasFault) (res any, err error) { resBodyPtr := reflect.ValueOf(result) if resBodyPtr.Kind() != reflect.Ptr { err = fmt.Errorf("Expected pointer to result body as input. %w", errInputError) return } resBody := resBodyPtr.Elem() if resBody.Kind() != reflect.Struct { err = fmt.Errorf("Expected result body to be structure. %w", errInputError) return } methodResponsePtr := resBody.FieldByName("Res") if methodResponsePtr.Kind() != reflect.Ptr { err = fmt.Errorf("Expected method response body field to be pointer to struct. %w", errInputError) return } if methodResponsePtr.IsNil() { methodResponsePtr.Set(reflect.New(methodResponsePtr.Type().Elem())) } methodResponse := methodResponsePtr.Elem() if methodResponse.Kind() != reflect.Struct { err = fmt.Errorf("Expected method response body to be structure. %w", errInputError) return } returnval := methodResponse.FieldByName("Returnval") if !returnval.IsValid() { // void method and we return nil, nil return } res = returnval.Addr().Interface() return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/soap/soap.go
vendor/github.com/vmware/govmomi/vim25/soap/soap.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package soap import ( "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vim25/xml" ) // HeaderElement allows changing the default XMLName (e.g. Cookie's default of vcSessionCookie) type HeaderElement struct { XMLName xml.Name Value string `xml:",chardata"` } // Header includes optional soap Header fields. type Header struct { Action string `xml:"-"` // Action is the 'SOAPAction' HTTP header value. Defaults to "Client.Namespace/Client.Version". Cookie *HeaderElement `xml:"vcSessionCookie,omitempty"` // Cookie is a vCenter session cookie that can be used with other SDK endpoints (e.g. pbm, vslm). ID string `xml:"operationID,omitempty"` // ID is the operationID used by ESX/vCenter logging for correlation. Security any `xml:",omitempty"` // Security is used for SAML token authentication and request signing. } type Envelope struct { XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` Header *Header `xml:"http://schemas.xmlsoap.org/soap/envelope/ Header,omitempty"` Body any } type Fault struct { XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault"` Code string `xml:"faultcode"` String string `xml:"faultstring"` Detail struct { Fault types.AnyType `xml:",any,typeattr"` } `xml:"detail"` } func (f *Fault) VimFault() types.AnyType { return f.Detail.Fault }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/soap/debug.go
vendor/github.com/vmware/govmomi/vim25/soap/debug.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package soap import ( "fmt" "io" "net/http" "net/http/httputil" "sync/atomic" "github.com/vmware/govmomi/vim25/debug" ) var ( // Trace reads an http request or response from rc and writes to w. // The content type (kind) should be one of "xml" or "json". Trace = func(rc io.ReadCloser, w io.Writer, kind string) io.ReadCloser { return debug.NewTeeReader(rc, w) } ) // debugRoundTrip contains state and logic needed to debug a single round trip. type debugRoundTrip struct { cn uint64 // Client number rn uint64 // Request number cs []io.Closer // Files that need closing when done } func (d *debugRoundTrip) enabled() bool { return d != nil } func (d *debugRoundTrip) done() { for _, c := range d.cs { c.Close() } } func (d *debugRoundTrip) newFile(suffix string) io.WriteCloser { return debug.NewFile(fmt.Sprintf("%d-%04d.%s", d.cn, d.rn, suffix)) } func (d *debugRoundTrip) ext(h http.Header) string { const json = "application/json" ext := "xml" if h.Get("Accept") == json || h.Get("Content-Type") == json { ext = "json" } return ext } func (d *debugRoundTrip) debugRequest(req *http.Request) string { if d == nil { return "" } // Capture headers var wc io.WriteCloser = d.newFile("req.headers") b, _ := httputil.DumpRequest(req, false) wc.Write(b) wc.Close() ext := d.ext(req.Header) // Capture body wc = d.newFile("req." + ext) if req.Body != nil { req.Body = Trace(req.Body, wc, ext) } // Delay closing until marked done d.cs = append(d.cs, wc) return ext } func (d *debugRoundTrip) debugResponse(res *http.Response, ext string) { if d == nil { return } // Capture headers var wc io.WriteCloser = d.newFile("res.headers") b, _ := httputil.DumpResponse(res, false) wc.Write(b) wc.Close() // Capture body wc = d.newFile("res." 
+ ext) res.Body = Trace(res.Body, wc, ext) // Delay closing until marked done d.cs = append(d.cs, wc) } var cn uint64 // Client counter // debugContainer wraps the debugging state for a single client. type debugContainer struct { cn uint64 // Client number rn uint64 // Request counter } func newDebug() *debugContainer { d := debugContainer{ cn: atomic.AddUint64(&cn, 1), rn: 0, } if !debug.Enabled() { return nil } return &d } func (d *debugContainer) newRoundTrip() *debugRoundTrip { if d == nil { return nil } drt := debugRoundTrip{ cn: d.cn, rn: atomic.AddUint64(&d.rn, 1), } return &drt }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/fuzz.go
vendor/github.com/vmware/govmomi/vim25/json/fuzz.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build gofuzz // +build gofuzz package json import ( "fmt" ) func Fuzz(data []byte) (score int) { for _, ctor := range []func() interface{}{ func() interface{} { return new(interface{}) }, func() interface{} { return new(map[string]interface{}) }, func() interface{} { return new([]interface{}) }, } { v := ctor() err := Unmarshal(data, v) if err != nil { continue } score = 1 m, err := Marshal(v) if err != nil { fmt.Printf("v=%#v\n", v) panic(err) } u := ctor() err = Unmarshal(m, u) if err != nil { fmt.Printf("v=%#v\n", v) fmt.Printf("m=%s\n", m) panic(err) } } return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/indent.go
vendor/github.com/vmware/govmomi/vim25/json/indent.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "bytes" ) // Compact appends to dst the JSON-encoded src with // insignificant space characters elided. func Compact(dst *bytes.Buffer, src []byte) error { return compact(dst, src, false) } func compact(dst *bytes.Buffer, src []byte, escape bool) error { origLen := dst.Len() scan := newScanner() defer freeScanner(scan) start := 0 for i, c := range src { if escape && (c == '<' || c == '>' || c == '&') { if start < i { dst.Write(src[start:i]) } dst.WriteString(`\u00`) dst.WriteByte(hex[c>>4]) dst.WriteByte(hex[c&0xF]) start = i + 1 } // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { if start < i { dst.Write(src[start:i]) } dst.WriteString(`\u202`) dst.WriteByte(hex[src[i+2]&0xF]) start = i + 3 } v := scan.step(scan, c) if v >= scanSkipSpace { if v == scanError { break } if start < i { dst.Write(src[start:i]) } start = i + 1 } } if scan.eof() == scanError { dst.Truncate(origLen) return scan.err } if start < len(src) { dst.Write(src[start:]) } return nil } func newline(dst *bytes.Buffer, prefix, indent string, depth int) { dst.WriteByte('\n') dst.WriteString(prefix) for i := 0; i < depth; i++ { dst.WriteString(indent) } } // Indent appends to dst an indented form of the JSON-encoded src. // Each element in a JSON object or array begins on a new, // indented line beginning with prefix followed by one or more // copies of indent according to the indentation nesting. // The data appended to dst does not begin with the prefix nor // any indentation, to make it easier to embed inside other formatted JSON data. // Although leading space characters (space, tab, carriage return, newline) // at the beginning of src are dropped, trailing space characters // at the end of src are preserved and copied to dst. 
// For example, if src has no trailing spaces, neither will dst; // if src ends in a trailing newline, so will dst. func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { origLen := dst.Len() scan := newScanner() defer freeScanner(scan) needIndent := false depth := 0 for _, c := range src { scan.bytes++ v := scan.step(scan, c) if v == scanSkipSpace { continue } if v == scanError { break } if needIndent && v != scanEndObject && v != scanEndArray { needIndent = false depth++ newline(dst, prefix, indent, depth) } // Emit semantically uninteresting bytes // (in particular, punctuation in strings) unmodified. if v == scanContinue { dst.WriteByte(c) continue } // Add spacing around real punctuation. switch c { case '{', '[': // delay indent so that empty object and array are formatted as {} and []. needIndent = true dst.WriteByte(c) case ',': dst.WriteByte(c) newline(dst, prefix, indent, depth) case ':': dst.WriteByte(c) dst.WriteByte(' ') case '}', ']': if needIndent { // suppress indent in empty object/array needIndent = false } else { depth-- newline(dst, prefix, indent, depth) } dst.WriteByte(c) default: dst.WriteByte(c) } } if scan.eof() == scanError { dst.Truncate(origLen) return scan.err } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/fold.go
vendor/github.com/vmware/govmomi/vim25/json/fold.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "bytes" "unicode/utf8" ) const ( caseMask = ^byte(0x20) // Mask to ignore case in ASCII. kelvin = '\u212a' smallLongEss = '\u017f' ) // foldFunc returns one of four different case folding equivalence // functions, from most general (and slow) to fastest: // // 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 // 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') // 3) asciiEqualFold, no special, but includes non-letters (including _) // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: // * S maps to s and to U+017F 'ſ' Latin small letter long s // * k maps to K and to U+212A 'K' Kelvin sign // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and // should only be given s. It's not curried for performance reasons. func foldFunc(s []byte) func(s, t []byte) bool { nonLetter := false special := false // special letter for _, b := range s { if b >= utf8.RuneSelf { return bytes.EqualFold } upper := b & caseMask if upper < 'A' || upper > 'Z' { nonLetter = true } else if upper == 'K' || upper == 'S' { // See above for why these letters are special. special = true } } if special { return equalFoldRight } if nonLetter { return asciiEqualFold } return simpleLetterEqualFold } // equalFoldRight is a specialization of bytes.EqualFold when s is // known to be all ASCII (including punctuation), but contains an 's', // 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. // See comments on foldFunc. 
func equalFoldRight(s, t []byte) bool { for _, sb := range s { if len(t) == 0 { return false } tb := t[0] if tb < utf8.RuneSelf { if sb != tb { sbUpper := sb & caseMask if 'A' <= sbUpper && sbUpper <= 'Z' { if sbUpper != tb&caseMask { return false } } else { return false } } t = t[1:] continue } // sb is ASCII and t is not. t must be either kelvin // sign or long s; sb must be s, S, k, or K. tr, size := utf8.DecodeRune(t) switch sb { case 's', 'S': if tr != smallLongEss { return false } case 'k', 'K': if tr != kelvin { return false } default: return false } t = t[size:] } if len(t) > 0 { return false } return true } // asciiEqualFold is a specialization of bytes.EqualFold for use when // s is all ASCII (but may contain non-letters) and contains no // special-folding letters. // See comments on foldFunc. func asciiEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, sb := range s { tb := t[i] if sb == tb { continue } if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { if sb&caseMask != tb&caseMask { return false } } else { return false } } return true } // simpleLetterEqualFold is a specialization of bytes.EqualFold for // use when s is all ASCII letters (no underscores, etc) and also // doesn't contain 'k', 'K', 's', or 'S'. // See comments on foldFunc. func simpleLetterEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, b := range s { if b&caseMask != t[i]&caseMask { return false } } return true }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/stream.go
vendor/github.com/vmware/govmomi/vim25/json/stream.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "bytes" "errors" "io" ) // A Decoder reads and decodes JSON values from an input stream. type Decoder struct { r io.Reader buf []byte d decodeState scanp int // start of unread data in buf scanned int64 // amount of data already scanned scan scanner err error tokenState int tokenStack []int } // NewDecoder returns a new decoder that reads from r. // // The decoder introduces its own buffering and may // read data from r beyond the JSON values requested. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } // UseNumber causes the Decoder to unmarshal a number into an interface{} as a // Number instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination // is a struct and the input contains object keys which do not match any // non-ignored, exported fields in the destination. func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true } // SetDiscriminator tells the decoder to check if JSON objects include a // discriminator that specifies the Go type into which the object should be // decoded. // Map and struct values are encoded as JSON objects as normal, but with an // additional field (typeFieldName) that specifies the object's Go type. // All other values are encoded inside an outer JSON object with a field // (typeFieldName) that specifies the value's Go type and a field // (valueFieldName) that specifies the actual value. // An optional typeFn may be provided to enable looking up custom types based // on type name strings. Built-in types are handled automatically and will be // ignored if they are returned by the typeFn. // Calling SetDiscriminator("", "", nil) disables the discriminator. 
func (dec *Decoder) SetDiscriminator(typeFieldName, valueFieldName string, typeFn DiscriminatorToTypeFunc) { dec.d.discriminatorTypeFieldName = typeFieldName dec.d.discriminatorValueFieldName = valueFieldName dec.d.discriminatorToTypeFn = typeFn } // Decode reads the next JSON-encoded value from its // input and stores it in the value pointed to by v. // // See the documentation for Unmarshal for details about // the conversion of JSON into a Go value. func (dec *Decoder) Decode(v interface{}) error { if dec.err != nil { return dec.err } if err := dec.tokenPrepareForDecode(); err != nil { return err } if !dec.tokenValueAllowed() { return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()} } // Read whole value into buffer. n, err := dec.readValue() if err != nil { return err } dec.d.init(dec.buf[dec.scanp : dec.scanp+n]) dec.scanp += n // Don't save err from unmarshal into dec.err: // the connection is still usable since we read a complete JSON // object from it before the error happened. err = dec.d.unmarshal(v) // fixup token streaming state dec.tokenValueEnd() return err } // Buffered returns a reader of the data remaining in the Decoder's // buffer. The reader is valid until the next call to Decode. func (dec *Decoder) Buffered() io.Reader { return bytes.NewReader(dec.buf[dec.scanp:]) } // readValue reads a JSON value into dec.buf. // It returns the length of the encoding. func (dec *Decoder) readValue() (int, error) { dec.scan.reset() scanp := dec.scanp var err error Input: // help the compiler see that scanp is never negative, so it can remove // some bounds checks below. for scanp >= 0 { // Look in the buffer for a new value. for ; scanp < len(dec.buf); scanp++ { c := dec.buf[scanp] dec.scan.bytes++ switch dec.scan.step(&dec.scan, c) { case scanEnd: // scanEnd is delayed one byte so we decrement // the scanner bytes count by 1 to ensure that // this value is correct in the next call of Decode. 
dec.scan.bytes-- break Input case scanEndObject, scanEndArray: // scanEnd is delayed one byte. // We might block trying to get that byte from src, // so instead invent a space byte. if stateEndValue(&dec.scan, ' ') == scanEnd { scanp++ break Input } case scanError: dec.err = dec.scan.err return 0, dec.scan.err } } // Did the last read have an error? // Delayed until now to allow buffer scan. if err != nil { if err == io.EOF { if dec.scan.step(&dec.scan, ' ') == scanEnd { break Input } if nonSpace(dec.buf) { err = io.ErrUnexpectedEOF } } dec.err = err return 0, err } n := scanp - dec.scanp err = dec.refill() scanp = dec.scanp + n } return scanp - dec.scanp, nil } func (dec *Decoder) refill() error { // Make room to read more into the buffer. // First slide down data already consumed. if dec.scanp > 0 { dec.scanned += int64(dec.scanp) n := copy(dec.buf, dec.buf[dec.scanp:]) dec.buf = dec.buf[:n] dec.scanp = 0 } // Grow buffer if not large enough. const minRead = 512 if cap(dec.buf)-len(dec.buf) < minRead { newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead) copy(newBuf, dec.buf) dec.buf = newBuf } // Read. Delay error for next iteration (after scan). n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) dec.buf = dec.buf[0 : len(dec.buf)+n] return err } func nonSpace(b []byte) bool { for _, c := range b { if !isSpace(c) { return true } } return false } // An Encoder writes JSON values to an output stream. type Encoder struct { w io.Writer err error escapeHTML bool indentBuf *bytes.Buffer indentPrefix string indentValue string discriminatorTypeFieldName string discriminatorValueFieldName string discriminatorEncodeMode DiscriminatorEncodeMode typeToDiscriminatorFn TypeToDiscriminatorFunc } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w: w, escapeHTML: true} } // Encode writes the JSON encoding of v to the stream, // followed by a newline character. 
// // See the documentation for Marshal for details about the // conversion of Go values to JSON. func (enc *Encoder) Encode(v interface{}) error { if enc.err != nil { return enc.err } e := newEncodeState() err := e.marshal(v, encOpts{ escapeHTML: enc.escapeHTML, discriminatorTypeFieldName: enc.discriminatorTypeFieldName, discriminatorValueFieldName: enc.discriminatorValueFieldName, discriminatorEncodeMode: enc.discriminatorEncodeMode, discriminatorValueFn: enc.typeToDiscriminatorFn, }) if err != nil { return err } // Terminate each value with a newline. // This makes the output look a little nicer // when debugging, and some kind of space // is required if the encoded value was a number, // so that the reader knows there aren't more // digits coming. e.WriteByte('\n') b := e.Bytes() if enc.indentPrefix != "" || enc.indentValue != "" { if enc.indentBuf == nil { enc.indentBuf = new(bytes.Buffer) } enc.indentBuf.Reset() err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue) if err != nil { return err } b = enc.indentBuf.Bytes() } if _, err = enc.w.Write(b); err != nil { enc.err = err } encodeStatePool.Put(e) return err } // SetIndent instructs the encoder to format each subsequent encoded // value as if indented by the package-level function Indent(dst, src, prefix, indent). // Calling SetIndent("", "") disables indentation. func (enc *Encoder) SetIndent(prefix, indent string) { enc.indentPrefix = prefix enc.indentValue = indent } // SetEscapeHTML specifies whether problematic HTML characters // should be escaped inside JSON quoted strings. // The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e // to avoid certain safety problems that can arise when embedding JSON in HTML. // // In non-HTML settings where the escaping interferes with the readability // of the output, SetEscapeHTML(false) disables this behavior. 
func (enc *Encoder) SetEscapeHTML(on bool) { enc.escapeHTML = on } // SetDiscriminator specifies that a value stored in an interface should be // encoded with information about the value's Go type. // Map and struct values are encoded as JSON objects as normal, but with an // additional field (typeFieldName) that specifies the object's Go type. // All other values are encoded inside an outer JSON object with a field // (typeFieldName) that specifies the value's Go type and a field // (valueFieldName) that specifies the actual value. // A mask (mode) is available to control the encoder's behavior. // Calling SetDiscriminator("", "", 0) disables the discriminator. func (enc *Encoder) SetDiscriminator(typeFieldName, valueFieldName string, mode DiscriminatorEncodeMode) { enc.discriminatorTypeFieldName = typeFieldName enc.discriminatorValueFieldName = valueFieldName enc.discriminatorEncodeMode = mode enc.typeToDiscriminatorFn = DefaultDiscriminatorFunc } // SetTypeToDiscriminatorFunc allows for customizing the discriminator value for // different types. This may be useful if the golang struct names do not match // the desired values. One example would be if discriminator values in a // protocol require special characters or start with lowercase letter. The // TypeToDiscriminatorFunc implementation may return empty string to suppress // the rendering of discriminator for specific type(s). func (enc *Encoder) SetTypeToDiscriminatorFunc(f TypeToDiscriminatorFunc) { if f == nil { enc.typeToDiscriminatorFn = DefaultDiscriminatorFunc return } enc.typeToDiscriminatorFn = f } // RawMessage is a raw encoded JSON value. // It implements Marshaler and Unmarshaler and can // be used to delay JSON decoding or precompute a JSON encoding. type RawMessage []byte // MarshalJSON returns m as the JSON encoding of m. func (m RawMessage) MarshalJSON() ([]byte, error) { if m == nil { return []byte("null"), nil } return m, nil } // UnmarshalJSON sets *m to a copy of data. 
func (m *RawMessage) UnmarshalJSON(data []byte) error { if m == nil { return errors.New("json.RawMessage: UnmarshalJSON on nil pointer") } *m = append((*m)[0:0], data...) return nil } var _ Marshaler = (*RawMessage)(nil) var _ Unmarshaler = (*RawMessage)(nil) // A Token holds a value of one of these types: // // Delim, for the four JSON delimiters [ ] { } // bool, for JSON booleans // float64, for JSON numbers // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null type Token interface{} const ( tokenTopValue = iota tokenArrayStart tokenArrayValue tokenArrayComma tokenObjectStart tokenObjectKey tokenObjectColon tokenObjectValue tokenObjectComma ) // advance tokenstate from a separator state to a value state func (dec *Decoder) tokenPrepareForDecode() error { // Note: Not calling peek before switch, to avoid // putting peek into the standard Decode path. // peek is only called when using the Token API. switch dec.tokenState { case tokenArrayComma: c, err := dec.peek() if err != nil { return err } if c != ',' { return &SyntaxError{"expected comma after array element", dec.InputOffset()} } dec.scanp++ dec.tokenState = tokenArrayValue case tokenObjectColon: c, err := dec.peek() if err != nil { return err } if c != ':' { return &SyntaxError{"expected colon after object key", dec.InputOffset()} } dec.scanp++ dec.tokenState = tokenObjectValue } return nil } func (dec *Decoder) tokenValueAllowed() bool { switch dec.tokenState { case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue: return true } return false } func (dec *Decoder) tokenValueEnd() { switch dec.tokenState { case tokenArrayStart, tokenArrayValue: dec.tokenState = tokenArrayComma case tokenObjectValue: dec.tokenState = tokenObjectComma } } // A Delim is a JSON array or object delimiter, one of [ ] { or }. type Delim rune func (d Delim) String() string { return string(d) } // Token returns the next JSON token in the input stream. 
// At the end of the input stream, Token returns nil, io.EOF. // // Token guarantees that the delimiters [ ] { } it returns are // properly nested and matched: if Token encounters an unexpected // delimiter in the input, it will return an error. // // The input stream consists of basic JSON values—bool, string, // number, and null—along with delimiters [ ] { } of type Delim // to mark the start and end of arrays and objects. // Commas and colons are elided. func (dec *Decoder) Token() (Token, error) { for { c, err := dec.peek() if err != nil { return nil, err } switch c { case '[': if !dec.tokenValueAllowed() { return dec.tokenError(c) } dec.scanp++ dec.tokenStack = append(dec.tokenStack, dec.tokenState) dec.tokenState = tokenArrayStart return Delim('['), nil case ']': if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma { return dec.tokenError(c) } dec.scanp++ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1] dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1] dec.tokenValueEnd() return Delim(']'), nil case '{': if !dec.tokenValueAllowed() { return dec.tokenError(c) } dec.scanp++ dec.tokenStack = append(dec.tokenStack, dec.tokenState) dec.tokenState = tokenObjectStart return Delim('{'), nil case '}': if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma { return dec.tokenError(c) } dec.scanp++ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1] dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1] dec.tokenValueEnd() return Delim('}'), nil case ':': if dec.tokenState != tokenObjectColon { return dec.tokenError(c) } dec.scanp++ dec.tokenState = tokenObjectValue continue case ',': if dec.tokenState == tokenArrayComma { dec.scanp++ dec.tokenState = tokenArrayValue continue } if dec.tokenState == tokenObjectComma { dec.scanp++ dec.tokenState = tokenObjectKey continue } return dec.tokenError(c) case '"': if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey { var x string old := dec.tokenState 
dec.tokenState = tokenTopValue err := dec.Decode(&x) dec.tokenState = old if err != nil { return nil, err } dec.tokenState = tokenObjectColon return x, nil } fallthrough default: if !dec.tokenValueAllowed() { return dec.tokenError(c) } var x interface{} if err := dec.Decode(&x); err != nil { return nil, err } return x, nil } } } func (dec *Decoder) tokenError(c byte) (Token, error) { var context string switch dec.tokenState { case tokenTopValue: context = " looking for beginning of value" case tokenArrayStart, tokenArrayValue, tokenObjectValue: context = " looking for beginning of value" case tokenArrayComma: context = " after array element" case tokenObjectKey: context = " looking for beginning of object key string" case tokenObjectColon: context = " after object key" case tokenObjectComma: context = " after object key:value pair" } return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()} } // More reports whether there is another element in the // current array or object being parsed. func (dec *Decoder) More() bool { c, err := dec.peek() return err == nil && c != ']' && c != '}' } func (dec *Decoder) peek() (byte, error) { var err error for { for i := dec.scanp; i < len(dec.buf); i++ { c := dec.buf[i] if isSpace(c) { continue } dec.scanp = i return c, nil } // buffer has been scanned, now report any error if err != nil { return 0, err } err = dec.refill() } } // InputOffset returns the input stream byte offset of the current decoder position. // The offset gives the location of the end of the most recently returned token // and the beginning of the next token. func (dec *Decoder) InputOffset() int64 { return dec.scanned + int64(dec.scanp) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/tags.go
vendor/github.com/vmware/govmomi/vim25/json/tags.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "strings" ) // tagOptions is the string following a comma in a struct field's "json" // tag, or the empty string. It does not include the leading comma. type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. func parseTag(tag string) (string, tagOptions) { if idx := strings.Index(tag, ","); idx != -1 { return tag[:idx], tagOptions(tag[idx+1:]) } return tag, tagOptions("") } // Contains reports whether a comma-separated list of options // contains a particular substr flag. substr must be surrounded by a // string boundary or commas. func (o tagOptions) Contains(optionName string) bool { if len(o) == 0 { return false } s := string(o) for s != "" { var next string i := strings.Index(s, ",") if i >= 0 { s, next = s[:i], s[i+1:] } if s == optionName { return true } s = next } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/encode.go
vendor/github.com/vmware/govmomi/vim25/json/encode.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package json implements encoding and decoding of JSON as defined in // RFC 7159. The mapping between JSON and Go values is described // in the documentation for the Marshal and Unmarshal functions. // // See "JSON and Go" for an introduction to this package: // https://golang.org/doc/articles/json_and_go.html package json import ( "bytes" "encoding" "encoding/base64" "fmt" "math" "reflect" "sort" "strconv" "strings" "sync" "unicode" "unicode/utf8" ) // Marshal returns the JSON encoding of v. // // Marshal traverses the value v recursively. // If an encountered value implements the Marshaler interface // and is not a nil pointer, Marshal calls its MarshalJSON method // to produce JSON. If no MarshalJSON method is present but the // value implements encoding.TextMarshaler instead, Marshal calls // its MarshalText method and encodes the result as a JSON string. // The nil pointer exception is not strictly necessary // but mimics a similar, necessary exception in the behavior of // UnmarshalJSON. // // Otherwise, Marshal uses the following type-dependent default encodings: // // Boolean values encode as JSON booleans. // // Floating point, integer, and Number values encode as JSON numbers. // // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // So that the JSON will be safe to embed inside HTML <script> tags, // the string is encoded using HTMLEscape, // which replaces "<", ">", "&", U+2028, and U+2029 are escaped // to "\u003c","\u003e", "\u0026", "\u2028", and "\u2029". // This replacement can be disabled when using an Encoder, // by calling SetEscapeHTML(false). // // Array and slice values encode as JSON arrays, except that // []byte encodes as a base64-encoded string, and a nil slice // encodes as the null JSON value. 
// // Struct values encode as JSON objects. // Each exported struct field becomes a member of the object, using the // field name as the object key, unless the field is omitted for one of the // reasons given below. // // The encoding of each struct field can be customized by the format string // stored under the "json" key in the struct field's tag. // The format string gives the name of the field, possibly followed by a // comma-separated list of options. The name may be empty in order to // specify options without overriding the default field name. // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as // false, 0, a nil pointer, a nil interface value, and any empty array, // slice, map, or string. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". // // Examples of struct field tags and their meanings: // // // Field appears in JSON as key "myName". // Field int `json:"myName"` // // // Field appears in JSON as key "myName" and // // the field is omitted from the object if its value is empty, // // as defined above. // Field int `json:"myName,omitempty"` // // // Field appears in JSON as key "Field" (the default), but // // the field is skipped if empty. // // Note the leading comma. // Field int `json:",omitempty"` // // // Field is ignored by this package. // Field int `json:"-"` // // // Field appears in JSON as key "-". // Field int `json:"-,"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. 
This extra level of encoding is sometimes used // when communicating with JavaScript programs: // // Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, and ASCII punctuation except quotation // marks, backslash, and comma. // // Anonymous struct fields are usually marshaled as if their inner exported fields // were fields in the outer struct, subject to the usual Go visibility rules amended // as described in the next paragraph. // An anonymous struct field with a name given in its JSON tag is treated as // having that name, rather than being anonymous. // An anonymous struct field of interface type is treated the same as having // that type as its name, rather than being anonymous. // // The Go visibility rules for struct fields are amended for JSON when // deciding which field to marshal or unmarshal. If there are // multiple fields at the same level, and that level is the least // nested (and would therefore be the nesting level selected by the // usual Go rules), the following extra rules apply: // // 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, // even if there are multiple untagged fields that would otherwise conflict. // // 2) If there is exactly one field (tagged or not according to the first rule), that is selected. // // 3) Otherwise there are multiple fields, and all are ignored; no error occurs. // // Handling of anonymous struct fields is new in Go 1.1. // Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of // an anonymous struct field in both current and earlier versions, give the field // a JSON tag of "-". // // Map values encode as JSON objects. The map's key type must either be a // string, an integer type, or implement encoding.TextMarshaler. 
The map keys // are sorted and used as JSON object keys by applying the following rules, // subject to the UTF-8 coercion described for string values above: // - keys of any string type are used directly // - encoding.TextMarshalers are marshaled // - integer keys are converted to strings // // Pointer values encode as the value pointed to. // A nil pointer encodes as the null JSON value. // // Interface values encode as the value contained in the interface. // A nil interface value encodes as the null JSON value. // // Channel, complex, and function values cannot be encoded in JSON. // Attempting to encode such a value causes Marshal to return // an UnsupportedTypeError. // // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an error. func Marshal(v interface{}) ([]byte, error) { e := newEncodeState() err := e.marshal(v, encOpts{escapeHTML: true}) if err != nil { return nil, err } buf := append([]byte(nil), e.Bytes()...) encodeStatePool.Put(e) return buf, nil } // MarshalIndent is like Marshal but applies Indent to format the output. // Each JSON element in the output will begin on a new line beginning with prefix // followed by one or more copies of indent according to the indentation nesting. func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { b, err := Marshal(v) if err != nil { return nil, err } var buf bytes.Buffer err = Indent(&buf, b, prefix, indent) if err != nil { return nil, err } return buf.Bytes(), nil } // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 // characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 // so that the JSON will be safe to embed inside HTML <script> tags. // For historical reasons, web browsers don't honor standard HTML // escaping within <script> tags, so an alternative JSON encoding must // be used. 
func HTMLEscape(dst *bytes.Buffer, src []byte) { // The characters can only appear in string literals, // so just scan the string one byte at a time. start := 0 for i, c := range src { if c == '<' || c == '>' || c == '&' { if start < i { dst.Write(src[start:i]) } dst.WriteString(`\u00`) dst.WriteByte(hex[c>>4]) dst.WriteByte(hex[c&0xF]) start = i + 1 } // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { if start < i { dst.Write(src[start:i]) } dst.WriteString(`\u202`) dst.WriteByte(hex[src[i+2]&0xF]) start = i + 3 } } if start < len(src) { dst.Write(src[start:]) } } // Marshaler is the interface implemented by types that // can marshal themselves into valid JSON. type Marshaler interface { MarshalJSON() ([]byte, error) } // An UnsupportedTypeError is returned by Marshal when attempting // to encode an unsupported value type. type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } // An UnsupportedValueError is returned by Marshal when attempting // to encode an unsupported value. type UnsupportedValueError struct { Value reflect.Value Str string } func (e *UnsupportedValueError) Error() string { return "json: unsupported value: " + e.Str } // Before Go 1.2, an InvalidUTF8Error was returned by Marshal when // attempting to encode a string value with invalid UTF-8 sequences. // As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by // replacing invalid bytes with the Unicode replacement rune U+FFFD. // // Deprecated: No longer used; kept for compatibility. type InvalidUTF8Error struct { S string // the whole string value that caused the error } func (e *InvalidUTF8Error) Error() string { return "json: invalid UTF-8 in string: " + strconv.Quote(e.S) } // A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. 
type MarshalerError struct { Type reflect.Type Err error sourceFunc string } func (e *MarshalerError) Error() string { srcFunc := e.sourceFunc if srcFunc == "" { srcFunc = "MarshalJSON" } return "json: error calling " + srcFunc + " for type " + e.Type.String() + ": " + e.Err.Error() } // Unwrap returns the underlying error. func (e *MarshalerError) Unwrap() error { return e.Err } var hex = "0123456789abcdef" // An encodeState encodes JSON into a bytes.Buffer. type encodeState struct { bytes.Buffer // accumulated output scratch [64]byte // Keep track of what pointers we've seen in the current recursive call // path, to avoid cycles that could lead to a stack overflow. Only do // the relatively expensive map operations if ptrLevel is larger than // startDetectingCyclesAfter, so that we skip the work if we're within a // reasonable amount of nested pointers deep. ptrLevel uint ptrSeen map[interface{}]struct{} // discriminatorEncodeTypeName is set to true when the type name should // be encoded along with a map or struct value. The flag is flipped back // to false as soon as the type name is encoded to prevent impacting // subsequent values. discriminatorEncodeTypeName bool } const startDetectingCyclesAfter = 1000 var encodeStatePool sync.Pool func newEncodeState() *encodeState { if v := encodeStatePool.Get(); v != nil { e := v.(*encodeState) e.Reset() if len(e.ptrSeen) > 0 { panic("ptrEncoder.encode should have emptied ptrSeen via defers") } e.ptrLevel = 0 e.discriminatorEncodeTypeName = false return e } return &encodeState{ptrSeen: make(map[interface{}]struct{})} } // jsonError is an error wrapper type for internal use only. // Panics with errors are wrapped in jsonError so that the top-level recover // can distinguish intentional panics from this package. 
type jsonError struct{ error } var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) { defer func() { if r := recover(); r != nil { if je, ok := r.(jsonError); ok { err = je.error } else { panic(r) } } }() val := reflect.ValueOf(v) if val.IsValid() && opts.isDiscriminatorSet() && opts.discriminatorEncodeMode.root() { val = val.Convert(interfaceType) } e.reflectValue(val, opts) return nil } // error aborts the encoding by panicking with err wrapped in jsonError. func (e *encodeState) error(err error) { panic(jsonError{err}) } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) { valueEncoder(v)(e, v, opts) } type encOpts struct { // quoted causes primitive fields to be encoded inside JSON strings. quoted bool // escapeHTML causes '<', '>', and '&' to be escaped in JSON strings. 
escapeHTML bool // see Encoder.SetDiscriminator discriminatorTypeFieldName string // see Encoder.SetDiscriminator discriminatorValueFieldName string // see Encoder.SetDiscriminator discriminatorValueFn TypeToDiscriminatorFunc // see Encoder.SetDiscriminator discriminatorEncodeMode DiscriminatorEncodeMode } type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts) var encoderCache sync.Map // map[reflect.Type]encoderFunc func valueEncoder(v reflect.Value) encoderFunc { if !v.IsValid() { return invalidValueEncoder } return typeEncoder(v.Type()) } func typeEncoder(t reflect.Type) encoderFunc { if fi, ok := encoderCache.Load(t); ok { return fi.(encoderFunc) } // To deal with recursive types, populate the map with an // indirect func before we build it. This type waits on the // real func (f) to be ready and then calls it. This indirect // func is only used for recursive types. var ( wg sync.WaitGroup f encoderFunc ) wg.Add(1) fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) { wg.Wait() f(e, v, opts) })) if loaded { return fi.(encoderFunc) } // Compute the real encoder and replace the indirect func with it. f = newTypeEncoder(t, true) wg.Done() encoderCache.Store(t, f) return f } var ( marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() ) // newTypeEncoder constructs an encoderFunc for a type. // The returned encoder only checks CanAddr when allowAddr is true. func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { // If we have a non-pointer value whose type implements // Marshaler with a value receiver, then we're better off taking // the address of the value - otherwise we end up with an // allocation as we cast the value to an interface. 
if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(marshalerType) { return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) } if t.Implements(marshalerType) { return marshalerEncoder } if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(textMarshalerType) { return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false)) } if t.Implements(textMarshalerType) { return textMarshalerEncoder } switch t.Kind() { case reflect.Bool: return boolEncoder case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return intEncoder case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return uintEncoder case reflect.Float32: return float32Encoder case reflect.Float64: return float64Encoder case reflect.String: return stringEncoder case reflect.Interface: return interfaceEncoder case reflect.Struct: return newStructEncoder(t) case reflect.Map: return newMapEncoder(t) case reflect.Slice: return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) case reflect.Ptr: return newPtrEncoder(t) default: return unsupportedTypeEncoder } } func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) { e.WriteString("null") } func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { if v.Kind() == reflect.Ptr && v.IsNil() { e.WriteString("null") return } m, ok := v.Interface().(Marshaler) if !ok { e.WriteString("null") return } b, err := m.MarshalJSON() if err == nil { // copy JSON into buffer, checking validity. err = compact(&e.Buffer, b, opts.escapeHTML) } if err != nil { e.error(&MarshalerError{v.Type(), err, "MarshalJSON"}) } } func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { va := v.Addr() if va.IsNil() { e.WriteString("null") return } m := va.Interface().(Marshaler) b, err := m.MarshalJSON() if err == nil { // copy JSON into buffer, checking validity. 
err = compact(&e.Buffer, b, opts.escapeHTML) } if err != nil { e.error(&MarshalerError{v.Type(), err, "MarshalJSON"}) } } func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { if v.Kind() == reflect.Ptr && v.IsNil() { e.WriteString("null") return } m, ok := v.Interface().(encoding.TextMarshaler) if !ok { e.WriteString("null") return } b, err := m.MarshalText() if err != nil { e.error(&MarshalerError{v.Type(), err, "MarshalText"}) } e.stringBytes(b, opts.escapeHTML) } func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { va := v.Addr() if va.IsNil() { e.WriteString("null") return } m := va.Interface().(encoding.TextMarshaler) b, err := m.MarshalText() if err != nil { e.error(&MarshalerError{v.Type(), err, "MarshalText"}) } e.stringBytes(b, opts.escapeHTML) } func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) { if opts.quoted { e.WriteByte('"') } if v.Bool() { e.WriteString("true") } else { e.WriteString("false") } if opts.quoted { e.WriteByte('"') } } func intEncoder(e *encodeState, v reflect.Value, opts encOpts) { b := strconv.AppendInt(e.scratch[:0], v.Int(), 10) if opts.quoted { e.WriteByte('"') } e.Write(b) if opts.quoted { e.WriteByte('"') } } func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) { b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10) if opts.quoted { e.WriteByte('"') } e.Write(b) if opts.quoted { e.WriteByte('"') } } type floatEncoder int // number of bits func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { f := v.Float() if math.IsInf(f, 0) || math.IsNaN(f) { e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))}) } // Convert as if by ES6 number to string conversion. // This matches most other JSON generators. // See golang.org/issue/6384 and golang.org/issue/14135. // Like fmt %g, but the exponent cutoffs are different // and exponents themselves are not padded to two digits. 
b := e.scratch[:0] abs := math.Abs(f) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { fmt = 'e' } } b = strconv.AppendFloat(b, f, fmt, -1, int(bits)) if fmt == 'e' { // clean up e-09 to e-9 n := len(b) if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { b[n-2] = b[n-1] b = b[:n-1] } } if opts.quoted { e.WriteByte('"') } e.Write(b) if opts.quoted { e.WriteByte('"') } } var ( float32Encoder = (floatEncoder(32)).encode float64Encoder = (floatEncoder(64)).encode ) func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { if v.Type() == numberType { numStr := v.String() // In Go1.5 the empty string encodes to "0", while this is not a valid number literal // we keep compatibility so check validity after this. if numStr == "" { numStr = "0" // Number's zero-val } if !isValidNumber(numStr) { e.error(fmt.Errorf("json: invalid number literal %q", numStr)) } if opts.quoted { e.WriteByte('"') } e.WriteString(numStr) if opts.quoted { e.WriteByte('"') } return } if opts.quoted { e2 := newEncodeState() // Since we encode the string twice, we only need to escape HTML // the first time. e2.string(v.String(), opts.escapeHTML) e.stringBytes(e2.Bytes(), false) encodeStatePool.Put(e2) } else { e.string(v.String(), opts.escapeHTML) } } // isValidNumber reports whether s is a valid JSON number literal. func isValidNumber(s string) bool { // This function implements the JSON numbers grammar. // See https://tools.ietf.org/html/rfc7159#section-6 // and https://www.json.org/img/number.png if s == "" { return false } // Optional - if s[0] == '-' { s = s[1:] if s == "" { return false } } // Digits switch { default: return false case s[0] == '0': s = s[1:] case '1' <= s[0] && s[0] <= '9': s = s[1:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // . 
followed by 1 or more digits. if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { s = s[2:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // e or E followed by an optional - or + and // 1 or more digits. if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { s = s[1:] if s[0] == '+' || s[0] == '-' { s = s[1:] if s == "" { return false } } for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { s = s[1:] } } // Make sure we are at the end. return s == "" } func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return } if opts.isDiscriminatorSet() { discriminatorInterfaceEncode(e, v, opts) return } e.reflectValue(v.Elem(), opts) } func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) { e.error(&UnsupportedTypeError{v.Type()}) } type structEncoder struct { fields structFields } type structFields struct { list []field nameIndex map[string]int } func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { next := byte('{') if opts.isDiscriminatorSet() { next = discriminatorStructEncode(e, v, opts) } FieldLoop: for i := range se.fields.list { f := &se.fields.list[i] // Find the nested struct field by following f.index. 
fv := v for _, i := range f.index { if fv.Kind() == reflect.Ptr { if fv.IsNil() { continue FieldLoop } fv = fv.Elem() } fv = fv.Field(i) } if f.omitEmpty && isEmptyValue(fv) { continue } e.WriteByte(next) next = ',' if opts.escapeHTML { e.WriteString(f.nameEscHTML) } else { e.WriteString(f.nameNonEsc) } opts.quoted = f.quoted f.encoder(e, fv, opts) } if next == '{' { e.WriteString("{}") } else { e.WriteByte('}') } } func newStructEncoder(t reflect.Type) encoderFunc { se := structEncoder{fields: cachedTypeFields(t)} return se.encode } type mapEncoder struct { elemEnc encoderFunc } func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return } if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. ptr := v.Pointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } e.ptrSeen[ptr] = struct{}{} defer delete(e.ptrSeen, ptr) } e.WriteByte('{') if opts.isDiscriminatorSet() { discriminatorMapEncode(e, v, opts) } // Extract and sort the keys. 
sv := make([]reflectWithString, v.Len()) mi := v.MapRange() for i := 0; mi.Next(); i++ { sv[i].k = mi.Key() sv[i].v = mi.Value() if err := sv[i].resolve(); err != nil { e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error())) } } sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks }) for i, kv := range sv { if i > 0 { e.WriteByte(',') } e.string(kv.ks, opts.escapeHTML) e.WriteByte(':') me.elemEnc(e, kv.v, opts) } e.WriteByte('}') e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { switch t.Key().Kind() { case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: if !t.Key().Implements(textMarshalerType) { return unsupportedTypeEncoder } } me := mapEncoder{typeEncoder(t.Elem())} return me.encode } func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) { if v.IsNil() { e.WriteString("null") return } s := v.Bytes() e.WriteByte('"') encodedLen := base64.StdEncoding.EncodedLen(len(s)) if encodedLen <= len(e.scratch) { // If the encoded bytes fit in e.scratch, avoid an extra // allocation and use the cheaper Encoding.Encode. dst := e.scratch[:encodedLen] base64.StdEncoding.Encode(dst, s) e.Write(dst) } else if encodedLen <= 1024 { // The encoded bytes are short enough to allocate for, and // Encoding.Encode is still cheaper. dst := make([]byte, encodedLen) base64.StdEncoding.Encode(dst, s) e.Write(dst) } else { // The encoded bytes are too long to cheaply allocate, and // Encoding.Encode is no longer noticeably cheaper. enc := base64.NewEncoder(base64.StdEncoding, e) enc.Write(s) enc.Close() } e.WriteByte('"') } // sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. 
type sliceEncoder struct { arrayEnc encoderFunc } func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return } if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { ptr uintptr len int }{v.Pointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } e.ptrSeen[ptr] = struct{}{} defer delete(e.ptrSeen, ptr) } se.arrayEnc(e, v, opts) e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { // Byte slices get special treatment; arrays don't. if t.Elem().Kind() == reflect.Uint8 { p := reflect.PtrTo(t.Elem()) if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) { return encodeByteSlice } } enc := sliceEncoder{newArrayEncoder(t)} return enc.encode } type arrayEncoder struct { elemEnc encoderFunc } func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteByte('[') n := v.Len() for i := 0; i < n; i++ { if i > 0 { e.WriteByte(',') } ae.elemEnc(e, v.Index(i), opts) } e.WriteByte(']') } func newArrayEncoder(t reflect.Type) encoderFunc { enc := arrayEncoder{typeEncoder(t.Elem())} return enc.encode } type ptrEncoder struct { elemEnc encoderFunc } func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return } if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. 
ptr := v.Interface() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } e.ptrSeen[ptr] = struct{}{} defer delete(e.ptrSeen, ptr) } pe.elemEnc(e, v.Elem(), opts) e.ptrLevel-- } func newPtrEncoder(t reflect.Type) encoderFunc { enc := ptrEncoder{typeEncoder(t.Elem())} return enc.encode } type condAddrEncoder struct { canAddrEnc, elseEnc encoderFunc } func (ce condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.CanAddr() { ce.canAddrEnc(e, v, opts) } else { ce.elseEnc(e, v, opts) } } // newCondAddrEncoder returns an encoder that checks whether its value // CanAddr and delegates to canAddrEnc if so, else to elseEnc. func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { enc := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} return enc.encode } func isValidTag(s string) bool { if s == "" { return false } for _, c := range s { switch { case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. 
case !unicode.IsLetter(c) && !unicode.IsDigit(c): return false } } return true } func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { t = t.Elem() } t = t.Field(i).Type } return t } type reflectWithString struct { k reflect.Value v reflect.Value ks string } func (w *reflectWithString) resolve() error { if w.k.Kind() == reflect.String { w.ks = w.k.String() return nil } if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { if w.k.Kind() == reflect.Ptr && w.k.IsNil() { return nil } buf, err := tm.MarshalText() w.ks = string(buf) return err } switch w.k.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: w.ks = strconv.FormatInt(w.k.Int(), 10) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: w.ks = strconv.FormatUint(w.k.Uint(), 10) return nil } panic("unexpected map key type") } // NOTE: keep in sync with stringBytes below. func (e *encodeState) string(s string, escapeHTML bool) { e.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) { i++ continue } if start < i { e.WriteString(s[start:i]) } e.WriteByte('\\') switch b { case '\\', '"': e.WriteByte(b) case '\n': e.WriteByte('n') case '\r': e.WriteByte('r') case '\t': e.WriteByte('t') default: // This encodes bytes < 0x20 except for \t, \n and \r. // If escapeHTML is set, it also escapes <, >, and & // because they can lead to security holes when // user-controlled strings are rendered into JSON // and served to some browsers. e.WriteString(`u00`) e.WriteByte(hex[b>>4]) e.WriteByte(hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRuneInString(s[i:]) if c == utf8.RuneError && size == 1 { if start < i { e.WriteString(s[start:i]) } e.WriteString(`\ufffd`) i += size start = i continue } // U+2028 is LINE SEPARATOR. // U+2029 is PARAGRAPH SEPARATOR. 
// They are both technically valid characters in JSON strings, // but don't work in JSONP, which has to be evaluated as JavaScript, // and can lead to security holes there. It is valid JSON to // escape them, so we do so unconditionally. // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. if c == '\u2028' || c == '\u2029' { if start < i { e.WriteString(s[start:i]) } e.WriteString(`\u202`) e.WriteByte(hex[c&0xF]) i += size start = i continue } i += size } if start < len(s) { e.WriteString(s[start:]) } e.WriteByte('"') } // NOTE: keep in sync with string above. func (e *encodeState) stringBytes(s []byte, escapeHTML bool) { e.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) { i++ continue } if start < i { e.Write(s[start:i]) } e.WriteByte('\\') switch b { case '\\', '"': e.WriteByte(b) case '\n': e.WriteByte('n') case '\r': e.WriteByte('r') case '\t': e.WriteByte('t') default: // This encodes bytes < 0x20 except for \t, \n and \r. // If escapeHTML is set, it also escapes <, >, and & // because they can lead to security holes when // user-controlled strings are rendered into JSON // and served to some browsers. e.WriteString(`u00`) e.WriteByte(hex[b>>4]) e.WriteByte(hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRune(s[i:]) if c == utf8.RuneError && size == 1 { if start < i { e.Write(s[start:i]) } e.WriteString(`\ufffd`) i += size start = i continue } // U+2028 is LINE SEPARATOR. // U+2029 is PARAGRAPH SEPARATOR. // They are both technically valid characters in JSON strings, // but don't work in JSONP, which has to be evaluated as JavaScript,
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/tables.go
vendor/github.com/vmware/govmomi/vim25/json/tables.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import "unicode/utf8" // safeSet holds the value true if the ASCII character with the given array // position can be represented inside a JSON string without any further // escaping. // // All values are true except for the ASCII control characters (0-31), the // double quote ("), and the backslash character ("\"). var safeSet = [utf8.RuneSelf]bool{ ' ': true, '!': true, '"': false, '#': true, '$': true, '%': true, '&': true, '\'': true, '(': true, ')': true, '*': true, '+': true, ',': true, '-': true, '.': true, '/': true, '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, ':': true, ';': true, '<': true, '=': true, '>': true, '?': true, '@': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true, '[': true, '\\': false, ']': true, '^': true, '_': true, '`': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, '{': true, '|': true, '}': true, '~': true, '\u007f': true, } // htmlSafeSet holds the value true if the ASCII character with the given // array position can be safely represented inside a JSON string, embedded // inside of HTML <script> tags, without any additional escaping. 
// // All values are true except for the ASCII control characters (0-31), the // double quote ("), the backslash character ("\"), HTML opening and closing // tags ("<" and ">"), and the ampersand ("&"). var htmlSafeSet = [utf8.RuneSelf]bool{ ' ': true, '!': true, '"': false, '#': true, '$': true, '%': true, '&': false, '\'': true, '(': true, ')': true, '*': true, '+': true, ',': true, '-': true, '.': true, '/': true, '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, ':': true, ';': true, '<': false, '=': true, '>': false, '?': true, '@': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true, '[': true, '\\': false, ']': true, '^': true, '_': true, '`': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, '{': true, '|': true, '}': true, '~': true, '\u007f': true, }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/discriminator.go
vendor/github.com/vmware/govmomi/vim25/json/discriminator.go
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json import ( "fmt" "reflect" "regexp" "strconv" "sync" ) // DiscriminatorToTypeFunc is used to get a reflect.Type from its // discriminator. type DiscriminatorToTypeFunc func(discriminator string) (reflect.Type, bool) // TypeToDiscriminatorFunc is used to get a discriminator string from a // reflect.Type. Empty return value suppresses discriminator rendering. type TypeToDiscriminatorFunc func(reflect.Type) (discriminator string) // DefaultDiscriminatorFunc is shorthand for the ShortName func and is used when // no other discriminator func is set explicitly var DefaultDiscriminatorFunc = ShortName // ShortName returns the type name in golang without the package name func ShortName(t reflect.Type) (discriminator string) { tn := t.Name() if tn == "" { return t.String() } return tn } // FullName return the name of the type prefixed with the package name as // appropriate func FullName(t reflect.Type) (discriminator string) { tn := t.Name() if tn == "" { return t.String() } if pp := t.PkgPath(); pp != "" { return fmt.Sprintf("%s.%s", pp, tn) } return tn } // DiscriminatorEncodeMode is a mask that describes the different encode // options. type DiscriminatorEncodeMode uint8 const ( // DiscriminatorEncodeTypeNameRootValue causes the type name to be encoded // for the root value. DiscriminatorEncodeTypeNameRootValue DiscriminatorEncodeMode = 1 << iota // DiscriminatorEncodeTypeNameAllObjects causes the type name to be encoded // for all struct and map values. Please note this specifically does not // apply to the root value. DiscriminatorEncodeTypeNameAllObjects // DiscriminatorEncodeTypeNameIfRequired is the default behavior when // the discriminator is set, and the type name is only encoded if required. 
DiscriminatorEncodeTypeNameIfRequired DiscriminatorEncodeMode = 0 ) func (m DiscriminatorEncodeMode) root() bool { return m&DiscriminatorEncodeTypeNameRootValue > 0 } func (m DiscriminatorEncodeMode) all() bool { return m&DiscriminatorEncodeTypeNameAllObjects > 0 } func (d *decodeState) isDiscriminatorSet() bool { return d.discriminatorTypeFieldName != "" && d.discriminatorValueFieldName != "" } // discriminatorOpType describes the current operation related to // discriminators when reading a JSON object's fields. type discriminatorOpType uint8 const ( // discriminatorOpTypeNameField indicates the discriminator type name // field was discovered. discriminatorOpTypeNameField = iota + 1 // discriminatorOpValueField indicates the discriminator value field // was discovered. discriminatorOpValueField ) func (d *decodeState) discriminatorGetValue() (reflect.Value, error) { // Record the current offset so we know where the data starts. offset := d.readIndex() // Create a temporary decodeState used to inspect the current object // and determine its discriminator type and decode its value. dd := &decodeState{ disallowUnknownFields: d.disallowUnknownFields, useNumber: d.useNumber, discriminatorToTypeFn: d.discriminatorToTypeFn, discriminatorTypeFieldName: d.discriminatorTypeFieldName, discriminatorValueFieldName: d.discriminatorValueFieldName, } dd.init(append([]byte{}, d.data[offset:]...)) defer freeScanner(&dd.scan) dd.scan.reset() var ( t reflect.Type // the instance of the type valueOff = -1 // the offset of a possible discriminator value ) dd.scanWhile(scanSkipSpace) if dd.opcode != scanBeginObject { panic(phasePanicMsg) } for { dd.scanWhile(scanSkipSpace) if dd.opcode == scanEndObject { // closing } - can only happen on first iteration. break } if dd.opcode != scanBeginLiteral { panic(phasePanicMsg) } // Read key. 
start := dd.readIndex() dd.rescanLiteral() item := dd.data[start:dd.readIndex()] key, ok := unquote(item) if !ok { panic(phasePanicMsg) } // Check to see if the key is related to the discriminator. var discriminatorOp discriminatorOpType switch key { case d.discriminatorTypeFieldName: discriminatorOp = discriminatorOpTypeNameField case d.discriminatorValueFieldName: discriminatorOp = discriminatorOpValueField } // Read : before value. if dd.opcode == scanSkipSpace { dd.scanWhile(scanSkipSpace) } if dd.opcode != scanObjectKey { panic(phasePanicMsg) } dd.scanWhile(scanSkipSpace) // Read value. valOff := dd.readIndex() val := dd.valueInterface() switch discriminatorOp { case discriminatorOpTypeNameField: tn, ok := val.(string) if !ok { return reflect.Value{}, fmt.Errorf( "json: discriminator type at offset %d is not string", offset+valOff) } if tn == "" { return reflect.Value{}, fmt.Errorf( "json: discriminator type at offset %d is empty", offset+valOff) } // Parse the type name into a type instance. ti, err := discriminatorParseTypeName(tn, d.discriminatorToTypeFn) if err != nil { return reflect.Value{}, err } // Assign the type instance to the outer variable, t. t = ti // Primitive types and types with Unmarshaler are wrapped in a // structure with type and value fields. Structures and Maps not // implementing Unmarshaler use discriminator embedded within their // content. if useNestedDiscriminator(t) { // If the type is a map or a struct not implementing Unmarshaler // then it is not necessary to continue walking over the current // JSON object since it will be completely re-scanned to decode // its value into the discovered type. dd.opcode = scanEndObject } else { // Otherwise if the value offset has been discovered then it is // safe to stop walking over the current JSON object as well. 
if valueOff > -1 { dd.opcode = scanEndObject } } case discriminatorOpValueField: valueOff = valOff // If the type has been discovered then it is safe to stop walking // over the current JSON object. if t != nil { dd.opcode = scanEndObject } } // Next token must be , or }. if dd.opcode == scanSkipSpace { dd.scanWhile(scanSkipSpace) } if dd.opcode == scanEndObject { break } if dd.opcode != scanObjectValue { panic(phasePanicMsg) } } // If there is not a type discriminator then return early. if t == nil { return reflect.Value{}, fmt.Errorf("json: missing discriminator") } // Instantiate a new instance of the discriminated type. var v reflect.Value switch t.Kind() { case reflect.Slice: // MakeSlice returns a value that is not addressable. // Instead, use MakeSlice to get the type, then use // reflect.New to create an addressable value. v = reflect.New(reflect.MakeSlice(t, 0, 0).Type()).Elem() case reflect.Map: // MakeMap returns a value that is not addressable. // Instead, use MakeMap to get the type, then use // reflect.New to create an addressable value. v = reflect.New(reflect.MakeMap(t).Type()).Elem() case reflect.Complex64, reflect.Complex128: return reflect.Value{}, fmt.Errorf("json: unsupported discriminator type: %s", t.Kind()) default: v = reflect.New(t) } // Reset the decode state to prepare for decoding the data. dd.scan.reset() if useNestedDiscriminator(t) { // Set the offset to zero since the entire object will be decoded // into v. dd.off = 0 } else { // Set the offset to what it was before the discriminator value was // read so only the value field is decoded into v. dd.off = valueOff } // This will initialize the correct scan step and op code. dd.scanWhile(scanSkipSpace) // Decode the data into the value. if err := dd.value(v); err != nil { return reflect.Value{}, err } // Check the saved error as well since the decoder.value function does not // always return an error. 
If the reflected value is still zero, then it is // likely the decoder was unable to decode the value. if err := dd.savedError; err != nil { switch v.Kind() { case reflect.Ptr, reflect.Interface: v = v.Elem() } if v.IsZero() { return reflect.Value{}, err } } return v, nil } func (d *decodeState) discriminatorInterfaceDecode(t reflect.Type, v reflect.Value) error { defer func() { // Advance the decode state, throwing away the value. _ = d.objectInterface() }() dv, err := d.discriminatorGetValue() if err != nil { return err } switch dv.Kind() { case reflect.Map, reflect.Slice: if dv.Type().AssignableTo(t) { v.Set(dv) return nil } if pdv := dv.Addr(); pdv.Type().AssignableTo(t) { v.Set(pdv) return nil } case reflect.Ptr: if dve := dv.Elem(); dve.Type().AssignableTo(t) { v.Set(dve) return nil } if dv.Type().AssignableTo(t) { v.Set(dv) return nil } } return fmt.Errorf("json: unsupported discriminator kind: %s", dv.Kind()) } func (o encOpts) isDiscriminatorSet() bool { return o.discriminatorTypeFieldName != "" && o.discriminatorValueFieldName != "" } func discriminatorInterfaceEncode(e *encodeState, v reflect.Value, opts encOpts) { v = v.Elem() if v.Type().Implements(marshalerType) { discriminatorValue := opts.discriminatorValueFn(v.Type()) if discriminatorValue == "" { marshalerEncoder(e, v, opts) } e.WriteString(`{"`) e.WriteString(opts.discriminatorTypeFieldName) e.WriteString(`":"`) e.WriteString(discriminatorValue) e.WriteString(`","`) e.WriteString(opts.discriminatorValueFieldName) e.WriteString(`":`) marshalerEncoder(e, v, opts) e.WriteByte('}') return } switch v.Kind() { case reflect.Chan, reflect.Func, reflect.Invalid: e.error(&UnsupportedValueError{v, fmt.Sprintf("invalid kind: %s", v.Kind())}) case reflect.Map: e.discriminatorEncodeTypeName = true newMapEncoder(v.Type())(e, v, opts) case reflect.Struct: e.discriminatorEncodeTypeName = true newStructEncoder(v.Type())(e, v, opts) case reflect.Ptr: if v.IsZero() { newPtrEncoder(v.Type())(e, v, opts) } else { 
discriminatorInterfaceEncode(e, v, opts) } default: discriminatorValue := opts.discriminatorValueFn(v.Type()) if discriminatorValue == "" { e.reflectValue(v, opts) return } e.WriteString(`{"`) e.WriteString(opts.discriminatorTypeFieldName) e.WriteString(`":"`) e.WriteString(discriminatorValue) e.WriteString(`","`) e.WriteString(opts.discriminatorValueFieldName) e.WriteString(`":`) e.reflectValue(v, opts) e.WriteByte('}') } } func discriminatorMapEncode(e *encodeState, v reflect.Value, opts encOpts) { if !e.discriminatorEncodeTypeName && !opts.discriminatorEncodeMode.all() { return } discriminatorValue := opts.discriminatorValueFn(v.Type()) if discriminatorValue == "" { return } e.WriteByte('"') e.WriteString(opts.discriminatorTypeFieldName) e.WriteString(`":"`) e.WriteString(discriminatorValue) e.WriteByte('"') if v.Len() > 0 { e.WriteByte(',') } e.discriminatorEncodeTypeName = false } func discriminatorStructEncode(e *encodeState, v reflect.Value, opts encOpts) byte { if !e.discriminatorEncodeTypeName && !opts.discriminatorEncodeMode.all() { return '{' } discriminatorValue := opts.discriminatorValueFn(v.Type()) if discriminatorValue == "" { return '{' } e.WriteString(`{"`) e.WriteString(opts.discriminatorTypeFieldName) e.WriteString(`":"`) e.WriteString(discriminatorValue) e.WriteByte('"') e.discriminatorEncodeTypeName = false return ',' } var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() // Discriminator is nested in map and struct unless they implement Unmarshaler. 
func useNestedDiscriminator(t reflect.Type) bool { if t.Implements(unmarshalerType) || reflect.PtrTo(t).Implements(unmarshalerType) { return false } kind := t.Kind() if kind == reflect.Struct || kind == reflect.Map { return true } return false } var discriminatorTypeRegistry = map[string]reflect.Type{ "uint": reflect.TypeOf(uint(0)), "uint8": reflect.TypeOf(uint8(0)), "uint16": reflect.TypeOf(uint16(0)), "uint32": reflect.TypeOf(uint32(0)), "uint64": reflect.TypeOf(uint64(0)), "uintptr": reflect.TypeOf(uintptr(0)), "int": reflect.TypeOf(int(0)), "int8": reflect.TypeOf(int8(0)), "int16": reflect.TypeOf(int16(0)), "int32": reflect.TypeOf(int32(0)), "int64": reflect.TypeOf(int64(0)), "float32": reflect.TypeOf(float32(0)), "float64": reflect.TypeOf(float64(0)), "bool": reflect.TypeOf(true), "string": reflect.TypeOf(""), "any": reflect.TypeOf((*interface{})(nil)).Elem(), "interface{}": reflect.TypeOf((*interface{})(nil)).Elem(), "interface {}": reflect.TypeOf((*interface{})(nil)).Elem(), // Not supported, but here to prevent the decoder from panicing // if encountered. "complex64": reflect.TypeOf(complex64(0)), "complex128": reflect.TypeOf(complex128(0)), } // discriminatorPointerTypeCache caches the pointer type for another type. // For example, a key that was the int type would have a value that is the // *int type. var discriminatorPointerTypeCache sync.Map // map[reflect.Type]reflect.Type // cachedPointerType returns the pointer type for another and avoids repeated // work by using a cache. 
func cachedPointerType(t reflect.Type) reflect.Type { if value, ok := discriminatorPointerTypeCache.Load(t); ok { return value.(reflect.Type) } pt := reflect.New(t).Type() value, _ := discriminatorPointerTypeCache.LoadOrStore(t, pt) return value.(reflect.Type) } var ( mapPatt = regexp.MustCompile(`^\*?map\[([^\]]+)\](.+)$`) arrayPatt = regexp.MustCompile(`^\*?\[(\d+)\](.+)$`) slicePatt = regexp.MustCompile(`^\*?\[\](.+)$`) ) // discriminatorParseTypeName returns a reflect.Type for the given type name. func discriminatorParseTypeName( typeName string, typeFn DiscriminatorToTypeFunc) (reflect.Type, error) { // Check to see if the type is an array, map, or slice. var ( aln = -1 // array length etn string // map or slice element type name ktn string // map key type name ) if m := arrayPatt.FindStringSubmatch(typeName); len(m) > 0 { i, err := strconv.Atoi(m[1]) if err != nil { return nil, err } aln = i etn = m[2] } else if m := slicePatt.FindStringSubmatch(typeName); len(m) > 0 { etn = m[1] } else if m := mapPatt.FindStringSubmatch(typeName); len(m) > 0 { ktn = m[1] etn = m[2] } // indirectTypeName checks to see if the type name begins with a // "*" characters. If it does, then the type name sans the "*" // character is returned along with a true value indicating the // type is a pointer. Otherwise the original type name is returned // along with a false value. indirectTypeName := func(tn string) (string, bool) { if len(tn) > 1 && tn[0] == '*' { return tn[1:], true } return tn, false } lookupType := func(tn string) (reflect.Type, bool) { // Get the actual type name and a flag indicating whether the // type is a pointer. n, p := indirectTypeName(tn) var t reflect.Type ok := false // look up the type in the external registry to allow name override. 
if typeFn != nil { t, ok = typeFn(n) } if !ok { // Use the built-in registry if the external registry fails if t, ok = discriminatorTypeRegistry[n]; !ok { return nil, false } } // If the type was a pointer then get the type's pointer type. if p { t = cachedPointerType(t) } return t, true } var t reflect.Type if ktn == "" && etn != "" { et, ok := lookupType(etn) if !ok { return nil, fmt.Errorf("json: invalid array/slice element type: %s", etn) } if aln > -1 { // Array t = reflect.ArrayOf(aln, et) } else { // Slice t = reflect.SliceOf(et) } } else if ktn != "" && etn != "" { // Map kt, ok := lookupType(ktn) if !ok { return nil, fmt.Errorf("json: invalid map key type: %s", ktn) } et, ok := lookupType(etn) if !ok { return nil, fmt.Errorf("json: invalid map element type: %s", etn) } t = reflect.MapOf(kt, et) } else { var ok bool if t, ok = lookupType(typeName); !ok { return nil, fmt.Errorf("json: invalid discriminator type: %s", typeName) } } return t, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/scanner.go
vendor/github.com/vmware/govmomi/vim25/json/scanner.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package json // JSON value parser state machine. // Just about at the limit of what is reasonable to write by hand. // Some parts are a bit tedious, but overall it nicely factors out the // otherwise common code from the multiple scanning functions // in this package (Compact, Indent, checkValid, etc). // // This file starts with two simple examples using the scanner // before diving into the scanner itself. import ( "strconv" "sync" ) // Valid reports whether data is a valid JSON encoding. func Valid(data []byte) bool { scan := newScanner() defer freeScanner(scan) return checkValid(data, scan) == nil } // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { scan.bytes++ if scan.step(scan, c) == scanError { return scan.err } } if scan.eof() == scanError { return scan.err } return nil } // A SyntaxError is a description of a JSON syntax error. type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes } func (e *SyntaxError) Error() string { return e.msg } // A scanner is a JSON scanning state machine. // Callers call scan.reset and then pass bytes in one at a time // by calling scan.step(&scan, c) for each byte. // The return value, referred to as an opcode, tells the // caller about significant parsing events like beginning // and ending literals, objects, and arrays, so that the // caller can follow along if it wishes. // The return value scanEnd indicates that a single top-level // JSON value has been completed, *before* the byte that // just got passed in. (The indication must be delayed in order // to recognize the end of numbers: is 123 a whole value or // the beginning of 12345e+6?). 
type scanner struct { // The step is a func to be called to execute the next transition. // Also tried using an integer constant and a single func // with a switch, but using the func directly was 10% faster // on a 64-bit Mac Mini, and it's nicer to read. step func(*scanner, byte) int // Reached end of top-level value. endTop bool // Stack of what we're in the middle of - array values, object keys, object values. parseState []int // Error that happened, if any. err error // total bytes consumed, updated by decoder.Decode (and deliberately // not set to zero by scan.reset) bytes int64 } var scannerPool = sync.Pool{ New: func() interface{} { return &scanner{} }, } func newScanner() *scanner { scan := scannerPool.Get().(*scanner) // scan.reset by design doesn't set bytes to zero scan.bytes = 0 scan.reset() return scan } func freeScanner(scan *scanner) { // Avoid hanging on to too much memory in extreme cases. if len(scan.parseState) > 1024 { scan.parseState = nil } scannerPool.Put(scan) } // These values are returned by the state transition functions // assigned to scanner.state and the method scanner.eof. // They give details about the current state of the scan that // callers might be interested to know about. // It is okay to ignore the return value of any particular // call to scanner.state: if one call returns scanError, // every subsequent call will return scanError too. const ( // Continue. scanContinue = iota // uninteresting byte scanBeginLiteral // end implied by next result != scanContinue scanBeginObject // begin object scanObjectKey // just finished object key (string) scanObjectValue // just finished non-last object value scanEndObject // end object (implies scanObjectValue if possible) scanBeginArray // begin array scanArrayValue // just finished array value scanEndArray // end array (implies scanArrayValue if possible) scanSkipSpace // space byte; can skip; known to be last "continue" result // Stop. 
scanEnd // top-level value ended *before* this byte; known to be first "stop" result scanError // hit an error, scanner.err. ) // These values are stored in the parseState stack. // They give the current state of a composite value // being scanned. If the parser is inside a nested value // the parseState describes the nested state, outermost at entry 0. const ( parseObjectKey = iota // parsing object key (before colon) parseObjectValue // parsing object value (after colon) parseArrayValue // parsing array value ) // This limits the max nesting depth to prevent stack overflow. // This is permitted by https://tools.ietf.org/html/rfc7159#section-9 const maxNestingDepth = 10000 // reset prepares the scanner for use. // It must be called before calling s.step. func (s *scanner) reset() { s.step = stateBeginValue s.parseState = s.parseState[0:0] s.err = nil s.endTop = false } // eof tells the scanner that the end of input has been reached. // It returns a scan status just as s.step does. func (s *scanner) eof() int { if s.err != nil { return scanError } if s.endTop { return scanEnd } s.step(s, ' ') if s.endTop { return scanEnd } if s.err == nil { s.err = &SyntaxError{"unexpected end of JSON input", s.bytes} } return scanError } // pushParseState pushes a new parse state p onto the parse stack. // an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned. func (s *scanner) pushParseState(c byte, newParseState int, successState int) int { s.parseState = append(s.parseState, newParseState) if len(s.parseState) <= maxNestingDepth { return successState } return s.error(c, "exceeded max depth") } // popParseState pops a parse state (already obtained) off the stack // and updates s.step accordingly. 
func (s *scanner) popParseState() { n := len(s.parseState) - 1 s.parseState = s.parseState[0:n] if n == 0 { s.step = stateEndTop s.endTop = true } else { s.step = stateEndValue } } func isSpace(c byte) bool { return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') } // stateBeginValueOrEmpty is the state after reading `[`. func stateBeginValueOrEmpty(s *scanner, c byte) int { if isSpace(c) { return scanSkipSpace } if c == ']' { return stateEndValue(s, c) } return stateBeginValue(s, c) } // stateBeginValue is the state at the beginning of the input. func stateBeginValue(s *scanner, c byte) int { if isSpace(c) { return scanSkipSpace } switch c { case '{': s.step = stateBeginStringOrEmpty return s.pushParseState(c, parseObjectKey, scanBeginObject) case '[': s.step = stateBeginValueOrEmpty return s.pushParseState(c, parseArrayValue, scanBeginArray) case '"': s.step = stateInString return scanBeginLiteral case '-': s.step = stateNeg return scanBeginLiteral case '0': // beginning of 0.123 s.step = state0 return scanBeginLiteral case 't': // beginning of true s.step = stateT return scanBeginLiteral case 'f': // beginning of false s.step = stateF return scanBeginLiteral case 'n': // beginning of null s.step = stateN return scanBeginLiteral } if '1' <= c && c <= '9' { // beginning of 1234.5 s.step = state1 return scanBeginLiteral } return s.error(c, "looking for beginning of value") } // stateBeginStringOrEmpty is the state after reading `{`. func stateBeginStringOrEmpty(s *scanner, c byte) int { if isSpace(c) { return scanSkipSpace } if c == '}' { n := len(s.parseState) s.parseState[n-1] = parseObjectValue return stateEndValue(s, c) } return stateBeginString(s, c) } // stateBeginString is the state after reading `{"key": value,`. 
func stateBeginString(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == '"' {
		s.step = stateInString
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
	n := len(s.parseState)
	if n == 0 {
		// Completed top-level before the current byte.
		s.step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if isSpace(c) {
		s.step = stateEndValue
		return scanSkipSpace
	}
	// What may legally follow a value depends on the innermost container.
	ps := s.parseState[n-1]
	switch ps {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.step = stateBeginValue
			return scanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		if c == ',' {
			s.parseState[n-1] = parseObjectKey
			s.step = stateBeginString
			return scanObjectValue
		}
		if c == '}' {
			s.popParseState()
			return scanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		if c == ',' {
			s.step = stateBeginValue
			return scanArrayValue
		}
		if c == ']' {
			s.popParseState()
			return scanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
	if !isSpace(c) {
		// Complain about non-space byte on next call.
		s.error(c, "after top-level value")
	}
	return scanEnd
}

// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
	if c == '"' {
		s.step = stateEndValue
		return scanContinue
	}
	if c == '\\' {
		s.step = stateInStringEsc
		return scanContinue
	}
	if c < 0x20 {
		// Raw control characters must be escaped inside JSON strings.
		return s.error(c, "in string literal")
	}
	return scanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		s.step = stateInString
		return scanContinue
	case 'u':
		s.step = stateInStringEscU
		return scanContinue
	}
	return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU1
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU12
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU123
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		// All four hex digits consumed; back to ordinary string scanning.
		s.step = stateInString
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
	if c == '0' {
		s.step = state0
		return scanContinue
	}
	if '1' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return s.error(c, "in numeric literal")
}

// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	// Not a digit: hand off to state0, which handles '.', 'e'/'E',
	// or the end of the number.
	return state0(s, c)
}

// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
	if c == '.' {
		s.step = stateDot
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = stateDot0
		return scanContinue
	}
	// At least one digit is required after the decimal point.
	return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
	if c == '+' || c == '-' {
		s.step = stateESign
		return scanContinue
	}
	// No explicit sign: treat c as the first exponent digit.
	return stateESign(s, c)
}

// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = stateE0
		return scanContinue
	}
	return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
	if c == 'r' {
		s.step = stateTr
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'r')")
}

// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
	if c == 'u' {
		s.step = stateTru
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'u')")
}

// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
	if c == 'e' {
		// Literal complete; next byte terminates the value.
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'e')")
}

// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
	if c == 'a' {
		s.step = stateFa
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'a')")
}

// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
	if c == 'l' {
		s.step = stateFal
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'l')")
}

// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
	if c == 's' {
		s.step = stateFals
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 's')")
}

// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
	if c == 'e' {
		// Literal complete; next byte terminates the value.
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'e')")
}

// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
	if c == 'u' {
		s.step = stateNu
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'u')")
}

// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
	if c == 'l' {
		s.step = stateNul
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
	if c == 'l' {
		// Literal complete; next byte terminates the value.
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
	return scanError
}

// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
	s.step = stateError
	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
	return scanError
}

// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
	// special cases - different from quoted strings
	if c == '\'' {
		return `'\''`
	}
	if c == '"' {
		return `'"'`
	}

	// use quoted string with different quotation marks:
	// strconv.Quote yields `"x"`; strip the double quotes and wrap
	// the escaped body in single quotes instead.
	s := strconv.Quote(string(c))
	return "'" + s[1:len(s)-1] + "'"
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/json/decode.go
vendor/github.com/vmware/govmomi/vim25/json/decode.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Represents JSON data structure using native Go types: booleans, floats,
// strings, arrays, and maps.
package json

import (
	"encoding"
	"encoding/base64"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf16"
	"unicode/utf8"
)

// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an InvalidUnmarshalError.
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing the Unmarshaler interface,
// Unmarshal calls that value's UnmarshalJSON method, including
// when the input is a JSON null.
// Otherwise, if the value implements encoding.TextUnmarshaler
// and the input is a JSON quoted string, Unmarshal calls that value's
// UnmarshalText method with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
// ignored (see Decoder.DisallowUnknownFields for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
//	bool, for JSON booleans
//	float64, for JSON numbers
//	string, for JSON strings
//	[]interface{}, for JSON arrays
//	map[string]interface{}, for JSON objects
//	nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
// As a special case, to unmarshal an empty JSON array into a slice,
// Unmarshal replaces the slice with a new empty slice.
//
// To unmarshal a JSON array into a Go array, Unmarshal decodes
// JSON array elements into corresponding Go array elements.
// If the Go array is smaller than the JSON array,
// the additional JSON array elements are discarded.
// If the JSON array is smaller than the Go array,
// the additional Go array elements are set to zero values.
//
// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
// either be any string type, an integer, implement json.Unmarshaler, or
// implement encoding.TextUnmarshaler.
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an UnmarshalTypeError describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
// "not present," unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
func Unmarshal(data []byte, v interface{}) error {
	// Check for well-formedness.
	// Avoids filling out half a data structure
	// before discovering a JSON syntax error.
	var d decodeState
	err := checkValid(data, &d.scan)
	if err != nil {
		return err
	}

	d.init(data)
	return d.unmarshal(v)
}

// Unmarshaler is the interface implemented by types
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
// By convention, to approximate the behavior of Unmarshal itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
	UnmarshalJSON([]byte) error
}

// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
	Value  string       // description of JSON value - "bool", "array", "number -5"
	Type   reflect.Type // type of Go value it could not be assigned to
	Offset int64        // error occurred after reading Offset bytes
	Struct string       // name of the struct type containing the field
	Field  string       // the full path from root node to the field
}

func (e *UnmarshalTypeError) Error() string {
	if e.Struct != "" || e.Field != "" {
		return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
	}
	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}

// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError struct {
	Key   string
	Type  reflect.Type
	Field reflect.StructField
}

func (e *UnmarshalFieldError) Error() string {
	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}

// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError struct {
	Type reflect.Type
}

func (e *InvalidUnmarshalError) Error() string {
	if e.Type == nil {
		return "json: Unmarshal(nil)"
	}

	if e.Type.Kind() != reflect.Ptr {
		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
	}
	return "json: Unmarshal(nil " + e.Type.String() + ")"
}

// unmarshal decodes d.data into v after the input has already been
// validated by checkValid. It reports the first saved decoding error.
func (d *decodeState) unmarshal(v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return &InvalidUnmarshalError{reflect.TypeOf(v)}
	}

	d.scan.reset()
	d.scanWhile(scanSkipSpace)
	// We decode rv not rv.Elem because the Unmarshaler interface
	// test must be applied at the top level of the value.
	err := d.value(rv)
	if err != nil {
		return d.addErrorContext(err)
	}
	return d.savedError
}

// A Number represents a JSON number literal.
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(string(n), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(string(n), 10, 64)
}

// An errorContext provides context for type errors during decoding.
type errorContext struct {
	Struct     reflect.Type
	FieldStack []string
}

// decodeState represents the state while decoding a JSON value.
type decodeState struct {
	data                  []byte
	off                   int // next read offset in data
	opcode                int // last read result
	scan                  scanner
	errorContext          *errorContext
	savedError            error
	useNumber             bool
	disallowUnknownFields bool

	// Discriminator settings used by object() to decode values whose
	// concrete Go type is named inside the JSON itself (govmomi extension
	// to the stdlib decoder).
	discriminatorTypeFieldName  string
	discriminatorValueFieldName string
	discriminatorToTypeFn       DiscriminatorToTypeFunc
}

// readIndex returns the position of the last byte read.
func (d *decodeState) readIndex() int {
	return d.off - 1
}

// phasePanicMsg is used as a panic message when we end up with something that
// shouldn't happen. It can indicate a bug in the JSON decoder, or that
// something is editing the data slice while the decoder executes.
const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"

// init resets d to decode data, clearing any error state from a previous
// decode while reusing allocated error-context storage.
func (d *decodeState) init(data []byte) *decodeState {
	d.data = data
	d.off = 0
	d.savedError = nil
	if d.errorContext != nil {
		d.errorContext.Struct = nil
		// Reuse the allocated space for the FieldStack slice.
		d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
	}
	return d
}

// saveError saves the first err it is called with,
// for reporting at the end of the unmarshal.
func (d *decodeState) saveError(err error) {
	if d.savedError == nil {
		d.savedError = d.addErrorContext(err)
	}
}

// addErrorContext returns a new error enhanced with information from d.errorContext
func (d *decodeState) addErrorContext(err error) error {
	if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) {
		switch err := err.(type) {
		case *UnmarshalTypeError:
			err.Struct = d.errorContext.Struct.Name()
			err.Field = strings.Join(d.errorContext.FieldStack, ".")
		}
	}
	return err
}

// skip scans to the end of what was started.
func (d *decodeState) skip() {
	s, data, i := &d.scan, d.data, d.off
	depth := len(s.parseState)
	for {
		op := s.step(s, data[i])
		i++
		// The composite we started in has closed once the parse stack
		// drops below its depth at entry.
		if len(s.parseState) < depth {
			d.off = i
			d.opcode = op
			return
		}
	}
}

// scanNext processes the byte at d.data[d.off].
func (d *decodeState) scanNext() {
	if d.off < len(d.data) {
		d.opcode = d.scan.step(&d.scan, d.data[d.off])
		d.off++
	} else {
		d.opcode = d.scan.eof()
		d.off = len(d.data) + 1 // mark processed EOF with len+1
	}
}

// scanWhile processes bytes in d.data[d.off:] until it
// receives a scan code not equal to op.
func (d *decodeState) scanWhile(op int) {
	s, data, i := &d.scan, d.data, d.off
	for i < len(data) {
		newOp := s.step(s, data[i])
		i++
		if newOp != op {
			d.opcode = newOp
			d.off = i
			return
		}
	}

	d.off = len(data) + 1 // mark processed EOF with len+1
	d.opcode = d.scan.eof()
}

// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
// common case where we're decoding a literal. The decoder scans the input
// twice, once for syntax errors and to check the length of the value, and the
// second to perform the decoding.
//
// Only in the second step do we use decodeState to tokenize literals, so we
// know there aren't any syntax errors. We can take advantage of that knowledge,
// and scan a literal's bytes much more quickly.
func (d *decodeState) rescanLiteral() {
	data, i := d.data, d.off
Switch:
	switch data[i-1] {
	case '"': // string
		for ; i < len(data); i++ {
			switch data[i] {
			case '\\':
				i++ // escaped char
			case '"':
				i++ // tokenize the closing quote too
				break Switch
			}
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
		for ; i < len(data); i++ {
			switch data[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'.', 'e', 'E', '+', '-':
			default:
				break Switch
			}
		}
	case 't': // true
		i += len("rue")
	case 'f': // false
		i += len("alse")
	case 'n': // null
		i += len("ull")
	}
	if i < len(data) {
		d.opcode = stateEndValue(&d.scan, data[i])
	} else {
		d.opcode = scanEnd
	}
	d.off = i + 1
}

// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
// reads the following byte ahead. If v is invalid, the value is discarded.
// The first byte of the value has been read already.
func (d *decodeState) value(v reflect.Value) error {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray:
		if v.IsValid() {
			if err := d.array(v); err != nil {
				return err
			}
		} else {
			// No destination: consume and discard the array.
			d.skip()
		}
		d.scanNext()

	case scanBeginObject:
		if v.IsValid() {
			if err := d.object(v); err != nil {
				return err
			}
		} else {
			// No destination: consume and discard the object.
			d.skip()
		}
		d.scanNext()

	case scanBeginLiteral:
		// All bytes inside literal return scanContinue op code.
		start := d.readIndex()
		d.rescanLiteral()

		if v.IsValid() {
			if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
				return err
			}
		}
	}
	return nil
}

type unquotedValue struct{}

// valueQuoted is like value but decodes a
// quoted string literal or literal null into an interface value.
// If it finds anything other than a quoted string literal or null,
// valueQuoted returns unquotedValue{}.
func (d *decodeState) valueQuoted() interface{} {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray, scanBeginObject:
		// Composites can never satisfy ",string"; skip them entirely.
		d.skip()
		d.scanNext()

	case scanBeginLiteral:
		v := d.literalInterface()
		switch v.(type) {
		case nil, string:
			return v
		}
	}
	return unquotedValue{}
}

// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// If it encounters an Unmarshaler, indirect stops and returns that.
// If decodingNull is true, indirect stops at the first settable pointer so it
// can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// Issue #24153 indicates that it is generally not a guaranteed property
	// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
	// and expect the value to still be settable for values derived from
	// unexported embedded struct fields.
	//
	// The logic below effectively does this when it first addresses the value
	// (to satisfy possible pointer methods) and continues to dereference
	// subsequent pointers as necessary.
	//
	// After the first round-trip, we set v back to the original value to
	// preserve the original RW flags contained in reflect.Value.
	v0 := v
	haveAddr := false

	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		haveAddr = true
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				haveAddr = false
				v = e
				continue
			}
		}

		if v.Kind() != reflect.Ptr {
			break
		}

		if decodingNull && v.CanSet() {
			break
		}

		// Prevent infinite loop if v is an interface pointing to its own address:
		//     var v interface{}
		//     v = &v
		if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
			v = v.Elem()
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		if v.Type().NumMethod() > 0 && v.CanInterface() {
			if u, ok := v.Interface().(Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			if !decodingNull {
				if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
					return nil, u, reflect.Value{}
				}
			}
		}

		if haveAddr {
			v = v0 // restore original value after round-trip Value.Addr().Elem()
			haveAddr = false
		} else {
			v = v.Elem()
		}
	}
	return nil, nil, v
}

// array consumes an array from d.data[d.off-1:], decoding into v.
// The first byte of the array ('[') has been read already.
func (d *decodeState) array(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			ai := d.arrayInterface()
			v.Set(reflect.ValueOf(ai))
			return nil
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	case reflect.Array, reflect.Slice:
		break
	}

	i := 0
	for {
		// Look ahead for ] - can only happen on first iteration.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}

		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary
			if i >= v.Cap() {
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			if err := d.value(v.Index(i)); err != nil {
				return err
			}
		} else {
			// Ran out of fixed array: skip.
			if err := d.value(reflect.Value{}); err != nil {
				return err
			}
		}
		i++

		// Next token must be , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}

	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	if i == 0 && v.Kind() == reflect.Slice {
		// Empty JSON array decodes to a new empty (non-nil) slice.
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}
	return nil
}

var nullLiteral = []byte("null")
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()

// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
func (d *decodeState) object(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv
	t := v.Type()

	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 && !d.isDiscriminatorSet() {
		oi := d.objectInterface()
		v.Set(reflect.ValueOf(oi))
		return nil
	}

	var fields structFields

	// Check type of target:
	//   struct or
	//   map[T1]T2 where T1 is string, an integer type,
	//             or an encoding.TextUnmarshaler
	switch v.Kind() {
	case reflect.Map:
		// Map key must either have string kind, have an integer kind,
		// or be an encoding.TextUnmarshaler.
		switch t.Key().Kind() {
		case reflect.String,
			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		default:
			if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
				d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
				d.skip()
				return nil
			}
		}
		if v.IsNil() {
			v.Set(reflect.MakeMap(t))
		}
	case reflect.Struct:
		fields = cachedTypeFields(t)
		// ok
	default:
		if d.isDiscriminatorSet() {
			// Discriminator mode: resolve the concrete type from the JSON
			// itself (govmomi extension).
			return d.discriminatorInterfaceDecode(t, v)
		}
		d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
		d.skip()
		return nil
	}

	var mapElem reflect.Value
	var origErrorContext errorContext
	if d.errorContext != nil {
		origErrorContext = *d.errorContext
	}

	for {
		// Read opening " of string key or closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}

		// Read key.
		start := d.readIndex()
		d.rescanLiteral()
		item := d.data[start:d.readIndex()]
		key, ok := unquoteBytes(item)
		if !ok {
			panic(phasePanicMsg)
		}

		// Figure out field corresponding to key.
		var subv reflect.Value
		destring := false // whether the value is wrapped in a string to be decoded first

		if v.Kind() == reflect.Map {
			elemType := t.Elem()
			if !mapElem.IsValid() {
				mapElem = reflect.New(elemType).Elem()
			} else {
				mapElem.Set(reflect.Zero(elemType))
			}
			subv = mapElem
		} else {
			var f *field
			if i, ok := fields.nameIndex[string(key)]; ok {
				// Found an exact name match.
				f = &fields.list[i]
			} else {
				// Fall back to the expensive case-insensitive
				// linear search.
				for i := range fields.list {
					ff := &fields.list[i]
					if ff.equalFold(ff.nameBytes, key) {
						f = ff
						break
					}
				}
			}
			if f != nil {
				subv = v
				destring = f.quoted
				for _, i := range f.index {
					if subv.Kind() == reflect.Ptr {
						if subv.IsNil() {
							// If a struct embeds a pointer to an unexported type,
							// it is not possible to set a newly allocated value
							// since the field is unexported.
							//
							// See https://golang.org/issue/21357
							if !subv.CanSet() {
								d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
								// Invalidate subv to ensure d.value(subv) skips over
								// the JSON value without assigning it to subv.
								subv = reflect.Value{}
								destring = false
								break
							}
							subv.Set(reflect.New(subv.Type().Elem()))
						}
						subv = subv.Elem()
					}
					subv = subv.Field(i)
				}
				if d.errorContext == nil {
					d.errorContext = new(errorContext)
				}
				d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
				d.errorContext.Struct = t
			} else if d.disallowUnknownFields {
				d.saveError(fmt.Errorf("json: unknown field %q", key))
			}
		}

		// Read : before value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)

		if destring {
			switch qv := d.valueQuoted().(type) {
			case nil:
				if err := d.literalStore(nullLiteral, subv, false); err != nil {
					return err
				}
			case string:
				if err := d.literalStore([]byte(qv), subv, true); err != nil {
					return err
				}
			default:
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
			}
		} else {
			if err := d.value(subv); err != nil {
				return err
			}
		}

		// Write value back to map;
		// if using struct, subv points into struct already.
		if v.Kind() == reflect.Map {
			kt := t.Key()
			var kv reflect.Value
			switch {
			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
				kv = reflect.New(kt)
				if err := d.literalStore(item, kv, true); err != nil {
					return err
				}
				kv = kv.Elem()
			case kt.Kind() == reflect.String:
				kv = reflect.ValueOf(key).Convert(kt)
			default:
				switch kt.Kind() {
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
					s := string(key)
					n, err := strconv.ParseInt(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowInt(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
					s := string(key)
					n, err := strconv.ParseUint(s, 10, 64)
					if err != nil || reflect.Zero(kt).OverflowUint(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.ValueOf(n).Convert(kt)
				default:
					panic("json: Unexpected key type") // should never occur
				}
			}
			if kv.IsValid() {
				// In discriminator mode, the type-name key is metadata and is
				// not stored in the destination map.
				if !d.isDiscriminatorSet() || kv.String() != d.discriminatorTypeFieldName {
					v.SetMapIndex(kv, subv)
				}
			}
		}

		// Next token must be , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.errorContext != nil {
			// Reset errorContext to its original state.
			// Keep the same underlying array for FieldStack, to reuse the
			// space and avoid unnecessary allocs.
			d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
			d.errorContext.Struct = origErrorContext.Struct
		}
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return nil
}

// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
	if d.useNumber {
		return Number(s), nil
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
	}
	return f, nil
}

var numberType = reflect.TypeOf(Number(""))

// literalStore decodes a literal stored in item into v.
//
// fromQuoted indicates whether this literal came from unwrapping a
// string from the ",string" struct tag option. this is used only to
// produce more helpful error messages.
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
	// Check for unmarshaler.
if len(item) == 0 { //Empty string given d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return nil } isNull := item[0] == 'n' // null u, ut, pv := indirect(v, isNull) if u != nil { return u.UnmarshalJSON(item) } if ut != nil { if item[0] != '"' { if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return nil } val := "number" switch item[0] { case 'n': val = "null" case 't', 'f': val = "bool" } d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) return nil } s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } panic(phasePanicMsg) } return ut.UnmarshalText(s) } v = pv switch c := item[0]; c { case 'n': // null // The main parser checks that only true and false can reach here, // but if this was a quoted string input, it could be anything. if fromQuoted && string(item) != "null" { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) break } switch v.Kind() { case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: v.Set(reflect.Zero(v.Type())) // otherwise, ignore null for primitives/string } case 't', 'f': // true, false value := item[0] == 't' // The main parser checks that only true and false can reach here, // but if this was a quoted string input, it could be anything. 
if fromQuoted && string(item) != "true" && string(item) != "false" { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) break } switch v.Kind() { default: if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) } case reflect.Bool: v.SetBool(value) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(value)) } else { d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) } } case '"': // string s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } panic(phasePanicMsg) } switch v.Kind() { default: d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) case reflect.Slice: if v.Type().Elem().Kind() != reflect.Uint8 { d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) break } b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) n, err := base64.StdEncoding.Decode(b, s) if err != nil { d.saveError(err) break } v.SetBytes(b[:n]) case reflect.String: if v.Type() == numberType && !isValidNumber(string(s)) { return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) } v.SetString(string(s)) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(string(s))) } else { d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) } } default: // number if c != '-' && (c < '0' || c > '9') { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } panic(phasePanicMsg) } s := string(item) switch v.Kind() { default: if v.Kind() == 
reflect.String && v.Type() == numberType { // s must be a valid number, because it's // already been tokenized. v.SetString(s) break } if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) case reflect.Interface: n, err := d.convertNumber(s) if err != nil { d.saveError(err) break } if v.NumMethod() != 0 { d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) break } v.Set(reflect.ValueOf(n)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: n, err := strconv.ParseInt(s, 10, 64) if err != nil || v.OverflowInt(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: n, err := strconv.ParseUint(s, 10, 64) if err != nil || v.OverflowUint(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetUint(n) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(s, v.Type().Bits()) if err != nil || v.OverflowFloat(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetFloat(n) } } return nil } // The xxxInterface routines build up a value to be stored // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. 
// valueInterface is like value but returns interface{} func (d *decodeState) valueInterface() (val interface{}) { switch d.opcode { default: panic(phasePanicMsg) case scanBeginArray: val = d.arrayInterface() d.scanNext() case scanBeginObject: val = d.objectInterface() d.scanNext() case scanBeginLiteral: val = d.literalInterface() } return } // arrayInterface is like array but returns []interface{}. func (d *decodeState) arrayInterface() []interface{} { var v = make([]interface{}, 0) for { // Look ahead for ] - can only happen on first iteration. d.scanWhile(scanSkipSpace) if d.opcode == scanEndArray { break } v = append(v, d.valueInterface()) // Next token must be , or ]. if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } if d.opcode == scanEndArray { break } if d.opcode != scanArrayValue { panic(phasePanicMsg) } } return v } // objectInterface is like object but returns map[string]interface{}. func (d *decodeState) objectInterface() map[string]interface{} { m := make(map[string]interface{}) for { // Read opening " of string key or closing }. d.scanWhile(scanSkipSpace) if d.opcode == scanEndObject { // closing } - can only happen on first iteration. break } if d.opcode != scanBeginLiteral { panic(phasePanicMsg) } // Read string key. start := d.readIndex() d.rescanLiteral() item := d.data[start:d.readIndex()] key, ok := unquote(item) if !ok { panic(phasePanicMsg) } // Read : before value. if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } if d.opcode != scanObjectKey { panic(phasePanicMsg) } d.scanWhile(scanSkipSpace) // Read value. m[key] = d.valueInterface() // Next token must be , or }. if d.opcode == scanSkipSpace {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go
vendor/github.com/vmware/govmomi/vim25/progress/prefix.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package progress import "fmt" type prefixedReport struct { Report prefix string } func (r prefixedReport) Detail() string { if d := r.Report.Detail(); d != "" { return fmt.Sprintf("%s: %s", r.prefix, d) } return r.prefix } func prefixLoop(upstream <-chan Report, downstream chan<- Report, prefix string) { defer close(downstream) for r := range upstream { downstream <- prefixedReport{ Report: r, prefix: prefix, } } } func Prefix(s Sinker, prefix string) Sinker { fn := func() chan<- Report { upstream := make(chan Report) downstream := s.Sink() go prefixLoop(upstream, downstream, prefix) return upstream } return SinkFunc(fn) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/progress/tee.go
vendor/github.com/vmware/govmomi/vim25/progress/tee.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package progress // Tee works like Unix tee; it forwards all progress reports it receives to the // specified sinks func Tee(s1, s2 Sinker) Sinker { fn := func() chan<- Report { d1 := s1.Sink() d2 := s2.Sink() u := make(chan Report) go tee(u, d1, d2) return u } return SinkFunc(fn) } func tee(u <-chan Report, d1, d2 chan<- Report) { defer close(d1) defer close(d2) for r := range u { d1 <- r d2 <- r } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/progress/reader.go
vendor/github.com/vmware/govmomi/vim25/progress/reader.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package progress

import (
	"container/list"
	"context"
	"fmt"
	"io"
	"sync/atomic"
	"time"
)

// readerReport is the Report implementation emitted by reader: a snapshot
// of the read position and total size, plus a pointer to the shared
// throughput counter maintained by bpsLoop.
type readerReport struct {
	pos  int64   // Keep first to ensure 64-bit alignment
	size int64   // Keep first to ensure 64-bit alignment
	bps  *uint64 // Keep first to ensure 64-bit alignment

	t time.Time

	err error
}

// Percentage returns progress as a percentage of the total size, or 0
// when the total size is unknown (non-positive).
func (r readerReport) Percentage() float32 {
	if r.size <= 0 {
		return 0
	}
	return 100.0 * float32(r.pos) / float32(r.size)
}

// Detail formats the current throughput in human readable units
// (B/s, KiB/s, MiB/s or GiB/s).
func (r readerReport) Detail() string {
	const (
		KiB = 1024
		MiB = 1024 * KiB
		GiB = 1024 * MiB
	)

	// Use the reader's bps field, so this report returns an up-to-date number.
	//
	// For example: if there hasn't been progress for the last 5 seconds, the
	// most recent report should return "0B/s".
	//
	bps := atomic.LoadUint64(r.bps)

	switch {
	case bps >= GiB:
		return fmt.Sprintf("%.1fGiB/s", float32(bps)/float32(GiB))
	case bps >= MiB:
		return fmt.Sprintf("%.1fMiB/s", float32(bps)/float32(MiB))
	case bps >= KiB:
		return fmt.Sprintf("%.1fKiB/s", float32(bps)/float32(KiB))
	default:
		return fmt.Sprintf("%dB/s", bps)
	}
}

// Error returns the error carried by this report, if any.
// NOTE(review): receiver named p here, r elsewhere on this type.
func (p readerReport) Error() error {
	return p.err
}

// reader wraps an io.Reader and sends a progress report over a channel for
// every read it handles.
type reader struct {
	r io.Reader

	pos  int64 // bytes read so far
	size int64 // expected total bytes
	bps  uint64 // bytes/sec, written by bpsLoop, read via readerReport

	ch  chan<- Report
	ctx context.Context
}

// NewReader returns a progress-reporting reader around r. size is the
// expected total number of bytes; reports are delivered both to s and to
// an internal loop that keeps the throughput counter current.
func NewReader(ctx context.Context, s Sinker, r io.Reader, size int64) *reader {
	pr := reader{
		r:   r,
		ctx: ctx,

		size: size,
	}

	// Reports must be sent downstream and to the bps computation loop.
	pr.ch = Tee(s, newBpsLoop(&pr.bps)).Sink()

	return &pr
}

// Read calls the Read function on the underlying io.Reader. Additionally,
// every read causes a progress report to be sent to the progress reader's
// underlying channel.
func (r *reader) Read(b []byte) (int, error) {
	n, err := r.r.Read(b)
	r.pos += int64(n)

	// Only io.EOF still produces a progress report; any other error
	// returns immediately without reporting.
	if err != nil && err != io.EOF {
		return n, err
	}

	q := readerReport{
		t:    time.Now(),
		pos:  r.pos,
		size: r.size,
		bps:  &r.bps,
	}

	// Don't block on the report forever: give up once ctx is cancelled.
	select {
	case r.ch <- q:
	case <-r.ctx.Done():
	}

	return n, err
}

// Done marks the progress reader as done, optionally including an error in the
// progress report. After sending it, the underlying channel is closed.
func (r *reader) Done(err error) {
	q := readerReport{
		t:    time.Now(),
		pos:  r.pos,
		size: r.size,
		bps:  &r.bps,
		err:  err,
	}

	select {
	case r.ch <- q:
		// The channel is only closed when the final report was
		// delivered; on cancellation it is left open.
		close(r.ch)
	case <-r.ctx.Done():
	}
}

// newBpsLoop returns a sink that monitors and stores throughput.
func newBpsLoop(dst *uint64) SinkFunc {
	fn := func() chan<- Report {
		sink := make(chan Report)
		go bpsLoop(sink, dst)
		return sink
	}

	return fn
}

// bpsLoop maintains a sliding window of the reports received during the
// last second and atomically stores the position delta across that
// window (i.e. bytes/sec) into dst. It returns when ch is closed.
func bpsLoop(ch <-chan Report, dst *uint64) {
	l := list.New()

	for {
		var tch <-chan time.Time

		// Setup timer for front of list to become stale.
		if e := l.Front(); e != nil {
			dt := time.Second - time.Since(e.Value.(readerReport).t)
			tch = time.After(dt)
		}

		select {
		case q, ok := <-ch:
			if !ok {
				return
			}
			l.PushBack(q)
		case <-tch:
			l.Remove(l.Front())
		}

		// Compute new bps
		if l.Len() == 0 {
			atomic.StoreUint64(dst, 0)
		} else {
			f := l.Front().Value.(readerReport)
			b := l.Back().Value.(readerReport)
			atomic.StoreUint64(dst, uint64(b.pos-f.pos))
		}
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go
vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go
// © Broadcom. All Rights Reserved.
// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0

package progress

import "sync"

// Aggregator fans reports from multiple upstream channels into a single
// downstream Sinker, draining one upstream channel at a time.
type Aggregator struct {
	downstream Sinker
	upstream   chan (<-chan Report)

	done chan struct{}
	w    sync.WaitGroup
}

// NewAggregator returns a running Aggregator that forwards reports to s.
// Call Done to stop it and close the downstream channel.
func NewAggregator(s Sinker) *Aggregator {
	a := &Aggregator{
		downstream: s,
		upstream:   make(chan (<-chan Report)),

		done: make(chan struct{}),
	}

	a.w.Add(1)
	go a.loop()

	return a
}

// loop receives upstream channels one at a time and drains each one into
// the downstream sink, until Done is called.
func (a *Aggregator) loop() {
	defer a.w.Done()

	dch := a.downstream.Sink()
	defer close(dch)

	for {
		select {
		case uch := <-a.upstream:
			// Drain upstream channel
			for e := range uch {
				dch <- e
			}
		case <-a.done:
			return
		}
	}
}

// Sink returns a fresh report channel that the aggregator will drain.
// The caller must close it; reports are forwarded sequentially, so a
// channel handed out while another is being drained will block until
// the loop gets to it. Must not be called after Done.
func (a *Aggregator) Sink() chan<- Report {
	ch := make(chan Report)
	a.upstream <- ch
	return ch
}

// Done marks the aggregator as done. No more calls to Sink() may be made and
// the downstream progress report channel will be closed when Done() returns.
func (a *Aggregator) Done() {
	close(a.done)
	a.w.Wait()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go
vendor/github.com/vmware/govmomi/vim25/progress/sinker.go
// © Broadcom. All Rights Reserved. // The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. // SPDX-License-Identifier: Apache-2.0 package progress // Sinker defines what is expected of a type that can act as a sink for // progress reports. The semantics are as follows. If you call Sink(), you are // responsible for closing the returned channel. Closing this channel means // that the related task is done, or resulted in error. type Sinker interface { Sink() chan<- Report } // SinkFunc defines a function that returns a progress report channel. type SinkFunc func() chan<- Report // Sink makes the SinkFunc implement the Sinker interface. func (fn SinkFunc) Sink() chan<- Report { return fn() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false