repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/types_windows_amd64.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/types_windows_amd64.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package windows
// WSAData mirrors the Win32 WSADATA structure as laid out for 64-bit
// builds: the VendorInfo pointer precedes the fixed-size description
// arrays (the 32-bit layout in types_windows_arm.go orders them
// differently).
type WSAData struct {
	Version      uint16
	HighVersion  uint16
	MaxSockets   uint16
	MaxUdpDg     uint16
	VendorInfo   *byte
	Description  [WSADESCRIPTION_LEN + 1]byte
	SystemStatus [WSASYS_STATUS_LEN + 1]byte
}
// Servent mirrors the Win32 servent structure (64-bit layout: Port
// follows Proto). Name, Aliases and Proto are NUL-terminated C strings.
type Servent struct {
	Name    *byte
	Aliases **byte
	Proto   *byte
	Port    uint16
}
// JOBOBJECT_BASIC_LIMIT_INFORMATION mirrors the Win32 structure of the
// same name, used to set or query basic limits on a job object.
// On 64-bit targets the struct is naturally 8-byte aligned, so no
// trailing padding field is needed (unlike the arm variant).
type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
	PerProcessUserTimeLimit int64
	PerJobUserTimeLimit     int64
	LimitFlags              uint32
	MinimumWorkingSetSize   uintptr
	MaximumWorkingSetSize   uintptr
	ActiveProcessLimit      uint32
	Affinity                uintptr
	PriorityClass           uint32
	SchedulingClass         uint32
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/race.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/race.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows && race
package windows
import (
"runtime"
"unsafe"
)
// raceenabled reports that this build includes the race detector; this
// file is compiled only under the "windows && race" build constraint,
// while a sibling file provides raceenabled = false otherwise.
const raceenabled = true

// raceAcquire establishes a happens-before edge for the race detector:
// it pairs with an earlier raceReleaseMerge on the same address.
func raceAcquire(addr unsafe.Pointer) {
	runtime.RaceAcquire(addr)
}

// raceReleaseMerge informs the race detector of a release operation on
// addr that merges with prior releases.
func raceReleaseMerge(addr unsafe.Pointer) {
	runtime.RaceReleaseMerge(addr)
}

// raceReadRange reports a read of the byte range [addr, addr+len) to
// the race detector.
func raceReadRange(addr unsafe.Pointer, len int) {
	runtime.RaceReadRange(addr, len)
}

// raceWriteRange reports a write of the byte range [addr, addr+len) to
// the race detector.
func raceWriteRange(addr unsafe.Pointer, len int) {
	runtime.RaceWriteRange(addr, len)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/str.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/str.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
package windows
// itoa formats val as a base-10 string. It is hand-rolled here rather
// than using fmt (or strconv) to keep this package dependency-free.
func itoa(val int) string { // do it here rather than with fmt to avoid dependency
	if val < 0 {
		return "-" + itoa(-val)
	}
	var buf [32]byte // comfortably holds any int64 in decimal
	i := len(buf)
	for {
		i--
		buf[i] = byte('0' + val%10)
		if val < 10 {
			break
		}
		val /= 10
	}
	return string(buf[i:])
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/types_windows_arm.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/types_windows_arm.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package windows
// WSAData mirrors the Win32 WSADATA structure as laid out for 32-bit
// builds: the description arrays come before MaxSockets/MaxUdpDg,
// unlike the 64-bit layout.
type WSAData struct {
	Version      uint16
	HighVersion  uint16
	Description  [WSADESCRIPTION_LEN + 1]byte
	SystemStatus [WSASYS_STATUS_LEN + 1]byte
	MaxSockets   uint16
	MaxUdpDg     uint16
	VendorInfo   *byte
}

// Servent mirrors the Win32 servent structure (32-bit layout: Port
// precedes Proto).
type Servent struct {
	Name    *byte
	Aliases **byte
	Port    uint16
	Proto   *byte
}

// JOBOBJECT_BASIC_LIMIT_INFORMATION mirrors the Win32 structure of the
// same name. The explicit blank field pads the struct out to an 8-byte
// boundary on 32-bit targets so it matches the C layout.
type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
	PerProcessUserTimeLimit int64
	PerJobUserTimeLimit     int64
	LimitFlags              uint32
	MinimumWorkingSetSize   uintptr
	MaximumWorkingSetSize   uintptr
	ActiveProcessLimit      uint32
	Affinity                uintptr
	PriorityClass           uint32
	SchedulingClass         uint32
	_                       uint32 // pad to 8 byte boundary
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go | // Code generated by 'go generate'; DO NOT EDIT.
package registry
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		// Errno 0 would otherwise box to a non-nil "success" error;
		// return the pre-boxed EINVAL sentinel instead.
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW")
procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW")
procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW")
procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW")
procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW")
procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
)
// NOTE(review): the wrappers below are emitted by mkwinsyscall from the
// //sys directives in syscall.go; comments are annotations only, the
// code itself must stay in sync with the generator output.

// regConnectRegistry wraps advapi32!RegConnectRegistryW. A non-zero
// return code is surfaced as a syscall.Errno.
func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regCreateKeyEx wraps advapi32!RegCreateKeyExW. disposition reports
// whether the key was created or already existed.
func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regDeleteKey wraps advapi32!RegDeleteKeyW.
func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regDeleteValue wraps advapi32!RegDeleteValueW.
func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regEnumValue wraps advapi32!RegEnumValueW; nameLen and buflen are
// in/out size parameters.
func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regLoadMUIString wraps advapi32!RegLoadMUIStringW.
func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regSetValueEx wraps advapi32!RegSetValueExW.
func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
	r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// expandEnvironmentStrings wraps kernel32!ExpandEnvironmentStringsW.
// Unlike the registry calls above, a zero return signals failure and
// the thread's last error (e1) is consulted via errnoErr.
func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
	r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
	n = uint32(r0)
	if n == 0 {
		err = errnoErr(e1)
	}
	return
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/mksyscall.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/mksyscall.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build generate
package registry
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/syscall.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/syscall.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
package registry
import "syscall"
const (
_REG_OPTION_NON_VOLATILE = 0
_REG_CREATED_NEW_KEY = 1
_REG_OPENED_EXISTING_KEY = 2
_ERROR_NO_MORE_ITEMS syscall.Errno = 259
)
// LoadRegLoadMUIString checks whether the RegLoadMUIStringW procedure
// can be located in advapi32.dll, returning a non-nil error if not.
// Call it before Key.GetMUIStringValue, which panics when the
// procedure is unavailable.
func LoadRegLoadMUIString() error {
	return procRegLoadMUIStringW.Find()
}
//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/value.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/value.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
package registry
import (
"errors"
"io"
"syscall"
"unicode/utf16"
"unsafe"
)
const (
// Registry value types.
NONE = 0
SZ = 1
EXPAND_SZ = 2
BINARY = 3
DWORD = 4
DWORD_BIG_ENDIAN = 5
LINK = 6
MULTI_SZ = 7
RESOURCE_LIST = 8
FULL_RESOURCE_DESCRIPTOR = 9
RESOURCE_REQUIREMENTS_LIST = 10
QWORD = 11
)
var (
// ErrShortBuffer is returned when the buffer was too short for the operation.
ErrShortBuffer = syscall.ERROR_MORE_DATA
// ErrNotExist is returned when a registry key or value does not exist.
ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
ErrUnexpectedType = errors.New("unexpected key value type")
)
// GetValue retrieves the type and data for the specified value associated
// with an open key k. It fills up buffer buf and returns the retrieved
// byte count n. If buf is too small to fit the stored value it returns
// ErrShortBuffer error along with the required buffer size n.
// If no buffer is provided, GetValue returns the value's type and the
// required buffer size in n only.
// If the value does not exist, the error returned is ErrNotExist.
//
// GetValue is a low level function. If value's type is known, use the appropriate
// Get*Value function instead.
func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
	pname, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return 0, 0, err
	}
	// Passing a nil data pointer asks RegQueryValueEx for the size/type only.
	var pbuf *byte
	if len(buf) > 0 {
		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
	}
	// l is an in/out parameter: buffer capacity in, data size out.
	l := uint32(len(buf))
	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
	if err != nil {
		// On ERROR_MORE_DATA, l carries the required size back to the caller.
		return int(l), valtype, err
	}
	return int(l), valtype, nil
}
// getValue reads the named value into buf, reallocating a larger buffer
// and retrying whenever the data does not fit, and returns the raw data
// with its registry type. buf must be non-empty: the query takes &buf[0]
// unconditionally (all callers in this package pass at least 8 bytes).
func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
	p, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return nil, 0, err
	}
	var t uint32
	n := uint32(len(buf))
	for {
		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
		if err == nil {
			return buf[:n], t, nil
		}
		if err != syscall.ERROR_MORE_DATA {
			return nil, 0, err
		}
		if n <= uint32(len(buf)) {
			// Reported size did not exceed the buffer we already
			// offered; bail out rather than retry forever.
			return nil, 0, err
		}
		buf = make([]byte, n)
	}
}
// GetStringValue retrieves the string value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetStringValue returns ErrNotExist.
// If value is not SZ or EXPAND_SZ, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
	// 64 bytes covers most string values; getValue grows as needed.
	data, typ, err2 := k.getValue(name, make([]byte, 64))
	if err2 != nil {
		return "", typ, err2
	}
	switch typ {
	case SZ, EXPAND_SZ:
	default:
		return "", typ, ErrUnexpectedType
	}
	if len(data) == 0 {
		return "", typ, nil
	}
	// Reinterpret the raw bytes as UTF-16 code units without copying;
	// the huge array type is only a cast vehicle, the 3-index slice
	// caps both length and capacity at len(data)/2.
	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
	return syscall.UTF16ToString(u), typ, nil
}
// GetMUIStringValue retrieves the localized string value for
// the specified value name associated with an open key k.
// If the value name doesn't exist or the localized string value
// can't be resolved, GetMUIStringValue returns ErrNotExist.
// GetMUIStringValue panics if the system doesn't support
// regLoadMUIString; use LoadRegLoadMUIString to check if
// regLoadMUIString is supported before calling this function.
func (k Key) GetMUIStringValue(name string) (string, error) {
	pname, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return "", err
	}
	buf := make([]uint16, 1024)
	var buflen uint32
	var pdir *uint16 // nil directory on the first attempt
	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
		// Try to resolve the string value using the system directory as
		// a DLL search path; this assumes the string value is of the form
		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
		// This approach works with tzres.dll but may have to be revised
		// in the future to allow callers to provide custom search paths.
		var s string
		s, err = ExpandString("%SystemRoot%\\system32\\")
		if err != nil {
			return "", err
		}
		pdir, err = syscall.UTF16PtrFromString(s)
		if err != nil {
			return "", err
		}
		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	}
	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
		if buflen <= uint32(len(buf)) {
			break // Buffer not growing, assume race; break
		}
		buf = make([]uint16, buflen)
		// pdir retains whatever value the earlier attempts established.
		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	}
	if err != nil {
		return "", err
	}
	return syscall.UTF16ToString(buf), nil
}
// ExpandString expands environment-variable strings and replaces
// them with the values defined for the current user.
// Use ExpandString to expand EXPAND_SZ strings.
func ExpandString(value string) (string, error) {
	if value == "" {
		return "", nil
	}
	src, err := syscall.UTF16PtrFromString(value)
	if err != nil {
		return "", err
	}
	// Start with a modest buffer; the syscall reports the size it
	// actually needs, so retry once with exactly that capacity.
	dst := make([]uint16, 100)
	for {
		n, err := expandEnvironmentStrings(src, &dst[0], uint32(len(dst)))
		switch {
		case err != nil:
			return "", err
		case n <= uint32(len(dst)):
			// Everything fit; n counts code units including the NUL,
			// which UTF16ToString strips.
			return syscall.UTF16ToString(dst[:n]), nil
		}
		dst = make([]uint16, n)
	}
}
// GetStringsValue retrieves the []string value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetStringsValue returns ErrNotExist.
// If value is not MULTI_SZ, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
	data, typ, err2 := k.getValue(name, make([]byte, 64))
	if err2 != nil {
		return nil, typ, err2
	}
	if typ != MULTI_SZ {
		return nil, typ, ErrUnexpectedType
	}
	if len(data) == 0 {
		return nil, typ, nil
	}
	// View the raw bytes as UTF-16 code units without copying.
	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
	if len(p) == 0 {
		return nil, typ, nil
	}
	if p[len(p)-1] == 0 {
		p = p[:len(p)-1] // remove terminating null
	}
	val = make([]string, 0, 5)
	// MULTI_SZ is a sequence of NUL-separated strings; split on each
	// embedded NUL and decode the UTF-16 segment in between.
	from := 0
	for i, c := range p {
		if c == 0 {
			val = append(val, string(utf16.Decode(p[from:i])))
			from = i + 1
		}
	}
	return val, typ, nil
}
// GetIntegerValue retrieves the integer value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetIntegerValue returns ErrNotExist.
// If value is not DWORD or QWORD, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
	// 8 bytes is exactly large enough for the biggest case (QWORD).
	data, typ, err2 := k.getValue(name, make([]byte, 8))
	if err2 != nil {
		return 0, typ, err2
	}
	switch typ {
	case DWORD:
		if len(data) != 4 {
			return 0, typ, errors.New("DWORD value is not 4 bytes long")
		}
		// Copy the raw little-endian bytes into a uint32 via an
		// in-place byte view, then widen.
		var val32 uint32
		copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
		return uint64(val32), DWORD, nil
	case QWORD:
		if len(data) != 8 {
			return 0, typ, errors.New("QWORD value is not 8 bytes long")
		}
		copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
		return val, QWORD, nil
	default:
		return 0, typ, ErrUnexpectedType
	}
}
// GetBinaryValue retrieves the binary value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetBinaryValue returns ErrNotExist.
// If value is not BINARY, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
	data, typ, readErr := k.getValue(name, make([]byte, 64))
	switch {
	case readErr != nil:
		return nil, typ, readErr
	case typ != BINARY:
		return nil, typ, ErrUnexpectedType
	}
	return data, typ, nil
}
// setValue stores data of registry type valtype under the given value
// name. An empty data slice writes a zero-length value (nil buffer,
// zero size), which is valid for the registry.
func (k Key) setValue(name string, valtype uint32, data []byte) error {
	p, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	if len(data) == 0 {
		return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
	}
	return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
}
// SetDWordValue sets the data and type of a name value
// under key k to value and DWORD. The integer is stored in the
// machine's native byte order via a direct byte view of value.
func (k Key) SetDWordValue(name string, value uint32) error {
	return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
}

// SetQWordValue sets the data and type of a name value
// under key k to value and QWORD. The integer is stored in the
// machine's native byte order via a direct byte view of value.
func (k Key) SetQWordValue(name string, value uint64) error {
	return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
}
// setStringValue encodes value as NUL-terminated UTF-16 and stores it
// under name with the given string-like registry type (SZ/EXPAND_SZ).
func (k Key) setStringValue(name string, valtype uint32, value string) error {
	// UTF16FromString appends the terminating NUL and rejects interior NULs.
	v, err := syscall.UTF16FromString(value)
	if err != nil {
		return err
	}
	// Reinterpret the []uint16 as raw bytes without copying.
	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
	return k.setValue(name, valtype, buf)
}
// SetStringValue sets the data and type of a name value
// under key k to value and SZ. The value must not contain a zero byte.
// It delegates the UTF-16 encoding to setStringValue.
func (k Key) SetStringValue(name, value string) error {
	return k.setStringValue(name, SZ, value)
}

// SetExpandStringValue sets the data and type of a name value
// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
// It delegates the UTF-16 encoding to setStringValue.
func (k Key) SetExpandStringValue(name, value string) error {
	return k.setStringValue(name, EXPAND_SZ, value)
}
// SetStringsValue sets the data and type of a name value
// under key k to value and MULTI_SZ. The value strings
// must not contain a zero byte.
func (k Key) SetStringsValue(name string, value []string) error {
	// Join the strings with NUL separators; a second trailing NUL
	// (added below) terminates the whole MULTI_SZ block.
	var joined string
	for _, s := range value {
		for _, r := range s {
			if r == 0 {
				return errors.New("string cannot have 0 inside")
			}
		}
		joined += s + "\x00"
	}
	v := utf16.Encode([]rune(joined + "\x00"))
	// Reinterpret the []uint16 as raw bytes without copying.
	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
	return k.setValue(name, MULTI_SZ, buf)
}
// SetBinaryValue sets the data and type of a name value
// under key k to value and BINARY. The bytes are stored verbatim.
func (k Key) SetBinaryValue(name string, value []byte) error {
	return k.setValue(name, BINARY, value)
}
// DeleteValue removes a named value from the key k.
func (k Key) DeleteValue(name string) error {
	// Convert the Go string to the NUL-terminated UTF-16 form the
	// Windows API expects before issuing the delete.
	p, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	return regDeleteValue(syscall.Handle(k), p)
}
// ReadValueNames returns the value names of key k.
// The parameter n controls the number of returned names,
// analogous to the way os.File.Readdirnames works:
// n <= 0 reads all names; with n > 0, io.EOF is returned
// when fewer than n names exist.
func (k Key) ReadValueNames(n int) ([]string, error) {
	// Stat supplies counts and the longest name so the buffer can be
	// sized up front.
	ki, err := k.Stat()
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, ki.ValueCount)
	buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
loopItems:
	for i := uint32(0); ; i++ {
		if n > 0 {
			if len(names) == n {
				return names, nil
			}
		}
		// l is in/out: capacity in, name length (sans NUL) out.
		l := uint32(len(buf))
		for {
			err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
			if err == nil {
				break
			}
			if err == syscall.ERROR_MORE_DATA {
				// Double buffer size and try again.
				l = uint32(2 * len(buf))
				buf = make([]uint16, l)
				continue
			}
			if err == _ERROR_NO_MORE_ITEMS {
				// Enumeration exhausted; stop the outer loop too.
				break loopItems
			}
			return names, err
		}
		names = append(names, syscall.UTF16ToString(buf[:l]))
	}
	if n > len(names) {
		return names, io.EOF
	}
	return names, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/key.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sys/windows/registry/key.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
// Package registry provides access to the Windows registry.
//
// Here is a simple example, opening a registry key and reading a string value from it.
//
// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
// if err != nil {
// log.Fatal(err)
// }
// defer k.Close()
//
// s, _, err := k.GetStringValue("SystemRoot")
// if err != nil {
// log.Fatal(err)
// }
// fmt.Printf("Windows system root is %q\n", s)
package registry
import (
"io"
"runtime"
"syscall"
"time"
)
const (
// Registry key security and access rights.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
// for details.
ALL_ACCESS = 0xf003f
CREATE_LINK = 0x00020
CREATE_SUB_KEY = 0x00004
ENUMERATE_SUB_KEYS = 0x00008
EXECUTE = 0x20019
NOTIFY = 0x00010
QUERY_VALUE = 0x00001
READ = 0x20019
SET_VALUE = 0x00002
WOW64_32KEY = 0x00200
WOW64_64KEY = 0x00100
WRITE = 0x20006
)
// Key is a handle to an open Windows registry key.
// Keys can be obtained by calling OpenKey; there are
// also some predefined root keys such as CURRENT_USER.
// Keys can be used directly in the Windows API.
type Key syscall.Handle
const (
// Windows defines some predefined root keys that are always open.
// An application can use these keys as entry points to the registry.
// Normally these keys are used in OpenKey to open new keys,
// but they can also be used anywhere a Key is required.
CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT)
CURRENT_USER = Key(syscall.HKEY_CURRENT_USER)
LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE)
USERS = Key(syscall.HKEY_USERS)
CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG)
PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
)
// Close closes open key k by releasing its underlying registry handle.
// Closing a predefined root key is harmless per the RegCloseKey contract
// noted in the Windows API — TODO confirm for all predefined keys.
func (k Key) Close() error {
	return syscall.RegCloseKey(syscall.Handle(k))
}
// OpenKey opens a new key with path name relative to key k.
// It accepts any open key, including CURRENT_USER and others,
// and returns the new key and an error.
// The access parameter specifies desired access rights to the
// key to be opened.
func OpenKey(k Key, path string, access uint32) (Key, error) {
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return 0, err
	}
	var h syscall.Handle
	if err := syscall.RegOpenKeyEx(syscall.Handle(k), pathp, 0, access, &h); err != nil {
		return 0, err
	}
	return Key(h), nil
}
// OpenRemoteKey opens a predefined registry key on another
// computer pcname. The key to be opened is specified by k, but
// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
// If pcname is "", OpenRemoteKey returns local computer key.
func OpenRemoteKey(pcname string, k Key) (Key, error) {
	var err error
	// p stays nil for the local machine; RegConnectRegistryW treats a
	// nil machine name as "this computer".
	var p *uint16
	if pcname != "" {
		// Remote machine names take UNC form: \\hostname.
		p, err = syscall.UTF16PtrFromString(`\\` + pcname)
		if err != nil {
			return 0, err
		}
	}
	var remoteKey syscall.Handle
	err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
	if err != nil {
		return 0, err
	}
	return Key(remoteKey), nil
}
// ReadSubKeyNames returns the names of subkeys of key k.
// The parameter n controls the number of returned names,
// analogous to the way os.File.Readdirnames works:
// n <= 0 reads all names; with n > 0, io.EOF is returned
// when fewer than n names exist.
func (k Key) ReadSubKeyNames(n int) ([]string, error) {
	// RegEnumKeyEx must be called repeatedly and to completion.
	// During this time, this goroutine cannot migrate away from
	// its current thread. See https://golang.org/issue/49320 and
	// https://golang.org/issue/49466.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	names := make([]string, 0)
	// Registry key size limit is 255 bytes and described there:
	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
	buf := make([]uint16, 256) //plus extra room for terminating zero byte
loopItems:
	for i := uint32(0); ; i++ {
		if n > 0 {
			if len(names) == n {
				return names, nil
			}
		}
		// l is in/out: capacity in, name length (sans NUL) out.
		l := uint32(len(buf))
		for {
			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
			if err == nil {
				break
			}
			if err == syscall.ERROR_MORE_DATA {
				// Double buffer size and try again.
				l = uint32(2 * len(buf))
				buf = make([]uint16, l)
				continue
			}
			if err == _ERROR_NO_MORE_ITEMS {
				// Enumeration exhausted; stop the outer loop too.
				break loopItems
			}
			return names, err
		}
		names = append(names, syscall.UTF16ToString(buf[:l]))
	}
	if n > len(names) {
		return names, io.EOF
	}
	return names, nil
}
// CreateKey creates a key named path under open key k.
// CreateKey returns the new key and a boolean flag that reports
// whether the key already existed.
// The access parameter specifies the access rights for the key
// to be created.
func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
	var h syscall.Handle
	// d receives the disposition: created-new vs opened-existing.
	var d uint32
	var pathPointer *uint16
	pathPointer, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return 0, false, err
	}
	// Non-volatile keys persist across reboots; no class, default
	// security attributes.
	err = regCreateKeyEx(syscall.Handle(k), pathPointer,
		0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
	if err != nil {
		return 0, false, err
	}
	return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
}
// DeleteKey deletes the subkey path of key k and its values.
func DeleteKey(k Key, path string) error {
	// The Windows API wants the path as NUL-terminated UTF-16.
	p, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return err
	}
	return regDeleteKey(syscall.Handle(k), p)
}
// A KeyInfo describes the statistics of a key. It is returned by Stat.
// The unexported lastWriteTime is surfaced through the ModTime method.
type KeyInfo struct {
	SubKeyCount     uint32
	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
	ValueCount      uint32
	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
	MaxValueLen     uint32 // longest data component among the key's values, in bytes
	lastWriteTime   syscall.Filetime
}
// ModTime returns the key's last write time, converting the Windows
// FILETIME captured by Stat into a time.Time.
func (ki *KeyInfo) ModTime() time.Time {
	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
}
// Stat retrieves information about the open key k.
// It fills a KeyInfo from RegQueryInfoKey; fields passed as nil
// (class, reserved, security descriptor, etc.) are not requested.
func (k Key) Stat() (*KeyInfo, error) {
	var ki KeyInfo
	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
	if err != nil {
		return nil, err
	}
	return &ki, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sync/errgroup/errgroup.go | cmd/vsphere-xcopy-volume-populator/vendor/golang.org/x/sync/errgroup/errgroup.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
package errgroup
import (
"context"
"fmt"
"sync"
)
// token is the empty value transferred on the semaphore channel; only
// channel occupancy matters, it carries no data.
type token struct{}

// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task. A Group should not be reused for different tasks.
//
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
type Group struct {
	cancel func(error) // set by WithContext; cancels the derived Context with a cause

	wg sync.WaitGroup // counts goroutines started by Go/TryGo

	sem chan token // nil = unlimited; otherwise a counting semaphore sized by SetLimit

	errOnce sync.Once // ensures only the first error is recorded
	err     error     // the first non-nil error returned by a subtask
}
// done releases one semaphore slot (when a limit is configured) and
// marks this subtask finished on the WaitGroup. It runs via defer in
// every goroutine started by Go/TryGo.
func (g *Group) done() {
	if g.sem != nil {
		<-g.sem
	}
	g.wg.Done()
}
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
	// WithCancelCause lets callers observe the group's first error via
	// context.Cause on the derived Context.
	ctx, cancel := context.WithCancelCause(ctx)
	return &Group{cancel: cancel}, ctx
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
	g.wg.Wait()
	if g.cancel != nil {
		// Cancel the derived Context now that all subtasks have
		// finished, recording g.err (possibly nil) as the cause.
		g.cancel(g.err)
	}
	return g.err
}
// Go calls the given function in a new goroutine.
//
// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of
// goroutines in the group exceeding the configured limit.
//
// The first goroutine in the group that returns a non-nil error will
// cancel the associated Context, if any. The error will be returned
// by Wait.
func (g *Group) Go(f func() error) {
	if g.sem != nil {
		// Blocks here when the group is at its SetLimit capacity.
		g.sem <- token{}
	}
	g.wg.Add(1)
	go func() {
		defer g.done()
		// It is tempting to propagate panics from f()
		// up to the goroutine that calls Wait, but
		// it creates more problems than it solves:
		// - it delays panics arbitrarily,
		//   making bugs harder to detect;
		// - it turns f's panic stack into a mere value,
		//   hiding it from crash-monitoring tools;
		// - it risks deadlocks that hide the panic entirely,
		//   if f's panic leaves the program in a state
		//   that prevents the Wait call from being reached.
		// See #53757, #74275, #74304, #74306.
		if err := f(); err != nil {
			// Only the first failing subtask records its error and
			// cancels the Context; later errors are dropped.
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel(g.err)
				}
			})
		}
	}()
}
// TryGo calls the given function in a new goroutine only if the number of
// active goroutines in the group is currently below the configured limit.
//
// The return value reports whether the goroutine was started.
func (g *Group) TryGo(f func() error) bool {
	if g.sem != nil {
		select {
		case g.sem <- token{}: // acquired a slot without blocking
			// Note: this allows barging iff channels in general allow barging.
		default:
			return false // group is at its limit; f is not started
		}
	}
	g.wg.Add(1)
	go func() {
		defer g.done()
		if err := f(); err != nil {
			g.errOnce.Do(func() {
				// Only the first error is kept; it also becomes the
				// cancellation cause of the derived Context, if any.
				g.err = err
				if g.cancel != nil {
					g.cancel(g.err)
				}
			})
		}
	}()
	return true
}
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
// A limit of zero will prevent any new goroutines from being added.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.
//
// The limit must not be modified while any goroutines in the group are active.
func (g *Group) SetLimit(n int) {
	if n < 0 {
		g.sem = nil // negative: unlimited
		return
	}
	if active := len(g.sem); active != 0 {
		panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active))
	}
	g.sem = make(chan token, n)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/fuzz.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/fuzz.go | // +build gofuzz
package plist
import (
"bytes"
)
// Fuzz is the go-fuzz entry point: it attempts to decode arbitrary bytes as a
// property list. It returns 1 when the input parses successfully (steering
// the fuzzer toward valid inputs) and 0 otherwise.
func Fuzz(data []byte) int {
	var decoded interface{}
	r := bytes.NewReader(data)
	if NewDecoder(r).Decode(&decoded) != nil {
		return 0
	}
	return 1
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_generator.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_generator.go | package plist
import (
"encoding/hex"
"io"
"strconv"
"time"
)
// textPlistGenerator emits OpenStep/GNUstep text-format property lists.
type textPlistGenerator struct {
	writer io.Writer // wrapped in mustWriter by newTextPlistGenerator, so write failures panic
	format int       // see newTextPlistGenerator; GNUStepFormat enables typed scalar markers

	quotableTable *characterSet // bytes whose presence forces a string to be quoted

	indent string // per-level indent; "" disables pretty-printing
	depth  int    // current nesting depth

	dictKvDelimiter, dictEntryDelimiter, arrayDelimiter []byte
}

var (
	textPlistTimeLayout = "2006-01-02 15:04:05 -0700" // reference layout for plist dates
	padding             = "0000"                      // zero-padding source for \U / octal escapes
)
// generateDocument writes the entire plist document for pval; text plists
// have no header, so this is just the root value.
func (p *textPlistGenerator) generateDocument(pval cfValue) {
	p.writePlistValue(pval)
}
// plistQuotedString encodes str for emission into an OpenStep/GNUstep text
// plist. Runes above U+00FF become \Uxxxx escapes, U+0080..U+00FF become
// three-digit octal escapes, and the usual single-character C-style escapes
// are applied; the result is wrapped in double quotes whenever it contains a
// character that requires quoting per p.quotableTable.
//
// NOTE(review): \U escapes are nominally four hex digits; runes above U+FFFF
// produce longer digit strings and are emitted unpadded here (the previous
// implementation would panic slicing `padding` for them).
func (p *textPlistGenerator) plistQuotedString(str string) string {
	if str == "" {
		return `""`
	}
	// Accumulate into a byte slice; the previous `s += ...` loop was
	// quadratic in the length of str.
	out := make([]byte, 0, len(str)+2)
	quot := false
	for _, r := range str {
		if r > 0xFF {
			quot = true
			out = append(out, '\\', 'U')
			us := strconv.FormatInt(int64(r), 16)
			if len(us) < len(padding) {
				out = append(out, padding[len(us):]...) // zero-pad to four hex digits
			}
			out = append(out, us...)
		} else if r > 0x7F {
			quot = true
			out = append(out, '\\')
			us := strconv.FormatInt(int64(r), 8)
			out = append(out, padding[1+len(us):]...) // zero-pad to three octal digits
			out = append(out, us...)
		} else {
			c := uint8(r)
			if p.quotableTable.ContainsByte(c) {
				quot = true
			}
			switch c {
			case '\a':
				out = append(out, `\a`...)
			case '\b':
				out = append(out, `\b`...)
			case '\v':
				out = append(out, `\v`...)
			case '\f':
				out = append(out, `\f`...)
			case '\\':
				out = append(out, `\\`...)
			case '"':
				out = append(out, `\"`...)
			default:
				// Tab, CR and LF are emitted literally; they only force
				// quoting via quotableTable above.
				out = append(out, c)
			}
		}
	}
	if quot {
		return `"` + string(out) + `"`
	}
	return string(out)
}
// deltaIndent adjusts the current nesting depth by the sign of depthDelta;
// zero leaves the depth untouched.
func (p *textPlistGenerator) deltaIndent(depthDelta int) {
	switch {
	case depthDelta > 0:
		p.depth++
	case depthDelta < 0:
		p.depth--
	}
}
// writeIndent starts a new output line at the current depth. It is a no-op
// unless pretty-printing (a non-empty indent string) is enabled.
// (The previous version re-checked len(p.indent) > 0 after already returning
// early on len(p.indent) == 0; the redundant branch is removed.)
func (p *textPlistGenerator) writeIndent() {
	if len(p.indent) == 0 {
		return
	}
	p.writer.Write([]byte("\n"))
	for i := 0; i < p.depth; i++ {
		io.WriteString(p.writer, p.indent)
	}
}
// writePlistValue renders pval in text-plist syntax. When p.format is
// GNUStepFormat, scalars are wrapped in GNUstep's typed markers
// (<*I...> integers, <*R...> reals, <*BY>/<*BN> booleans, <*D...> dates);
// otherwise plain OpenStep forms are used. A nil pval emits nothing.
func (p *textPlistGenerator) writePlistValue(pval cfValue) {
	if pval == nil {
		return
	}
	switch pval := pval.(type) {
	case *cfDictionary:
		pval.sort() // deterministic, key-sorted output
		p.writer.Write([]byte(`{`))
		p.deltaIndent(1)
		for i, k := range pval.keys {
			p.writeIndent()
			io.WriteString(p.writer, p.plistQuotedString(k))
			p.writer.Write(p.dictKvDelimiter)
			p.writePlistValue(pval.values[i])
			p.writer.Write(p.dictEntryDelimiter)
		}
		p.deltaIndent(-1)
		p.writeIndent()
		p.writer.Write([]byte(`}`))
	case *cfArray:
		p.writer.Write([]byte(`(`))
		p.deltaIndent(1)
		for _, v := range pval.values {
			p.writeIndent()
			p.writePlistValue(v)
			p.writer.Write(p.arrayDelimiter)
		}
		p.deltaIndent(-1)
		p.writeIndent()
		p.writer.Write([]byte(`)`))
	case cfString:
		io.WriteString(p.writer, p.plistQuotedString(string(pval)))
	case *cfNumber:
		if p.format == GNUStepFormat {
			p.writer.Write([]byte(`<*I`))
		}
		if pval.signed {
			io.WriteString(p.writer, strconv.FormatInt(int64(pval.value), 10))
		} else {
			io.WriteString(p.writer, strconv.FormatUint(pval.value, 10))
		}
		if p.format == GNUStepFormat {
			p.writer.Write([]byte(`>`))
		}
	case *cfReal:
		if p.format == GNUStepFormat {
			p.writer.Write([]byte(`<*R`))
		}
		// GNUstep does not differentiate between 32/64-bit floats.
		io.WriteString(p.writer, strconv.FormatFloat(pval.value, 'g', -1, 64))
		if p.format == GNUStepFormat {
			p.writer.Write([]byte(`>`))
		}
	case cfBoolean:
		if p.format == GNUStepFormat {
			if pval {
				p.writer.Write([]byte(`<*BY>`))
			} else {
				p.writer.Write([]byte(`<*BN>`))
			}
		} else {
			// OpenStep has no boolean type; encode as 1/0.
			if pval {
				p.writer.Write([]byte(`1`))
			} else {
				p.writer.Write([]byte(`0`))
			}
		}
	case cfData:
		// Hex-encode in four-byte (eight hex digit) groups separated by
		// spaces, e.g. <01020304 05>.
		var hexencoded [9]byte
		var l int
		var asc = 9
		hexencoded[8] = ' '
		p.writer.Write([]byte(`<`))
		b := []byte(pval)
		for i := 0; i < len(b); i += 4 {
			l = i + 4
			if l >= len(b) {
				l = len(b)
				// We no longer need the space - or the rest of the buffer.
				// (we used >= above to get this part without another conditional :P)
				asc = (l - i) * 2
			}
			// Fill the buffer (only up to 8 characters, to preserve the space we implicitly include
			// at the end of every encode)
			hex.Encode(hexencoded[:8], b[i:l])
			io.WriteString(p.writer, string(hexencoded[:asc]))
		}
		p.writer.Write([]byte(`>`))
	case cfDate:
		if p.format == GNUStepFormat {
			p.writer.Write([]byte(`<*D`))
			io.WriteString(p.writer, time.Time(pval).In(time.UTC).Format(textPlistTimeLayout))
			p.writer.Write([]byte(`>`))
		} else {
			io.WriteString(p.writer, p.plistQuotedString(time.Time(pval).In(time.UTC).Format(textPlistTimeLayout)))
		}
	}
}
// Indent enables pretty-printing with the given per-level indent string; ""
// disables it. The key/value delimiter gains surrounding spaces only while
// pretty-printing is active.
func (p *textPlistGenerator) Indent(i string) {
	p.indent = i
	delim := `=`
	if i != "" {
		delim = ` = `
	}
	p.dictKvDelimiter = []byte(delim)
}
// newTextPlistGenerator returns a generator that writes OpenStep- or
// GNUstep-flavored text plists to w, depending on format.
func newTextPlistGenerator(w io.Writer, format int) *textPlistGenerator {
	g := &textPlistGenerator{
		writer:             mustWriter{w},
		format:             format,
		quotableTable:      &osQuotable,
		dictKvDelimiter:    []byte(`=`),
		arrayDelimiter:     []byte(`,`),
		dictEntryDelimiter: []byte(`;`),
	}
	if format == GNUStepFormat {
		g.quotableTable = &gsQuotable
	}
	return g
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/xml_generator.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/xml_generator.go | package plist
import (
"bufio"
"encoding/base64"
"encoding/xml"
"io"
"math"
"strconv"
"time"
)
const (
xmlHEADER string = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
xmlDOCTYPE = `<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">` + "\n"
xmlArrayTag = "array"
xmlDataTag = "data"
xmlDateTag = "date"
xmlDictTag = "dict"
xmlFalseTag = "false"
xmlIntegerTag = "integer"
xmlKeyTag = "key"
xmlPlistTag = "plist"
xmlRealTag = "real"
xmlStringTag = "string"
xmlTrueTag = "true"
// magic value used in the XML encoding of UIDs
// (stored as a dictionary mapping CF$UID->integer)
xmlCFUIDMagic = "CF$UID"
)
// formatXMLFloat renders f for a <real> element, mapping the IEEE special
// values to the tokens "inf", "-inf" and "nan" that the XML plist format
// expects; all other values use the shortest 'g' representation.
func formatXMLFloat(f float64) string {
	if math.IsNaN(f) {
		return "nan"
	}
	if math.IsInf(f, 1) {
		return "inf"
	}
	if math.IsInf(f, -1) {
		return "-inf"
	}
	return strconv.FormatFloat(f, 'g', -1, 64)
}
// xmlPlistGenerator emits XML-format property lists. It embeds a buffered
// writer, so generateDocument must Flush before returning.
type xmlPlistGenerator struct {
	*bufio.Writer

	indent     string // per-level indent; "" disables pretty-printing
	depth      int    // current element nesting depth
	putNewline bool   // false until the first line is written; suppresses a leading newline
}
// generateDocument writes the XML prologue and DOCTYPE, then a
// <plist version="1.0"> element wrapping root, and flushes the buffered
// writer.
func (p *xmlPlistGenerator) generateDocument(root cfValue) {
	p.WriteString(xmlHEADER + xmlDOCTYPE)
	p.openTag(`plist version="1.0"`)
	p.writePlistValue(root)
	p.closeTag(xmlPlistTag)
	p.Flush()
}
// openTag writes "<n>" on a fresh indented line and increases the depth for
// subsequent children.
func (p *xmlPlistGenerator) openTag(n string) {
	p.writeIndent(1)
	p.WriteString("<" + n + ">")
}

// closeTag decreases the depth and writes "</n>" on a fresh indented line.
func (p *xmlPlistGenerator) closeTag(n string) {
	p.writeIndent(-1)
	p.WriteString("</" + n + ">")
}

// element writes a complete element named n containing the XML-escaped text
// v; an empty v is emitted as a self-closing tag. Escaping failures panic
// (recovered at the encoder's top level).
func (p *xmlPlistGenerator) element(n string, v string) {
	p.writeIndent(0)
	if v == "" {
		p.WriteString("<" + n + "/>")
		return
	}
	p.WriteString("<" + n + ">")
	if err := xml.EscapeText(p.Writer, []byte(v)); err != nil {
		panic(err)
	}
	p.WriteString("</" + n + ">")
}
// writeDictionary emits dict as a <dict> element with keys sorted and each
// value rendered recursively after its <key>.
func (p *xmlPlistGenerator) writeDictionary(dict *cfDictionary) {
	dict.sort()
	p.openTag(xmlDictTag)
	for i := range dict.keys {
		p.element(xmlKeyTag, dict.keys[i])
		p.writePlistValue(dict.values[i])
	}
	p.closeTag(xmlDictTag)
}

// writeArray emits a as an <array> element containing each value in order.
func (p *xmlPlistGenerator) writeArray(a *cfArray) {
	p.openTag(xmlArrayTag)
	for _, elem := range a.values {
		p.writePlistValue(elem)
	}
	p.closeTag(xmlArrayTag)
}
// writePlistValue renders pval as its XML plist element. UIDs have no native
// XML representation and are encoded the way Apple does: a one-entry
// dictionary mapping "CF$UID" to the integer value. A nil pval emits nothing.
func (p *xmlPlistGenerator) writePlistValue(pval cfValue) {
	if pval == nil {
		return
	}
	switch pval := pval.(type) {
	case cfString:
		p.element(xmlStringTag, string(pval))
	case *cfNumber:
		if pval.signed {
			p.element(xmlIntegerTag, strconv.FormatInt(int64(pval.value), 10))
		} else {
			p.element(xmlIntegerTag, strconv.FormatUint(pval.value, 10))
		}
	case *cfReal:
		p.element(xmlRealTag, formatXMLFloat(pval.value))
	case cfBoolean:
		// Booleans are empty elements: <true/> or <false/>.
		if bool(pval) {
			p.element(xmlTrueTag, "")
		} else {
			p.element(xmlFalseTag, "")
		}
	case cfData:
		p.element(xmlDataTag, base64.StdEncoding.EncodeToString([]byte(pval)))
	case cfDate:
		p.element(xmlDateTag, time.Time(pval).In(time.UTC).Format(time.RFC3339))
	case *cfDictionary:
		p.writeDictionary(pval)
	case *cfArray:
		p.writeArray(pval)
	case cfUID:
		p.openTag(xmlDictTag)
		p.element(xmlKeyTag, xmlCFUIDMagic)
		p.element(xmlIntegerTag, strconv.FormatUint(uint64(pval), 10))
		p.closeTag(xmlDictTag)
	}
}
// writeIndent starts a fresh output line at the current depth when
// pretty-printing is enabled. delta < 0 dedents before writing (for a
// closing tag) and delta > 0 indents after writing (for an opening tag);
// zero keeps the depth unchanged.
func (p *xmlPlistGenerator) writeIndent(delta int) {
	if len(p.indent) == 0 {
		return // compact mode: no newlines or indentation
	}
	if delta < 0 {
		p.depth--
	}
	if p.putNewline {
		// from encoding/xml/marshal.go; it seems to be intended
		// to suppress the first newline.
		p.WriteByte('\n')
	} else {
		p.putNewline = true
	}
	for i := 0; i < p.depth; i++ {
		p.WriteString(p.indent)
	}
	if delta > 0 {
		p.depth++
	}
}
// Indent sets the per-level indentation string; "" disables pretty-printing.
func (p *xmlPlistGenerator) Indent(i string) {
	p.indent = i
}

// newXMLPlistGenerator returns a generator that writes XML plists to w
// through a buffered writer (flushed by generateDocument).
func newXMLPlistGenerator(w io.Writer) *xmlPlistGenerator {
	return &xmlPlistGenerator{Writer: bufio.NewWriter(w)}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/typeinfo.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/typeinfo.go | package plist
import (
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the plist representation of a type.
type typeInfo struct {
	fields []fieldInfo
}

// fieldInfo holds details for the plist representation of a single field.
type fieldInfo struct {
	idx       []int  // field index chain for reflect traversal (supports embedded structs)
	name      string // plist key name: the tag name, or the Go field name when untagged
	omitEmpty bool   // true when the tag carries the ",omitempty" flag
}

// tinfoMap caches computed typeInfo per type; access is guarded by tinfoLock.
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ. Results are cached in tinfoMap;
// concurrent callers may race to compute the same entry, which is harmless
// since all computed values for a type are identical.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
	tinfoLock.RLock()
	tinfo, ok := tinfoMap[typ]
	tinfoLock.RUnlock()
	if ok {
		return tinfo, nil
	}
	tinfo = &typeInfo{}
	if typ.Kind() == reflect.Struct {
		n := typ.NumField()
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.PkgPath != "" || f.Tag.Get("plist") == "-" {
				continue // Private field
			}
			// For embedded structs, embed its fields.
			if f.Anonymous {
				t := f.Type
				if t.Kind() == reflect.Ptr {
					t = t.Elem()
				}
				if t.Kind() == reflect.Struct {
					inner, err := getTypeInfo(t)
					if err != nil {
						return nil, err
					}
					for _, finfo := range inner.fields {
						// Prepend the embedded field's index so that
						// fieldInfo.value can traverse into the inner struct.
						finfo.idx = append([]int{i}, finfo.idx...)
						if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
							return nil, err
						}
					}
					continue
				}
			}
			finfo, err := structFieldInfo(typ, &f)
			if err != nil {
				return nil, err
			}
			// Add the field if it doesn't conflict with other fields.
			if err := addFieldInfo(typ, tinfo, finfo); err != nil {
				return nil, err
			}
		}
	}
	tinfoLock.Lock()
	tinfoMap[typ] = tinfo
	tinfoLock.Unlock()
	return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for struct field f. The
// plist key name comes from the `plist:"name,flags"` tag when present,
// falling back to the Go field name; the only recognized flag is
// "omitempty". typ is currently unused but retained for signature stability.
// (Removed a duplicated `tag = tokens[0]` assignment and a stale comment
// about XML namespaces left over from encoding/xml.)
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
	finfo := &fieldInfo{idx: f.Index}

	// Parse the tag: the first token is the name, the rest are flags.
	tokens := strings.Split(f.Tag.Get("plist"), ",")
	for _, flag := range tokens[1:] {
		switch flag {
		case "omitempty":
			finfo.omitEmpty = true
		}
	}

	// If the name part of the tag is completely empty, use the field name.
	if tokens[0] == "" {
		finfo.name = f.Name
	} else {
		finfo.name = tokens[0]
	}
	return finfo, nil
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
	var conflicts []int
	// First, figure all conflicts. Most working code will have none.
	for i := range tinfo.fields {
		oldf := &tinfo.fields[i]
		if newf.name == oldf.name {
			conflicts = append(conflicts, i)
		}
	}
	// Without conflicts, add the new field and return.
	if conflicts == nil {
		tinfo.fields = append(tinfo.fields, *newf)
		return nil
	}
	// If any conflict is shallower, ignore the new field.
	// This matches the Go field resolution on embedding.
	for _, i := range conflicts {
		if len(tinfo.fields[i].idx) < len(newf.idx) {
			return nil
		}
	}
	// Otherwise, the new field is shallower, and thus takes precedence,
	// so drop the conflicting fields from tinfo and append the new one.
	// Iterate in reverse so earlier indices stay valid while deleting.
	for c := len(conflicts) - 1; c >= 0; c-- {
		i := conflicts[c]
		copy(tinfo.fields[i:], tinfo.fields[i+1:])
		tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
	}
	tinfo.fields = append(tinfo.fields, *newf)
	return nil
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
	for i, x := range finfo.idx {
		if i > 0 {
			// Between hops we may be sitting on an embedded *struct;
			// allocate it if nil so the traversal can continue.
			t := v.Type()
			if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
				if v.IsNil() {
					v.Set(reflect.New(v.Type().Elem()))
				}
				v = v.Elem()
			}
		}
		v = v.Field(x)
	}
	return v
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/plist_types.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/plist_types.go | package plist
import (
"hash/crc32"
"sort"
"time"
)
// cfValue is the internal, Core-Foundation-like representation of any plist
// value. typeName returns a human-readable name for error messages; hash
// returns a comparable stand-in used to unique values.
type cfValue interface {
	typeName() string
	hash() interface{}
}
// cfDictionary is a plist dictionary held as parallel slices: keys[i] maps
// to values[i]. It implements sort.Interface so both slices can be reordered
// together by key.
type cfDictionary struct {
	keys   sort.StringSlice
	values []cfValue
}

func (*cfDictionary) typeName() string {
	return "dictionary"
}

// hash uniques dictionaries by identity (pointer), not by content.
func (p *cfDictionary) hash() interface{} {
	return p
}

func (p *cfDictionary) Len() int {
	return len(p.keys)
}

func (p *cfDictionary) Less(i, j int) bool {
	return p.keys.Less(i, j)
}

// Swap exchanges both keys and their corresponding values, keeping the
// parallel slices aligned.
func (p *cfDictionary) Swap(i, j int) {
	p.keys.Swap(i, j)
	p.values[i], p.values[j] = p.values[j], p.values[i]
}

// sort orders the dictionary by key for deterministic output.
func (p *cfDictionary) sort() {
	sort.Sort(p)
}
// cfArray is an ordered plist array.
type cfArray struct {
	values []cfValue
}

func (*cfArray) typeName() string {
	return "array"
}

// hash uniques arrays by identity (pointer), not by content.
func (p *cfArray) hash() interface{} {
	return p
}

type cfString string

func (cfString) typeName() string {
	return "string"
}

func (p cfString) hash() interface{} {
	return string(p)
}

// cfNumber is a plist integer; value holds the bit pattern and signed
// records whether it should be interpreted as an int64.
type cfNumber struct {
	signed bool
	value  uint64
}

func (*cfNumber) typeName() string {
	return "integer"
}

func (p *cfNumber) hash() interface{} {
	if p.signed {
		return int64(p.value)
	}
	return p.value
}

// cfReal is a plist real; wide records whether it carries float64 (rather
// than float32) precision, which determines the hash type below.
type cfReal struct {
	wide  bool
	value float64
}

func (cfReal) typeName() string {
	return "real"
}

func (p *cfReal) hash() interface{} {
	if p.wide {
		return p.value
	}
	return float32(p.value)
}
type cfBoolean bool

func (cfBoolean) typeName() string {
	return "boolean"
}

func (p cfBoolean) hash() interface{} {
	return bool(p)
}

// cfUID is a CoreFoundation unique-object identifier (see the UID type).
type cfUID UID

func (cfUID) typeName() string {
	return "UID"
}

func (p cfUID) hash() interface{} {
	return p
}

type cfData []byte

func (cfData) typeName() string {
	return "data"
}

func (p cfData) hash() interface{} {
	// Data are uniqued by their checksums.
	// Todo: Look at calculating this only once and storing it somewhere;
	// crc32 is fairly quick, however.
	return crc32.ChecksumIEEE([]byte(p))
}

type cfDate time.Time

func (cfDate) typeName() string {
	return "date"
}

func (p cfDate) hash() interface{} {
	return time.Time(p)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/util.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/util.go | package plist
import "io"
// countedWriter wraps an io.Writer and tracks the total number of bytes
// successfully written through it.
type countedWriter struct {
	io.Writer
	nbytes int // running total of bytes reported written by the wrapped Writer
}

// Write forwards p to the wrapped Writer and accumulates the byte count.
func (w *countedWriter) Write(p []byte) (int, error) {
	n, err := w.Writer.Write(p)
	w.nbytes += n
	return n, err
}

// BytesWritten reports the total number of bytes written so far.
func (w *countedWriter) BytesWritten() int {
	return w.nbytes
}
// unsignedGetBase inspects an unsigned numeric literal: a "0x"/"0X" prefix
// is stripped and base 16 returned; anything else is returned unchanged
// with base 10.
func unsignedGetBase(s string) (string, int) {
	hasHexPrefix := len(s) > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
	if hasHexPrefix {
		return s[2:], 16
	}
	return s, 10
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/zerocopy_appengine.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/zerocopy_appengine.go | // +build appengine
package plist
// zeroCopy8BitString materializes the string buf[off:off+len]. On App Engine
// an actual copy is made (the unsafe zero-copy variant is unavailable there).
func zeroCopy8BitString(buf []byte, off int, len int) string {
	end := off + len
	return string(buf[off:end])
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/marshal.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/marshal.go | package plist
import (
"encoding"
"reflect"
"time"
)
// isEmptyValue reports whether v holds its type's "empty" value for the
// purposes of the ",omitempty" struct-tag option: zero-length collections
// and strings, zero numbers, false booleans, and nil pointers/interfaces.
// All other kinds are never considered empty.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return !v.Bool()
	case reflect.String, reflect.Array, reflect.Slice, reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	default:
		return false
	}
}
var (
	plistMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()              // types with a custom MarshalPlist
	textMarshalerType  = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() // types marshalable to text
	timeType           = reflect.TypeOf((*time.Time)(nil)).Elem()              // special-cased: stored as plist dates
)
// implementsInterface reports whether val — or its address, when val is
// addressable — implements interfaceType, returning the implementing value
// as an interface{} on success.
func implementsInterface(val reflect.Value, interfaceType reflect.Type) (interface{}, bool) {
	if val.CanInterface() && val.Type().Implements(interfaceType) {
		return val.Interface(), true
	}
	// The method set of *T may satisfy interfaces that T alone does not.
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() && pv.Type().Implements(interfaceType) {
			return pv.Interface(), true
		}
	}
	return nil, false
}
// marshalPlistInterface defers marshaling to a type's own MarshalPlist and
// converts its result recursively. Errors surface as panics, recovered at
// the Encoder's top level.
func (p *Encoder) marshalPlistInterface(marshalable Marshaler) cfValue {
	out, err := marshalable.MarshalPlist()
	if err != nil {
		panic(err)
	}
	return p.marshal(reflect.ValueOf(out))
}

// marshalTextInterface marshals a TextMarshaler to a plist string.
func (p *Encoder) marshalTextInterface(marshalable encoding.TextMarshaler) cfValue {
	text, err := marshalable.MarshalText()
	if err != nil {
		panic(err)
	}
	return cfString(text)
}
// marshalStruct marshals a reflected struct value to a plist dictionary,
// using the cached per-type field metadata (tag names and omitempty flags).
func (p *Encoder) marshalStruct(typ reflect.Type, val reflect.Value) cfValue {
	tinfo, _ := getTypeInfo(typ)
	dict := &cfDictionary{
		keys:   make([]string, 0, len(tinfo.fields)),
		values: make([]cfValue, 0, len(tinfo.fields)),
	}
	for _, finfo := range tinfo.fields {
		value := finfo.value(val)
		// Skip invalid values, and empty values whose field is tagged ",omitempty".
		if !value.IsValid() || finfo.omitEmpty && isEmptyValue(value) {
			continue
		}
		dict.keys = append(dict.keys, finfo.name)
		dict.values = append(dict.values, p.marshal(value))
	}
	return dict
}

// marshalTime converts a reflected time.Time into a plist date.
func (p *Encoder) marshalTime(val reflect.Value) cfValue {
	time := val.Interface().(time.Time)
	return cfDate(time)
}
// marshal converts a reflected Go value into its cfValue representation.
// Precedence order: custom Marshaler, time.Time (stored as a plist date
// rather than via its TextMarshaler), TextMarshaler, then the built-in
// mapping for primitives, byte slices, slices/arrays, string-keyed maps and
// structs. Invalid or nil values yield nil; unsupported kinds panic with
// unknownTypeError (recovered at the Encoder's top level).
func (p *Encoder) marshal(val reflect.Value) cfValue {
	if !val.IsValid() {
		return nil
	}
	if receiver, can := implementsInterface(val, plistMarshalerType); can {
		return p.marshalPlistInterface(receiver.(Marshaler))
	}
	// time.Time implements TextMarshaler, but we need to store it in RFC3339
	if val.Type() == timeType {
		return p.marshalTime(val)
	}
	if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
		// Peek through one level of pointer/empty interface for a time.Time.
		ival := val.Elem()
		if ival.IsValid() && ival.Type() == timeType {
			return p.marshalTime(ival)
		}
	}
	// Check for text marshaler.
	if receiver, can := implementsInterface(val, textMarshalerType); can {
		return p.marshalTextInterface(receiver.(encoding.TextMarshaler))
	}
	// Descend into pointers or interfaces
	if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
		val = val.Elem()
	}
	// We got this far and still may have an invalid anything or nil ptr/interface
	if !val.IsValid() || ((val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface) && val.IsNil()) {
		return nil
	}
	typ := val.Type()
	if typ == uidType {
		return cfUID(val.Uint())
	}
	if val.Kind() == reflect.Struct {
		return p.marshalStruct(typ, val)
	}
	switch val.Kind() {
	case reflect.String:
		return cfString(val.String())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return &cfNumber{signed: true, value: uint64(val.Int())}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return &cfNumber{signed: false, value: val.Uint()}
	case reflect.Float32:
		return &cfReal{wide: false, value: val.Float()}
	case reflect.Float64:
		return &cfReal{wide: true, value: val.Float()}
	case reflect.Bool:
		return cfBoolean(val.Bool())
	case reflect.Slice, reflect.Array:
		if typ.Elem().Kind() == reflect.Uint8 {
			// Byte slices/arrays become plist data. Unaddressable values
			// (e.g. byte arrays inside interfaces) must be copied out.
			bytes := []byte(nil)
			if val.CanAddr() {
				bytes = val.Bytes()
			} else {
				bytes = make([]byte, val.Len())
				reflect.Copy(reflect.ValueOf(bytes), val)
			}
			return cfData(bytes)
		} else {
			values := make([]cfValue, val.Len())
			for i, length := 0, val.Len(); i < length; i++ {
				if subpval := p.marshal(val.Index(i)); subpval != nil {
					values[i] = subpval
				}
			}
			return &cfArray{values}
		}
	case reflect.Map:
		// Only string-keyed maps can be represented as plist dictionaries.
		if typ.Key().Kind() != reflect.String {
			panic(&unknownTypeError{typ})
		}
		l := val.Len()
		dict := &cfDictionary{
			keys:   make([]string, 0, l),
			values: make([]cfValue, 0, l),
		}
		for _, keyv := range val.MapKeys() {
			if subpval := p.marshal(val.MapIndex(keyv)); subpval != nil {
				dict.keys = append(dict.keys, keyv.String())
				dict.values = append(dict.values, subpval)
			}
		}
		return dict
	default:
		panic(&unknownTypeError{typ})
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/unmarshal.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/unmarshal.go | package plist
import (
"encoding"
"fmt"
"reflect"
"runtime"
"time"
)
// incompatibleDecodeTypeError reports an attempt to decode a plist value
// into a Go value of an incompatible type.
type incompatibleDecodeTypeError struct {
	dest reflect.Type
	src  string // type name (from cfValue)
}

func (u *incompatibleDecodeTypeError) Error() string {
	return fmt.Sprintf("plist: type mismatch: tried to decode plist type `%v' into value of type `%v'", u.src, u.dest)
}

var (
	plistUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()              // types with a custom UnmarshalPlist
	textUnmarshalerType  = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() // types unmarshalable from text
	uidType              = reflect.TypeOf(UID(0))                                  // plist UID destination type
)
// isEmptyInterface reports whether v is a bare interface{} value (an
// interface type with no methods), which can absorb any decoded plist value.
func isEmptyInterface(v reflect.Value) bool {
	if v.Kind() != reflect.Interface {
		return false
	}
	return v.NumMethod() == 0
}
// unmarshalPlistInterface dispatches decoding to a type's own UnmarshalPlist.
// The closure it supplies converts the decoder's panic-based error handling
// back into an ordinary error return, re-panicking only on genuine runtime
// errors; any error the custom unmarshaler returns is re-panicked for the
// Decoder's top level to recover.
func (p *Decoder) unmarshalPlistInterface(pval cfValue, unmarshalable Unmarshaler) {
	err := unmarshalable.UnmarshalPlist(func(i interface{}) (err error) {
		defer func() {
			if r := recover(); r != nil {
				if _, ok := r.(runtime.Error); ok {
					panic(r) // programmer error: do not convert to a soft error
				}
				err = r.(error)
			}
		}()
		p.unmarshal(pval, reflect.ValueOf(i))
		return
	})
	if err != nil {
		panic(err)
	}
}
// unmarshalTextInterface feeds a plist string to a type's UnmarshalText.
// Failures surface as panics, recovered at the Decoder's top level.
func (p *Decoder) unmarshalTextInterface(pval cfString, unmarshalable encoding.TextUnmarshaler) {
	if err := unmarshalable.UnmarshalText([]byte(pval)); err != nil {
		panic(err)
	}
}

// unmarshalTime stores a plist date into a reflected time.Time value.
func (p *Decoder) unmarshalTime(pval cfDate, val reflect.Value) {
	t := time.Time(pval)
	val.Set(reflect.ValueOf(t))
}
// unmarshalLaxString coerces a plist string into a non-string destination
// when the decoder is in lax mode: numeric kinds, booleans, and time.Time
// (parsed with the text-plist date layout, normalized to UTC). The must*
// parse helpers panic on malformed input; any other destination kind panics
// with incompatibleDecodeTypeError.
func (p *Decoder) unmarshalLaxString(s string, val reflect.Value) {
	switch val.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i := mustParseInt(s, 10, 64)
		val.SetInt(i)
		return
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		i := mustParseUint(s, 10, 64)
		val.SetUint(i)
		return
	case reflect.Float32, reflect.Float64:
		f := mustParseFloat(s, 64)
		val.SetFloat(f)
		return
	case reflect.Bool:
		b := mustParseBool(s)
		val.SetBool(b)
		return
	case reflect.Struct:
		if val.Type() == timeType {
			t, err := time.Parse(textPlistTimeLayout, s)
			if err != nil {
				panic(err)
			}
			val.Set(reflect.ValueOf(t.In(time.UTC)))
			return
		}
		fallthrough // non-time structs cannot be built from a string
	default:
		panic(&incompatibleDecodeTypeError{val.Type(), "string"})
	}
}
// unmarshal stores pval into the Go value val. Nil pointers are allocated
// and dereferenced; empty interfaces receive a generic representation (see
// valueInterface). Custom Unmarshaler and TextUnmarshaler hooks are honored,
// except that time.Time is always decoded from a plist date. Type mismatches
// panic with incompatibleDecodeTypeError, recovered at the Decoder's top
// level.
func (p *Decoder) unmarshal(pval cfValue, val reflect.Value) {
	if pval == nil {
		return
	}
	if val.Kind() == reflect.Ptr {
		if val.IsNil() {
			val.Set(reflect.New(val.Type().Elem()))
		}
		val = val.Elem()
	}
	if isEmptyInterface(val) {
		v := p.valueInterface(pval)
		val.Set(reflect.ValueOf(v))
		return
	}
	incompatibleTypeError := &incompatibleDecodeTypeError{val.Type(), pval.typeName()}
	// time.Time implements TextMarshaler, but we need to parse it as RFC3339
	if date, ok := pval.(cfDate); ok {
		if val.Type() == timeType {
			p.unmarshalTime(date, val)
			return
		}
		panic(incompatibleTypeError)
	}
	if receiver, can := implementsInterface(val, plistUnmarshalerType); can {
		p.unmarshalPlistInterface(pval, receiver.(Unmarshaler))
		return
	}
	if val.Type() != timeType {
		if receiver, can := implementsInterface(val, textUnmarshalerType); can {
			if str, ok := pval.(cfString); ok {
				p.unmarshalTextInterface(str, receiver.(encoding.TextUnmarshaler))
			} else {
				panic(incompatibleTypeError)
			}
			return
		}
	}
	typ := val.Type()
	switch pval := pval.(type) {
	case cfString:
		if val.Kind() == reflect.String {
			val.SetString(string(pval))
			return
		}
		if p.lax {
			// Lax mode: coerce strings into numbers, bools and dates.
			p.unmarshalLaxString(string(pval), val)
			return
		}
		panic(incompatibleTypeError)
	case *cfNumber:
		switch val.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			val.SetInt(int64(pval.value))
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			val.SetUint(pval.value)
		default:
			panic(incompatibleTypeError)
		}
	case *cfReal:
		if val.Kind() == reflect.Float32 || val.Kind() == reflect.Float64 {
			// TODO: Consider warning on a downcast (storing a 64-bit value in a 32-bit reflect)
			val.SetFloat(pval.value)
		} else {
			panic(incompatibleTypeError)
		}
	case cfBoolean:
		if val.Kind() == reflect.Bool {
			val.SetBool(bool(pval))
		} else {
			panic(incompatibleTypeError)
		}
	case cfData:
		if val.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
			val.SetBytes([]byte(pval))
		} else {
			panic(incompatibleTypeError)
		}
	case cfUID:
		if val.Type() == uidType {
			val.SetUint(uint64(pval))
		} else {
			// UIDs may also be stored into any plain integer destination.
			switch val.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				val.SetInt(int64(pval))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				val.SetUint(uint64(pval))
			default:
				panic(incompatibleTypeError)
			}
		}
	case *cfArray:
		p.unmarshalArray(pval, val)
	case *cfDictionary:
		p.unmarshalDictionary(pval, val)
	}
}
// unmarshalArray decodes a plist array into a Go slice (appended to, growing
// the backing array as needed) or a fixed-size Go array (which must be large
// enough). Other destination kinds panic with incompatibleDecodeTypeError.
func (p *Decoder) unmarshalArray(a *cfArray, val reflect.Value) {
	var n int
	if val.Kind() == reflect.Slice {
		// Slice of element values.
		// Grow slice.
		cnt := len(a.values) + val.Len()
		if cnt >= val.Cap() {
			// Double the needed size (minimum 4) to amortize regrowth.
			ncap := 2 * cnt
			if ncap < 4 {
				ncap = 4
			}
			new := reflect.MakeSlice(val.Type(), val.Len(), ncap)
			reflect.Copy(new, val)
			val.Set(new)
		}
		n = val.Len()
		val.SetLen(cnt)
	} else if val.Kind() == reflect.Array {
		if len(a.values) > val.Cap() {
			panic(fmt.Errorf("plist: attempted to unmarshal %d values into an array of size %d", len(a.values), val.Cap()))
		}
	} else {
		panic(&incompatibleDecodeTypeError{val.Type(), a.typeName()})
	}
	// Recur to read element into slice.
	for _, sval := range a.values {
		p.unmarshal(sval, val.Index(n))
		n++
	}
	return
}
// unmarshalDictionary decodes a plist dictionary into a Go struct (matching
// entries to fields via the cached plist tag metadata) or into a map with
// string-convertible keys (allocated if nil). Other destination kinds panic
// with incompatibleDecodeTypeError.
func (p *Decoder) unmarshalDictionary(dict *cfDictionary, val reflect.Value) {
	typ := val.Type()
	switch val.Kind() {
	case reflect.Struct:
		tinfo, err := getTypeInfo(typ)
		if err != nil {
			panic(err)
		}
		// Index entries by key so struct fields can be filled in any order;
		// fields with no matching key receive a nil cfValue (a no-op).
		entries := make(map[string]cfValue, len(dict.keys))
		for i, k := range dict.keys {
			sval := dict.values[i]
			entries[k] = sval
		}
		for _, finfo := range tinfo.fields {
			p.unmarshal(entries[finfo.name], finfo.value(val))
		}
	case reflect.Map:
		if val.IsNil() {
			val.Set(reflect.MakeMap(typ))
		}
		for i, k := range dict.keys {
			sval := dict.values[i]
			keyv := reflect.ValueOf(k).Convert(typ.Key())
			mapElem := reflect.New(typ.Elem()).Elem()
			p.unmarshal(sval, mapElem)
			val.SetMapIndex(keyv, mapElem)
		}
	default:
		panic(&incompatibleDecodeTypeError{typ, dict.typeName()})
	}
}
/* *Interface is modelled after encoding/json */

// valueInterface converts pval into a generic Go value for decoding into an
// empty interface: string, int64/uint64, float32/float64, bool,
// []interface{}, map[string]interface{}, []byte, time.Time or UID. An
// unrecognized cfValue yields nil.
func (p *Decoder) valueInterface(pval cfValue) interface{} {
	switch pval := pval.(type) {
	case cfString:
		return string(pval)
	case *cfNumber:
		if pval.signed {
			return int64(pval.value)
		}
		return pval.value
	case *cfReal:
		// Narrow values (parsed from 32-bit reals) surface as float32.
		if pval.wide {
			return pval.value
		} else {
			return float32(pval.value)
		}
	case cfBoolean:
		return bool(pval)
	case *cfArray:
		return p.arrayInterface(pval)
	case *cfDictionary:
		return p.dictionaryInterface(pval)
	case cfData:
		return []byte(pval)
	case cfDate:
		return time.Time(pval)
	case cfUID:
		return UID(pval)
	}
	return nil
}
// arrayInterface converts each element of a into its generic Go
// representation, preserving order.
func (p *Decoder) arrayInterface(a *cfArray) []interface{} {
	result := make([]interface{}, 0, len(a.values))
	for _, element := range a.values {
		result = append(result, p.valueInterface(element))
	}
	return result
}

// dictionaryInterface converts dict into a map of generic Go values keyed by
// the dictionary's string keys.
func (p *Decoder) dictionaryInterface(dict *cfDictionary) map[string]interface{} {
	result := make(map[string]interface{}, len(dict.keys))
	for i, key := range dict.keys {
		result[key] = p.valueInterface(dict.values[i])
	}
	return result
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/xml_parser.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/xml_parser.go | package plist
import (
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"runtime"
"strings"
"time"
)
// xmlPlistParser parses the XML property list format.
type xmlPlistParser struct {
	reader             io.Reader
	xmlDecoder         *xml.Decoder
	whitespaceReplacer *strings.Replacer // strips whitespace from <data> payloads before base64 decoding
	ntags              int               // number of plist elements consumed so far
}

// parseDocument reads tokens until the first start element and parses the
// document rooted there. Internal panics are converted into the returned
// error: failures before any element become invalidPlistError (the input
// is likely not XML at all); anything else becomes plistParseError.
func (p *xmlPlistParser) parseDocument() (pval cfValue, parseError error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			if _, ok := r.(invalidPlistError); ok {
				parseError = r.(error)
			} else {
				// Wrap all non-invalid-plist errors.
				parseError = plistParseError{"XML", r.(error)}
			}
		}
	}()
	for {
		if token, err := p.xmlDecoder.Token(); err == nil {
			if element, ok := token.(xml.StartElement); ok {
				pval = p.parseXMLElement(element)
				if p.ntags == 0 {
					panic(invalidPlistError{"XML", errors.New("no elements encountered")})
				}
				return
			}
		} else {
			// The first XML parse turned out to be invalid:
			// we do not have an XML property list.
			panic(invalidPlistError{"XML", err})
		}
	}
}
// parseXMLElement decodes one XML plist element (and, recursively, its
// children) into a cfValue. Errors are reported by panicking; the
// recover in parseDocument translates them into errors.
func (p *xmlPlistParser) parseXMLElement(element xml.StartElement) cfValue {
	var charData xml.CharData
	switch element.Name.Local {
	case "plist":
		// <plist> is a transparent wrapper around a single root value.
		p.ntags++
		for {
			token, err := p.xmlDecoder.Token()
			if err != nil {
				panic(err)
			}
			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "plist" {
				break
			}
			if el, ok := token.(xml.StartElement); ok {
				return p.parseXMLElement(el)
			}
		}
		return nil
	case "string":
		p.ntags++
		err := p.xmlDecoder.DecodeElement(&charData, &element)
		if err != nil {
			panic(err)
		}
		return cfString(charData)
	case "integer":
		p.ntags++
		err := p.xmlDecoder.DecodeElement(&charData, &element)
		if err != nil {
			panic(err)
		}
		s := string(charData)
		if len(s) == 0 {
			panic(errors.New("invalid empty <integer/>"))
		}
		// Negative values parse as signed; everything else as unsigned
		// so the full uint64 range round-trips.
		if s[0] == '-' {
			s, base := unsignedGetBase(s[1:])
			n := mustParseInt("-"+s, base, 64)
			return &cfNumber{signed: true, value: uint64(n)}
		} else {
			s, base := unsignedGetBase(s)
			n := mustParseUint(s, base, 64)
			return &cfNumber{signed: false, value: n}
		}
	case "real":
		p.ntags++
		err := p.xmlDecoder.DecodeElement(&charData, &element)
		if err != nil {
			panic(err)
		}
		n := mustParseFloat(string(charData), 64)
		return &cfReal{wide: true, value: n}
	case "true", "false":
		// Self-closing boolean tags: the element name carries the value.
		p.ntags++
		p.xmlDecoder.Skip()
		b := element.Name.Local == "true"
		return cfBoolean(b)
	case "date":
		p.ntags++
		err := p.xmlDecoder.DecodeElement(&charData, &element)
		if err != nil {
			panic(err)
		}
		t, err := time.ParseInLocation(time.RFC3339, string(charData), time.UTC)
		if err != nil {
			panic(err)
		}
		return cfDate(t)
	case "data":
		p.ntags++
		err := p.xmlDecoder.DecodeElement(&charData, &element)
		if err != nil {
			panic(err)
		}
		// Strip embedded whitespace before base64 decoding.
		str := p.whitespaceReplacer.Replace(string(charData))
		l := base64.StdEncoding.DecodedLen(len(str))
		bytes := make([]uint8, l)
		l, err = base64.StdEncoding.Decode(bytes, []byte(str))
		if err != nil {
			panic(err)
		}
		return cfData(bytes[:l])
	case "dict":
		p.ntags++
		var key *string
		keys := make([]string, 0, 32)
		values := make([]cfValue, 0, 32)
		for {
			token, err := p.xmlDecoder.Token()
			if err != nil {
				panic(err)
			}
			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "dict" {
				if key != nil {
					panic(errors.New("missing value in dictionary"))
				}
				break
			}
			if el, ok := token.(xml.StartElement); ok {
				if el.Name.Local == "key" {
					var k string
					p.xmlDecoder.DecodeElement(&k, &el)
					key = &k
				} else {
					if key == nil {
						panic(errors.New("missing key in dictionary"))
					}
					keys = append(keys, *key)
					values = append(values, p.parseXMLElement(el))
					key = nil
				}
			}
		}
		// A dict of exactly {CF$UID: <integer>} is a keyed-archiver UID.
		if len(keys) == 1 && keys[0] == "CF$UID" && len(values) == 1 {
			if integer, ok := values[0].(*cfNumber); ok {
				return cfUID(integer.value)
			}
		}
		return &cfDictionary{keys: keys, values: values}
	case "array":
		p.ntags++
		values := make([]cfValue, 0, 10)
		for {
			token, err := p.xmlDecoder.Token()
			if err != nil {
				panic(err)
			}
			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "array" {
				break
			}
			if el, ok := token.(xml.StartElement); ok {
				values = append(values, p.parseXMLElement(el))
			}
		}
		return &cfArray{values}
	}
	err := fmt.Errorf("encountered unknown element %s", element.Name.Local)
	if p.ntags == 0 {
		// If our first XML tag is invalid, it might be an openstep data element, ala <abab> or <0101>
		panic(invalidPlistError{"XML", err})
	}
	panic(err)
}
// newXMLPlistParser constructs an XML plist parser reading from r.
// The replacer strips whitespace out of <data> contents prior to
// base64 decoding.
func newXMLPlistParser(r io.Reader) *xmlPlistParser {
	return &xmlPlistParser{
		reader:             r,
		xmlDecoder:         xml.NewDecoder(r),
		whitespaceReplacer: strings.NewReplacer("\t", "", "\n", "", " ", "", "\r", ""),
		ntags:              0,
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_parser.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_parser.go | package plist
import (
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"runtime"
"strings"
"time"
"unicode/utf16"
"unicode/utf8"
)
// textPlistParser parses OpenStep/GNUStep text property lists from an
// in-memory string using a lexer-style start/pos/width cursor.
type textPlistParser struct {
	reader io.Reader
	format int    // detected dialect: OpenStepFormat or GNUStepFormat
	input  string // the entire document, decoded to UTF-8
	start  int    // beginning of the token being accumulated
	pos    int    // current read position
	width  int    // byte width of the rune last returned by next()
}
func convertU16(buffer []byte, bo binary.ByteOrder) (string, error) {
if len(buffer)%2 != 0 {
return "", errors.New("truncated utf16")
}
tmp := make([]uint16, len(buffer)/2)
for i := 0; i < len(buffer); i += 2 {
tmp[i/2] = bo.Uint16(buffer[i : i+2])
}
return string(utf16.Decode(tmp)), nil
}
// guessEncodingAndConvert sniffs buffer's text encoding (UTF-8 BOM,
// UTF-16 BOM, or a leading-zero-byte heuristic) and returns the content
// decoded to a UTF-8 Go string. With no recognizable marker it assumes
// an 8-bit encoding and returns the bytes unchanged.
func guessEncodingAndConvert(buffer []byte) (string, error) {
	if len(buffer) >= 3 && buffer[0] == 0xEF && buffer[1] == 0xBB && buffer[2] == 0xBF {
		// UTF-8 BOM
		return zeroCopy8BitString(buffer, 3, len(buffer)-3), nil
	} else if len(buffer) >= 2 {
		// UTF-16 guesses
		switch {
		// stream is big-endian (BOM is FE FF or head is 00 XX)
		case (buffer[0] == 0xFE && buffer[1] == 0xFF):
			return convertU16(buffer[2:], binary.BigEndian)
		case (buffer[0] == 0 && buffer[1] != 0):
			return convertU16(buffer, binary.BigEndian)
		// stream is little-endian (BOM is FF FE or head is XX 00)
		case (buffer[0] == 0xFF && buffer[1] == 0xFE):
			return convertU16(buffer[2:], binary.LittleEndian)
		case (buffer[0] != 0 && buffer[1] == 0):
			return convertU16(buffer, binary.LittleEndian)
		}
	}
	// fallback: assume ASCII (not great!)
	return zeroCopy8BitString(buffer, 0, len(buffer)), nil
}
// parseDocument reads the entire input, decodes it to UTF-8 and parses
// it as a text plist. A document that parses to a bare string but has
// trailing content is re-parsed from the start as a brace-less
// dictionary (the old .strings style). Panics become plistParseError.
func (p *textPlistParser) parseDocument() (pval cfValue, parseError error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			// Wrap all non-invalid-plist errors.
			parseError = plistParseError{"text", r.(error)}
		}
	}()
	buffer, err := ioutil.ReadAll(p.reader)
	if err != nil {
		panic(err)
	}
	p.input, err = guessEncodingAndConvert(buffer)
	if err != nil {
		panic(err)
	}
	val := p.parsePlistValue()
	p.skipWhitespaceAndComments()
	if p.peek() != eof {
		if _, ok := val.(cfString); !ok {
			p.error("garbage after end of document")
		}
		// Rewind and parse the whole input as a bare dictionary.
		p.start = 0
		p.pos = 0
		val = p.parseDictionary(true)
	}
	pval = val
	return
}
// eof is the sentinel rune returned by next() at end of input.
const eof rune = -1

// error panics with a message annotated with the current line and column.
func (p *textPlistParser) error(e string, args ...interface{}) {
	line := strings.Count(p.input[:p.pos], "\n")
	char := p.pos - strings.LastIndex(p.input[:p.pos], "\n") - 1
	panic(fmt.Errorf("%s at line %d character %d", fmt.Sprintf(e, args...), line, char))
}

// next consumes and returns one rune, recording its byte width so that
// backup() can step back exactly one rune.
func (p *textPlistParser) next() rune {
	if int(p.pos) >= len(p.input) {
		p.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(p.input[p.pos:])
	p.width = w
	p.pos += p.width
	return r
}

// backup un-consumes the rune most recently returned by next().
func (p *textPlistParser) backup() {
	p.pos -= p.width
}

// peek returns the next rune without consuming it.
func (p *textPlistParser) peek() rune {
	r := p.next()
	p.backup()
	return r
}

// emit returns the text accumulated since the last emit/ignore and
// advances the token start past it.
func (p *textPlistParser) emit() string {
	s := p.input[p.start:p.pos]
	p.start = p.pos
	return s
}

// ignore discards the text accumulated since the last emit/ignore.
func (p *textPlistParser) ignore() {
	p.start = p.pos
}

// empty reports whether nothing has accumulated since the last emit/ignore.
func (p *textPlistParser) empty() bool {
	return p.start == p.pos
}
// scanUntil advances pos to the next occurrence of ch (or to EOF),
// leaving ch itself unconsumed.
func (p *textPlistParser) scanUntil(ch rune) {
	if x := strings.IndexRune(p.input[p.pos:], ch); x >= 0 {
		p.pos += x
		return
	}
	p.pos = len(p.input)
}

// scanUntilAny advances pos to the next occurrence of any rune in chs
// (or to EOF), leaving that rune unconsumed.
func (p *textPlistParser) scanUntilAny(chs string) {
	if x := strings.IndexAny(p.input[p.pos:], chs); x >= 0 {
		p.pos += x
		return
	}
	p.pos = len(p.input)
}

// scanCharactersInSet consumes runes while they are members of ch.
func (p *textPlistParser) scanCharactersInSet(ch *characterSet) {
	for ch.Contains(p.next()) {
	}
	p.backup()
}

// scanCharactersNotInSet consumes runes until EOF or a member of ch.
func (p *textPlistParser) scanCharactersNotInSet(ch *characterSet) {
	var r rune
	for {
		r = p.next()
		if r == eof || ch.Contains(r) {
			break
		}
	}
	p.backup()
}
// skipWhitespaceAndComments consumes any mix of whitespace, // line
// comments and /* block comments */, then discards the consumed text.
// An unterminated block comment is an error.
func (p *textPlistParser) skipWhitespaceAndComments() {
	for {
		p.scanCharactersInSet(&whitespace)
		if strings.HasPrefix(p.input[p.pos:], "//") {
			p.scanCharactersNotInSet(&newlineCharacterSet)
		} else if strings.HasPrefix(p.input[p.pos:], "/*") {
			if x := strings.Index(p.input[p.pos:], "*/"); x >= 0 {
				p.pos += x + 2 // skip the */ as well
				continue       // consume more whitespace
			} else {
				p.error("unexpected eof in block comment")
			}
		} else {
			break
		}
	}
	p.ignore()
}
// parseOctalDigits accumulates up to max octal digits into a value,
// stopping early at the first non-octal rune (which is left unconsumed).
func (p *textPlistParser) parseOctalDigits(max int) uint64 {
	var val uint64
	for i := 0; i < max; i++ {
		r := p.next()
		if r >= '0' && r <= '7' {
			val <<= 3
			val |= uint64((r - '0'))
		} else {
			p.backup()
			break
		}
	}
	return val
}

// parseHexDigits accumulates up to max hex digits (either case) into a
// value, stopping early at the first non-hex rune (left unconsumed).
func (p *textPlistParser) parseHexDigits(max int) uint64 {
	var val uint64
	for i := 0; i < max; i++ {
		r := p.next()
		if r >= 'a' && r <= 'f' {
			val <<= 4
			val |= 10 + uint64((r - 'a'))
		} else if r >= 'A' && r <= 'F' {
			val <<= 4
			val |= 10 + uint64((r - 'A'))
		} else if r >= '0' && r <= '9' {
			val <<= 4
			val |= uint64((r - '0'))
		} else {
			p.backup()
			break
		}
	}
	return val
}
// the \ has already been consumed

// parseEscape decodes one backslash escape sequence (C-style letters,
// \xNN, \uNNNN/\UNNNN, or up to three octal digits) and returns its
// expansion. An unrecognized escape leaves the following rune in place
// and expands to the empty string.
func (p *textPlistParser) parseEscape() string {
	var s string
	switch p.next() {
	case 'a':
		s = "\a"
	case 'b':
		s = "\b"
	case 'v':
		s = "\v"
	case 'f':
		s = "\f"
	case 't':
		s = "\t"
	case 'r':
		s = "\r"
	case 'n':
		s = "\n"
	case '\\':
		s = `\`
	case '"':
		s = `"`
	case 'x':
		s = string(rune(p.parseHexDigits(2)))
	case 'u', 'U':
		s = string(rune(p.parseHexDigits(4)))
	case '0', '1', '2', '3', '4', '5', '6', '7':
		p.backup() // we've already consumed one of the digits
		s = string(rune(p.parseOctalDigits(3)))
	default:
		p.backup() // everything else should be accepted
	}
	p.ignore() // skip the entire escape sequence
	return s
}
// the " has already been consumed
func (p *textPlistParser) parseQuotedString() cfString {
p.ignore() // ignore the "
slowPath := false
s := ""
for {
p.scanUntilAny(`"\`)
switch p.peek() {
case eof:
p.error("unexpected eof in quoted string")
case '"':
section := p.emit()
p.pos++ // skip "
if !slowPath {
return cfString(section)
} else {
s += section
return cfString(s)
}
case '\\':
slowPath = true
s += p.emit()
p.next() // consume \
s += p.parseEscape()
}
}
}
func (p *textPlistParser) parseUnquotedString() cfString {
p.scanCharactersNotInSet(&gsQuotable)
s := p.emit()
if s == "" {
p.error("invalid unquoted string (found an unquoted character that should be quoted?)")
}
return cfString(s)
}
// the { has already been consumed

// parseDictionary parses `key = value;` pairs until } (or, when
// ignoreEof is set — the brace-less .strings form — until EOF).
// A bare `key;` entry is shorthand for `key = key;`.
func (p *textPlistParser) parseDictionary(ignoreEof bool) *cfDictionary {
	//p.ignore() // ignore the {
	var keypv cfValue
	keys := make([]string, 0, 32)
	values := make([]cfValue, 0, 32)
outer:
	for {
		p.skipWhitespaceAndComments()
		switch p.next() {
		case eof:
			if !ignoreEof {
				p.error("unexpected eof in dictionary")
			}
			fallthrough
		case '}':
			break outer
		case '"':
			keypv = p.parseQuotedString()
		default:
			p.backup()
			keypv = p.parseUnquotedString()
		}
		// INVARIANT: key can't be nil; parseQuoted and parseUnquoted
		// will panic out before they return nil.
		p.skipWhitespaceAndComments()
		var val cfValue
		n := p.next()
		if n == ';' {
			// `key;` — the value defaults to the key itself.
			val = keypv
		} else if n == '=' {
			// whitespace is consumed within
			val = p.parsePlistValue()
			p.skipWhitespaceAndComments()
			if p.next() != ';' {
				p.error("missing ; in dictionary")
			}
		} else {
			p.error("missing = in dictionary")
		}
		keys = append(keys, string(keypv.(cfString)))
		values = append(values, val)
	}
	return &cfDictionary{keys: keys, values: values}
}
// the ( has already been consumed

// parseArray parses comma-separated values until ). Stray commas are
// tolerated, and empty-string elements are silently dropped.
func (p *textPlistParser) parseArray() *cfArray {
	//p.ignore() // ignore the (
	values := make([]cfValue, 0, 32)
outer:
	for {
		p.skipWhitespaceAndComments()
		switch p.next() {
		case eof:
			p.error("unexpected eof in array")
		case ')':
			break outer // done here
		case ',':
			continue // restart; ,) is valid and we don't want to blow it
		default:
			p.backup()
		}
		pval := p.parsePlistValue() // whitespace is consumed within
		if str, ok := pval.(cfString); ok && string(str) == "" {
			// Empty strings in arrays are apparently skipped?
			// TODO: Figure out why this was implemented.
			continue
		}
		values = append(values, pval)
	}
	return &cfArray{values}
}
// the <* have already been consumed

// parseGNUStepValue parses a GNUStep extended value <*Tvalue>, where T
// selects the type: I(nteger), R(eal), B(oolean: Y/N) or D(ate).
func (p *textPlistParser) parseGNUStepValue() cfValue {
	typ := p.next()
	p.ignore()
	p.scanUntil('>')
	if typ == eof || typ == '>' || p.empty() || p.peek() == eof {
		p.error("invalid GNUStep extended value")
	}
	v := p.emit()
	p.next() // consume the >
	switch typ {
	case 'I':
		// Mirror the XML parser: negatives go through the signed path.
		if v[0] == '-' {
			n := mustParseInt(v, 10, 64)
			return &cfNumber{signed: true, value: uint64(n)}
		} else {
			n := mustParseUint(v, 10, 64)
			return &cfNumber{signed: false, value: n}
		}
	case 'R':
		n := mustParseFloat(v, 64)
		return &cfReal{wide: true, value: n} // TODO(DH) 32/64
	case 'B':
		b := v[0] == 'Y'
		return cfBoolean(b)
	case 'D':
		t, err := time.Parse(textPlistTimeLayout, v)
		if err != nil {
			p.error(err.Error())
		}
		return cfDate(t.In(time.UTC))
	}
	p.error("invalid GNUStep type " + string(typ))
	return nil
}
// The < has already been consumed

// parseHexData decodes <AABBCC…> hex data, two digits per byte,
// doubling the buffer when it fills. Interior whitespace is tolerated;
// an odd number of digits is an error.
func (p *textPlistParser) parseHexData() cfData {
	buf := make([]byte, 256)
	i := 0 // number of completed bytes in buf
	c := 0 // number of hex digits consumed
	for {
		r := p.next()
		switch r {
		case eof:
			p.error("unexpected eof in data")
		case '>':
			if c&1 == 1 {
				p.error("uneven number of hex digits in data")
			}
			p.ignore()
			return cfData(buf[:i])
		case ' ', '\t', '\n', '\r', '\u2028', '\u2029': // more lax than apple here: skip spaces
			continue
		}
		buf[i] <<= 4
		if r >= 'a' && r <= 'f' {
			buf[i] |= 10 + byte((r - 'a'))
		} else if r >= 'A' && r <= 'F' {
			buf[i] |= 10 + byte((r - 'A'))
		} else if r >= '0' && r <= '9' {
			buf[i] |= byte((r - '0'))
		} else {
			p.error("unexpected hex digit `%c'", r)
		}
		c++
		if c&1 == 0 {
			i++
			if i >= len(buf) {
				// Grow by doubling, preserving what we have.
				realloc := make([]byte, len(buf)*2)
				copy(realloc, buf)
				buf = realloc
			}
		}
	}
}
// parsePlistValue dispatches on the next significant character to the
// appropriate sub-parser. EOF yields an empty dictionary.
func (p *textPlistParser) parsePlistValue() cfValue {
	for {
		p.skipWhitespaceAndComments()
		switch p.next() {
		case eof:
			return &cfDictionary{}
		case '<':
			// <* introduces a GNUStep typed value; a bare < is hex data.
			if p.next() == '*' {
				p.format = GNUStepFormat
				return p.parseGNUStepValue()
			}
			p.backup()
			return p.parseHexData()
		case '"':
			return p.parseQuotedString()
		case '{':
			return p.parseDictionary(false)
		case '(':
			return p.parseArray()
		default:
			p.backup()
			return p.parseUnquotedString()
		}
	}
}

// newTextPlistParser returns a text plist parser reading from r. The
// format defaults to OpenStep until GNUStep syntax is encountered.
func newTextPlistParser(r io.Reader) *textPlistParser {
	return &textPlistParser{
		reader: r,
		format: OpenStepFormat,
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/encode.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/encode.go | package plist
import (
"bytes"
"errors"
"io"
"reflect"
"runtime"
)
// generator is implemented by each output-format backend
// (XML, binary, text).
type generator interface {
	generateDocument(cfValue)
	Indent(string)
}

// An Encoder writes a property list to an output stream.
type Encoder struct {
	writer io.Writer
	format int    // one of the *Format constants
	indent string // indentation unit; empty disables pretty-printing
}
// Encode writes the property list encoding of v to the stream.
func (p *Encoder) Encode(v interface{}) (err error) {
	// Marshaling reports failure by panicking; recover converts those
	// panics (other than runtime errors, which are real bugs) into the
	// returned error.
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()
	pval := p.marshal(reflect.ValueOf(v))
	if pval == nil {
		panic(errors.New("plist: no root element to encode"))
	}
	var g generator
	switch p.format {
	case XMLFormat:
		g = newXMLPlistGenerator(p.writer)
	case BinaryFormat, AutomaticFormat:
		// AutomaticFormat currently means binary.
		g = newBplistGenerator(p.writer)
	case OpenStepFormat, GNUStepFormat:
		g = newTextPlistGenerator(p.writer, p.format)
	}
	g.Indent(p.indent)
	g.generateDocument(pval)
	return
}
// Indent turns on pretty-printing for the XML and Text property list formats.
// Each element begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
// The binary format ignores indentation.
func (p *Encoder) Indent(indent string) {
	p.indent = indent
}

// NewEncoder returns an Encoder that writes an XML property list to w.
func NewEncoder(w io.Writer) *Encoder {
	return NewEncoderForFormat(w, XMLFormat)
}

// NewEncoderForFormat returns an Encoder that writes a property list to w in the specified format.
// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
func NewEncoderForFormat(w io.Writer, format int) *Encoder {
	return &Encoder{
		writer: w,
		format: format,
	}
}

// NewBinaryEncoder returns an Encoder that writes a binary property list to w.
func NewBinaryEncoder(w io.Writer) *Encoder {
	return NewEncoderForFormat(w, BinaryFormat)
}
// Marshal returns the property list encoding of v in the specified format.
//
// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
//
// Marshal traverses the value v recursively.
// Any nil values encountered, other than the root, will be silently discarded as
// the property list format bears no representation for nil values.
//
// Strings, integers of varying size, floats and booleans are encoded unchanged.
// Strings bearing non-ASCII runes will be encoded differently depending upon the property list format:
// UTF-8 for XML property lists and UTF-16 for binary property lists.
//
// Slice and Array values are encoded as property list arrays, except for
// []byte values, which are encoded as data.
//
// Map values encode as dictionaries. The map's key type must be string; there is no provision for encoding non-string dictionary keys.
//
// Struct values are encoded as dictionaries, with only exported fields being serialized. Struct field encoding may be influenced with the use of tags.
// The tag format is:
//
//	`plist:"<key>[,flags...]"`
//
// The following flags are supported:
//
//	omitempty	Only include the field if it is not set to the zero value for its type.
//
// If the key is "-", the field is ignored.
//
// Anonymous struct fields are encoded as if their exported fields were exposed via the outer struct.
//
// Pointer values encode as the value pointed to.
//
// Channel, complex and function values cannot be encoded. Any attempt to do so causes Marshal to return an error.
func Marshal(v interface{}, format int) ([]byte, error) {
	return MarshalIndent(v, format, "")
}

// MarshalIndent works like Marshal, but each property list element
// begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
func MarshalIndent(v interface{}, format int, indent string) ([]byte, error) {
	// Encode into an in-memory buffer and hand back its bytes.
	buf := &bytes.Buffer{}
	enc := NewEncoderForFormat(buf, format)
	enc.Indent(indent)
	if err := enc.Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist_generator.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist_generator.go | package plist
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"unicode/utf16"
)
// bplistMinimumIntSize returns the smallest power-of-two byte width
// (1, 2, 4 or 8) capable of representing n.
func bplistMinimumIntSize(n uint64) int {
	switch {
	case n > uint64(0xffffffff):
		return 8
	case n > uint64(0xffff):
		return 4
	case n > uint64(0xff):
		return 2
	default:
		return 1
	}
}
// bplistValueShouldUnique reports whether values of this kind are
// deduplicated in the output (scalars, dates and data; containers are
// always written fresh).
func bplistValueShouldUnique(pval cfValue) bool {
	switch pval.(type) {
	case cfString, *cfNumber, *cfReal, cfDate, cfData:
		return true
	}
	return false
}

// bplistGenerator emits the "bplist00" binary property list format.
type bplistGenerator struct {
	writer   *countedWriter // counts bytes written so offsets can be recorded
	objmap   map[interface{}]uint64 // maps pValue.hash()es to object locations
	objtable []cfValue              // object ID -> value, in emission order
	trailer  bplistTrailer
}
// flattenPlistValue assigns pval (and, recursively, its children) an
// object ID in objtable, deduplicating uniquable values by their hash.
func (p *bplistGenerator) flattenPlistValue(pval cfValue) {
	key := pval.hash()
	if bplistValueShouldUnique(pval) {
		if _, ok := p.objmap[key]; ok {
			return
		}
	}
	p.objmap[key] = uint64(len(p.objtable))
	p.objtable = append(p.objtable, pval)
	switch pval := pval.(type) {
	case *cfDictionary:
		// Sort now so the key order here matches writeDictionaryTag.
		pval.sort()
		for _, k := range pval.keys {
			p.flattenPlistValue(cfString(k))
		}
		for _, v := range pval.values {
			p.flattenPlistValue(v)
		}
	case *cfArray:
		for _, v := range pval.values {
			p.flattenPlistValue(v)
		}
	}
}

// indexForPlistValue looks up the object ID assigned by flattenPlistValue.
func (p *bplistGenerator) indexForPlistValue(pval cfValue) (uint64, bool) {
	v, ok := p.objmap[pval.hash()]
	return v, ok
}

// generateDocument writes the complete binary plist: the "bplist00"
// magic, all flattened objects, the offset table, then the trailer.
func (p *bplistGenerator) generateDocument(root cfValue) {
	p.objtable = make([]cfValue, 0, 16)
	p.objmap = make(map[interface{}]uint64)
	p.flattenPlistValue(root)
	p.trailer.NumObjects = uint64(len(p.objtable))
	p.trailer.ObjectRefSize = uint8(bplistMinimumIntSize(p.trailer.NumObjects))
	p.writer.Write([]byte("bplist00"))
	offtable := make([]uint64, p.trailer.NumObjects)
	for i, pval := range p.objtable {
		// Record each object's starting offset before writing it.
		offtable[i] = uint64(p.writer.BytesWritten())
		p.writePlistValue(pval)
	}
	p.trailer.OffsetIntSize = uint8(bplistMinimumIntSize(uint64(p.writer.BytesWritten())))
	p.trailer.TopObject = p.objmap[root.hash()]
	p.trailer.OffsetTableOffset = uint64(p.writer.BytesWritten())
	for _, offset := range offtable {
		p.writeSizedInt(offset, int(p.trailer.OffsetIntSize))
	}
	binary.Write(p.writer, binary.BigEndian, p.trailer)
}
// writePlistValue emits the binary-plist encoding of a single value.
// A nil value is silently skipped (nothing is written).
func (p *bplistGenerator) writePlistValue(pval cfValue) {
	if pval == nil {
		return
	}
	switch pval := pval.(type) {
	case *cfDictionary:
		p.writeDictionaryTag(pval)
	case *cfArray:
		p.writeArrayTag(pval.values)
	case cfString:
		p.writeStringTag(string(pval))
	case *cfNumber:
		p.writeIntTag(pval.signed, pval.value)
	case *cfReal:
		if pval.wide {
			p.writeRealTag(pval.value, 64)
		} else {
			p.writeRealTag(pval.value, 32)
		}
	case cfBoolean:
		p.writeBoolTag(bool(pval))
	case cfData:
		p.writeDataTag([]byte(pval))
	case cfDate:
		p.writeDateTag(time.Time(pval))
	case cfUID:
		p.writeUIDTag(UID(pval))
	default:
		// BUGFIX: %T (dynamic type) — the original used %t, which is
		// fmt's boolean verb and printed "%!t(...)" noise instead of
		// the offending type name.
		panic(fmt.Errorf("unknown plist type %T", pval))
	}
}
// writeSizedInt writes n big-endian using exactly nbytes bytes;
// widths other than 1, 2, 4 or 8 panic.
func (p *bplistGenerator) writeSizedInt(n uint64, nbytes int) {
	var encoded interface{}
	switch nbytes {
	case 1:
		encoded = uint8(n)
	case 2:
		encoded = uint16(n)
	case 4:
		encoded = uint32(n)
	case 8:
		encoded = n
	default:
		panic(errors.New("illegal integer size"))
	}
	binary.Write(p.writer, binary.BigEndian, encoded)
}
// writeBoolTag emits the single-byte boolean marker.
func (p *bplistGenerator) writeBoolTag(v bool) {
	var tag uint8
	if v {
		tag = bpTagBoolTrue
	} else {
		tag = bpTagBoolFalse
	}
	binary.Write(p.writer, binary.BigEndian, tag)
}
// writeIntTag emits an integer object in the smallest width that holds
// n. Because 64-bit integers are signed in format 00, an unsigned value
// above the signed range is widened to a zero-extended 16-byte SInt128.
func (p *bplistGenerator) writeIntTag(signed bool, n uint64) {
	var tag uint8
	var val interface{}
	switch {
	case n <= uint64(0xff):
		val = uint8(n)
		tag = bpTagInteger | 0x0
	case n <= uint64(0xffff):
		val = uint16(n)
		tag = bpTagInteger | 0x1
	case n <= uint64(0xffffffff):
		val = uint32(n)
		tag = bpTagInteger | 0x2
	case n > uint64(0x7fffffffffffffff) && !signed:
		// 64-bit values are always *signed* in format 00.
		// Any unsigned value that doesn't intersect with the signed
		// range must be sign-extended and stored as a SInt128
		val = n
		tag = bpTagInteger | 0x4
	default:
		val = n
		tag = bpTagInteger | 0x3
	}
	binary.Write(p.writer, binary.BigEndian, tag)
	if tag&0xF == 0x4 {
		// SInt128; in the absence of true 128-bit integers in Go,
		// we'll just fake the top half. We only got here because
		// we had an unsigned 64-bit int that didn't fit,
		// so sign extend it with zeroes.
		binary.Write(p.writer, binary.BigEndian, uint64(0))
	}
	binary.Write(p.writer, binary.BigEndian, val)
}

// writeUIDTag emits a keyed-archiver UID using its minimum byte width;
// the width (minus one) is stored in the marker's low nybble.
func (p *bplistGenerator) writeUIDTag(u UID) {
	nbytes := bplistMinimumIntSize(uint64(u))
	tag := uint8(bpTagUID | (nbytes - 1))
	binary.Write(p.writer, binary.BigEndian, tag)
	p.writeSizedInt(uint64(u), nbytes)
}
// writeRealTag emits a floating-point object, 32- or 64-bit wide.
func (p *bplistGenerator) writeRealTag(n float64, bits int) {
	var tag uint8 = bpTagReal | 0x3
	var val interface{} = n
	if bits == 32 {
		val = float32(n)
		tag = bpTagReal | 0x2
	}
	binary.Write(p.writer, binary.BigEndian, tag)
	binary.Write(p.writer, binary.BigEndian, val)
}

// writeDateTag emits a date as float64 seconds relative to the Apple
// epoch (978307200 seconds after the Unix epoch).
func (p *bplistGenerator) writeDateTag(t time.Time) {
	tag := uint8(bpTagDate) | 0x3
	val := float64(t.In(time.UTC).UnixNano()) / float64(time.Second)
	val -= 978307200 // Adjust to Apple Epoch
	binary.Write(p.writer, binary.BigEndian, tag)
	binary.Write(p.writer, binary.BigEndian, val)
}
// writeCountedTag emits a marker byte whose low nybble holds count, or
// the 0xF escape followed by a full integer object when count >= 15.
func (p *bplistGenerator) writeCountedTag(tag uint8, count uint64) {
	fitsInline := count < 0xF
	marker := tag | 0xF
	if fitsInline {
		marker = tag | uint8(count)
	}
	binary.Write(p.writer, binary.BigEndian, marker)
	if !fitsInline {
		p.writeIntTag(false, count)
	}
}
// writeDataTag emits a raw data object: counted marker plus the bytes.
func (p *bplistGenerator) writeDataTag(data []byte) {
	p.writeCountedTag(bpTagData, uint64(len(data)))
	binary.Write(p.writer, binary.BigEndian, data)
}

// writeStringTag emits a string as ASCII when every rune fits in 7
// bits, otherwise as UTF-16 (counted in code units, not bytes).
func (p *bplistGenerator) writeStringTag(str string) {
	for _, r := range str {
		if r > 0x7F {
			utf16Runes := utf16.Encode([]rune(str))
			p.writeCountedTag(bpTagUTF16String, uint64(len(utf16Runes)))
			binary.Write(p.writer, binary.BigEndian, utf16Runes)
			return
		}
	}
	p.writeCountedTag(bpTagASCIIString, uint64(len(str)))
	binary.Write(p.writer, binary.BigEndian, []byte(str))
}
// writeDictionaryTag emits a dictionary: counted marker, then all key
// refs followed by all value refs, each a fixed-width object ID.
func (p *bplistGenerator) writeDictionaryTag(dict *cfDictionary) {
	// assumption: sorted already; flattenPlistValue did this.
	cnt := len(dict.keys)
	p.writeCountedTag(bpTagDictionary, uint64(cnt))
	vals := make([]uint64, cnt*2)
	for i, k := range dict.keys {
		// invariant: keys have already been "uniqued" (as PStrings)
		keyIdx, ok := p.objmap[cfString(k).hash()]
		if !ok {
			panic(errors.New("failed to find key " + k + " in object map during serialization"))
		}
		vals[i] = keyIdx
	}
	for i, v := range dict.values {
		// invariant: values have already been "uniqued"
		objIdx, ok := p.indexForPlistValue(v)
		if !ok {
			panic(errors.New("failed to find value in object map during serialization"))
		}
		vals[i+cnt] = objIdx
	}
	for _, v := range vals {
		p.writeSizedInt(v, int(p.trailer.ObjectRefSize))
	}
}

// writeArrayTag emits an array: counted marker followed by the object
// ID of each element.
func (p *bplistGenerator) writeArrayTag(arr []cfValue) {
	p.writeCountedTag(bpTagArray, uint64(len(arr)))
	for _, v := range arr {
		objIdx, ok := p.indexForPlistValue(v)
		if !ok {
			panic(errors.New("failed to find value in object map during serialization"))
		}
		p.writeSizedInt(objIdx, int(p.trailer.ObjectRefSize))
	}
}
// Indent satisfies the generator interface; the binary format has no
// textual layout, so the indent string is ignored.
func (p *bplistGenerator) Indent(i string) {
	// There's nothing to indent.
}

// newBplistGenerator returns a binary plist generator writing to w,
// wrapped in a byte-counting writer so object offsets can be recorded.
func newBplistGenerator(w io.Writer) *bplistGenerator {
	return &bplistGenerator{
		writer: &countedWriter{Writer: mustWriter{w}},
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_tables.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/text_tables.go | package plist
// characterSet is a 256-bit bitmap over 8-bit character values.
type characterSet [4]uint64

// Contains reports whether ch is an 8-bit character present in the set;
// runes outside 0-255 are never members.
func (s *characterSet) Contains(ch rune) bool {
	return ch >= 0 && ch <= 255 && s.ContainsByte(byte(ch))
}

// ContainsByte tests the bit corresponding to ch.
func (s *characterSet) ContainsByte(ch byte) bool {
	return (s[ch/64]&(1<<(ch%64)) > 0)
}
// Bitmap of characters that must be inside a quoted string
// when written to an old-style property list
// Low bits represent lower characters, and each uint64 represents 64 characters.
var gsQuotable = characterSet{
	0x78001385ffffffff,
	0xa800000138000000,
	0xffffffffffffffff,
	0xffffffffffffffff,
}

// 7f instead of 3f in the top line: CFOldStylePlist.c says . is valid, but they quote it.
var osQuotable = characterSet{
	0xf4007f6fffffffff,
	0xf8000001f8000001,
	0xffffffffffffffff,
	0xffffffffffffffff,
}

// Whitespace characters recognized by the text parser.
var whitespace = characterSet{
	0x0000000100003f00,
	0x0000000000000000,
	0x0000000000000000,
	0x0000000000000000,
}

// Line terminators (bits 10 and 13: LF and CR) for // comment scanning.
var newlineCharacterSet = characterSet{
	0x0000000000002400,
	0x0000000000000000,
	0x0000000000000000,
	0x0000000000000000,
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/zerocopy.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/zerocopy.go | // +build !appengine
package plist
import (
"reflect"
"unsafe"
)
// zeroCopy8BitString reinterprets buf[off:off+len] as a string without
// copying, by aliasing a string header onto the byte slice's storage.
// The caller must not mutate buf afterwards, or the "immutable" string
// will change underfoot.
// NOTE(review): reflect.StringHeader is deprecated in modern Go;
// unsafe.String (Go 1.20+) is the current idiom — confirm the module's
// minimum Go version before migrating.
func zeroCopy8BitString(buf []byte, off int, len int) string {
	if len == 0 {
		return ""
	}
	var s string
	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	hdr.Data = uintptr(unsafe.Pointer(&buf[off]))
	hdr.Len = len
	return s
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist.go | package plist
// bplistTrailer is the fixed 32-byte structure at the end of a binary
// plist describing how to locate every object in the file.
type bplistTrailer struct {
	Unused            [5]uint8
	SortVersion       uint8
	OffsetIntSize     uint8  // byte width of each offset-table entry
	ObjectRefSize     uint8  // byte width of each object reference
	NumObjects        uint64
	TopObject         uint64 // object ID of the document root
	OffsetTableOffset uint64 // file offset of the offset table
}

// Object marker bytes: the high nybble selects the type; the low nybble
// carries a small value or element count.
const (
	bpTagNull        uint8 = 0x00
	bpTagBoolFalse         = 0x08
	bpTagBoolTrue          = 0x09
	bpTagInteger           = 0x10
	bpTagReal              = 0x20
	bpTagDate              = 0x30
	bpTagData              = 0x40
	bpTagASCIIString       = 0x50
	bpTagUTF16String       = 0x60
	bpTagUID               = 0x80
	bpTagArray             = 0xA0
	bpTagDictionary        = 0xD0
)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/plist.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/plist.go | package plist
import (
"reflect"
)
// Property list format constants
const (
	// Used by Decoder to represent an invalid property list.
	InvalidFormat int = 0

	// Used to indicate total abandon with regards to Encoder's output format.
	AutomaticFormat = 0

	XMLFormat      = 1
	BinaryFormat   = 2
	OpenStepFormat = 3
	GNUStepFormat  = 4
)

// FormatNames maps the format constants above to human-readable names.
var FormatNames = map[int]string{
	InvalidFormat:  "unknown/invalid",
	XMLFormat:      "XML",
	BinaryFormat:   "Binary",
	OpenStepFormat: "OpenStep",
	GNUStepFormat:  "GNUStep",
}
// unknownTypeError reports an attempt to marshal an unsupported Go type.
type unknownTypeError struct {
	typ reflect.Type
}

func (u *unknownTypeError) Error() string {
	return "plist: can't marshal value of type " + u.typ.String()
}

// invalidPlistError indicates the input was not a property list of the
// named format at all (as opposed to a malformed one).
type invalidPlistError struct {
	format string
	err    error
}

func (e invalidPlistError) Error() string {
	s := "plist: invalid " + e.format + " property list"
	if e.err != nil {
		s += ": " + e.err.Error()
	}
	return s
}

// plistParseError wraps an error encountered while parsing a document
// whose format had already been identified.
type plistParseError struct {
	format string
	err    error
}

func (e plistParseError) Error() string {
	s := "plist: error parsing " + e.format + " property list"
	if e.err != nil {
		s += ": " + e.err.Error()
	}
	return s
}
// A UID represents a unique object identifier. UIDs are serialized in a manner distinct from
// that of integers.
//
// UIDs cannot be serialized in OpenStepFormat or GNUStepFormat property lists.
type UID uint64

// Marshaler is the interface implemented by types that can marshal themselves into valid
// property list objects. The returned value is marshaled in place of the original value
// implementing Marshaler
//
// If an error is returned by MarshalPlist, marshaling stops and the error is returned.
type Marshaler interface {
	MarshalPlist() (interface{}, error)
}

// Unmarshaler is the interface implemented by types that can unmarshal themselves from
// property list objects. The UnmarshalPlist method receives a function that may
// be called to unmarshal the original property list value into a field or variable.
//
// It is safe to call the unmarshal function more than once.
type Unmarshaler interface {
	UnmarshalPlist(unmarshal func(interface{}) error) error
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist_parser.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/bplist_parser.go | package plist
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"runtime"
"time"
"unicode/utf16"
)
const (
	// signedHighBits is an all-ones 64-bit mask; presumably used to
	// sign-extend negative integers during parsing — confirm against
	// the rest of bplist_parser.go.
	signedHighBits = 0xFFFFFFFFFFFFFFFF
)

// offset is a byte position within the binary plist buffer.
type offset uint64

// bplistParser parses the "bplist00" binary property list format.
type bplistParser struct {
	buffer []byte // the complete input document

	reader         io.ReadSeeker
	version        int       // format version from the magic ("00" -> 0)
	objects        []cfValue // object ID to object
	trailer        bplistTrailer
	trailerOffset  uint64   // byte offset of the 32-byte trailer
	containerStack []offset // slice of object offsets; manipulated during container deserialization
}
func (p *bplistParser) validateDocumentTrailer() {
if p.trailer.OffsetTableOffset >= p.trailerOffset {
panic(fmt.Errorf("offset table beyond beginning of trailer (0x%x, trailer@0x%x)", p.trailer.OffsetTableOffset, p.trailerOffset))
}
if p.trailer.OffsetTableOffset < 9 {
panic(fmt.Errorf("offset table begins inside header (0x%x)", p.trailer.OffsetTableOffset))
}
if p.trailerOffset > (p.trailer.NumObjects*uint64(p.trailer.OffsetIntSize))+p.trailer.OffsetTableOffset {
panic(errors.New("garbage between offset table and trailer"))
}
if p.trailer.OffsetTableOffset+(uint64(p.trailer.OffsetIntSize)*p.trailer.NumObjects) > p.trailerOffset {
panic(errors.New("offset table isn't long enough to address every object"))
}
maxObjectRef := uint64(1) << (8 * p.trailer.ObjectRefSize)
if p.trailer.NumObjects > maxObjectRef {
panic(fmt.Errorf("more objects (%v) than object ref size (%v bytes) can support", p.trailer.NumObjects, p.trailer.ObjectRefSize))
}
if p.trailer.OffsetIntSize < uint8(8) && (uint64(1)<<(8*p.trailer.OffsetIntSize)) <= p.trailer.OffsetTableOffset {
panic(errors.New("offset size isn't big enough to address entire file"))
}
if p.trailer.TopObject >= p.trailer.NumObjects {
panic(fmt.Errorf("top object #%d is out of range (only %d exist)", p.trailer.TopObject, p.trailer.NumObjects))
}
}
func (p *bplistParser) parseDocument() (pval cfValue, parseError error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
parseError = plistParseError{"binary", r.(error)}
}
}()
p.buffer, _ = ioutil.ReadAll(p.reader)
l := len(p.buffer)
if l < 40 {
panic(errors.New("not enough data"))
}
if !bytes.Equal(p.buffer[0:6], []byte{'b', 'p', 'l', 'i', 's', 't'}) {
panic(errors.New("incomprehensible magic"))
}
p.version = int(((p.buffer[6] - '0') * 10) + (p.buffer[7] - '0'))
if p.version > 1 {
panic(fmt.Errorf("unexpected version %d", p.version))
}
p.trailerOffset = uint64(l - 32)
p.trailer = bplistTrailer{
SortVersion: p.buffer[p.trailerOffset+5],
OffsetIntSize: p.buffer[p.trailerOffset+6],
ObjectRefSize: p.buffer[p.trailerOffset+7],
NumObjects: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+8:]),
TopObject: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+16:]),
OffsetTableOffset: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+24:]),
}
p.validateDocumentTrailer()
// INVARIANTS:
// - Entire offset table is before trailer
// - Offset table begins after header
// - Offset table can address entire document
// - Object IDs are big enough to support the number of objects in this plist
// - Top object is in range
p.objects = make([]cfValue, p.trailer.NumObjects)
pval = p.objectAtIndex(p.trailer.TopObject)
return
}
// parseSizedInteger returns a 128-bit integer as low64, high64
func (p *bplistParser) parseSizedInteger(off offset, nbytes int) (lo uint64, hi uint64, newOffset offset) {
// Per comments in CoreFoundation, format version 00 requires that all
// 1, 2 or 4-byte integers be interpreted as unsigned. 8-byte integers are
// signed (always?) and therefore must be sign extended here.
// negative 1, 2, or 4-byte integers are always emitted as 64-bit.
switch nbytes {
case 1:
lo, hi = uint64(p.buffer[off]), 0
case 2:
lo, hi = uint64(binary.BigEndian.Uint16(p.buffer[off:])), 0
case 4:
lo, hi = uint64(binary.BigEndian.Uint32(p.buffer[off:])), 0
case 8:
lo = binary.BigEndian.Uint64(p.buffer[off:])
if p.buffer[off]&0x80 != 0 {
// sign extend if lo is signed
hi = signedHighBits
}
case 16:
lo, hi = binary.BigEndian.Uint64(p.buffer[off+8:]), binary.BigEndian.Uint64(p.buffer[off:])
default:
panic(errors.New("illegal integer size"))
}
newOffset = off + offset(nbytes)
return
}
func (p *bplistParser) parseObjectRefAtOffset(off offset) (uint64, offset) {
oid, _, next := p.parseSizedInteger(off, int(p.trailer.ObjectRefSize))
return oid, next
}
func (p *bplistParser) parseOffsetAtOffset(off offset) (offset, offset) {
parsedOffset, _, next := p.parseSizedInteger(off, int(p.trailer.OffsetIntSize))
return offset(parsedOffset), next
}
func (p *bplistParser) objectAtIndex(index uint64) cfValue {
if index >= p.trailer.NumObjects {
panic(fmt.Errorf("invalid object#%d (max %d)", index, p.trailer.NumObjects))
}
if pval := p.objects[index]; pval != nil {
return pval
}
off, _ := p.parseOffsetAtOffset(offset(p.trailer.OffsetTableOffset + (index * uint64(p.trailer.OffsetIntSize))))
if off > offset(p.trailer.OffsetTableOffset-1) {
panic(fmt.Errorf("object#%d starts beyond beginning of object table (0x%x, table@0x%x)", index, off, p.trailer.OffsetTableOffset))
}
pval := p.parseTagAtOffset(off)
p.objects[index] = pval
return pval
}
func (p *bplistParser) pushNestedObject(off offset) {
for _, v := range p.containerStack {
if v == off {
p.panicNestedObject(off)
}
}
p.containerStack = append(p.containerStack, off)
}
func (p *bplistParser) panicNestedObject(off offset) {
ids := ""
for _, v := range p.containerStack {
ids += fmt.Sprintf("0x%x > ", v)
}
// %s0x%d: ids above ends with " > "
panic(fmt.Errorf("self-referential collection@0x%x (%s0x%x) cannot be deserialized", off, ids, off))
}
func (p *bplistParser) popNestedObject() {
p.containerStack = p.containerStack[:len(p.containerStack)-1]
}
func (p *bplistParser) parseTagAtOffset(off offset) cfValue {
tag := p.buffer[off]
switch tag & 0xF0 {
case bpTagNull:
switch tag & 0x0F {
case bpTagBoolTrue, bpTagBoolFalse:
return cfBoolean(tag == bpTagBoolTrue)
}
case bpTagInteger:
lo, hi, _ := p.parseIntegerAtOffset(off)
return &cfNumber{
signed: hi == signedHighBits, // a signed integer is stored as a 128-bit integer with the top 64 bits set
value: lo,
}
case bpTagReal:
nbytes := 1 << (tag & 0x0F)
switch nbytes {
case 4:
bits := binary.BigEndian.Uint32(p.buffer[off+1:])
return &cfReal{wide: false, value: float64(math.Float32frombits(bits))}
case 8:
bits := binary.BigEndian.Uint64(p.buffer[off+1:])
return &cfReal{wide: true, value: math.Float64frombits(bits)}
}
panic(errors.New("illegal float size"))
case bpTagDate:
bits := binary.BigEndian.Uint64(p.buffer[off+1:])
val := math.Float64frombits(bits)
// Apple Epoch is 20110101000000Z
// Adjust for UNIX Time
val += 978307200
sec, fsec := math.Modf(val)
time := time.Unix(int64(sec), int64(fsec*float64(time.Second))).In(time.UTC)
return cfDate(time)
case bpTagData:
data := p.parseDataAtOffset(off)
return cfData(data)
case bpTagASCIIString:
str := p.parseASCIIStringAtOffset(off)
return cfString(str)
case bpTagUTF16String:
str := p.parseUTF16StringAtOffset(off)
return cfString(str)
case bpTagUID: // Somehow different than int: low half is nbytes - 1 instead of log2(nbytes)
lo, _, _ := p.parseSizedInteger(off+1, int(tag&0xF)+1)
return cfUID(lo)
case bpTagDictionary:
return p.parseDictionaryAtOffset(off)
case bpTagArray:
return p.parseArrayAtOffset(off)
}
panic(fmt.Errorf("unexpected atom 0x%2.02x at offset 0x%x", tag, off))
}
func (p *bplistParser) parseIntegerAtOffset(off offset) (uint64, uint64, offset) {
tag := p.buffer[off]
return p.parseSizedInteger(off+1, 1<<(tag&0xF))
}
func (p *bplistParser) countForTagAtOffset(off offset) (uint64, offset) {
tag := p.buffer[off]
cnt := uint64(tag & 0x0F)
if cnt == 0xF {
cnt, _, off = p.parseIntegerAtOffset(off + 1)
return cnt, off
}
return cnt, off + 1
}
func (p *bplistParser) parseDataAtOffset(off offset) []byte {
len, start := p.countForTagAtOffset(off)
if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
panic(fmt.Errorf("data@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
}
return p.buffer[start : start+offset(len)]
}
func (p *bplistParser) parseASCIIStringAtOffset(off offset) string {
len, start := p.countForTagAtOffset(off)
if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
panic(fmt.Errorf("ascii string@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
}
return zeroCopy8BitString(p.buffer, int(start), int(len))
}
func (p *bplistParser) parseUTF16StringAtOffset(off offset) string {
len, start := p.countForTagAtOffset(off)
bytes := len * 2
if start+offset(bytes) > offset(p.trailer.OffsetTableOffset) {
panic(fmt.Errorf("utf16 string@0x%x too long (%v bytes, max is %v)", off, bytes, p.trailer.OffsetTableOffset-uint64(start)))
}
u16s := make([]uint16, len)
for i := offset(0); i < offset(len); i++ {
u16s[i] = binary.BigEndian.Uint16(p.buffer[start+(i*2):])
}
runes := utf16.Decode(u16s)
return string(runes)
}
func (p *bplistParser) parseObjectListAtOffset(off offset, count uint64) []cfValue {
if off+offset(count*uint64(p.trailer.ObjectRefSize)) > offset(p.trailer.OffsetTableOffset) {
panic(fmt.Errorf("list@0x%x length (%v) puts its end beyond the offset table at 0x%x", off, count, p.trailer.OffsetTableOffset))
}
objects := make([]cfValue, count)
next := off
var oid uint64
for i := uint64(0); i < count; i++ {
oid, next = p.parseObjectRefAtOffset(next)
objects[i] = p.objectAtIndex(oid)
}
return objects
}
func (p *bplistParser) parseDictionaryAtOffset(off offset) *cfDictionary {
p.pushNestedObject(off)
defer p.popNestedObject()
// a dictionary is an object list of [key key key val val val]
cnt, start := p.countForTagAtOffset(off)
objects := p.parseObjectListAtOffset(start, cnt*2)
keys := make([]string, cnt)
for i := uint64(0); i < cnt; i++ {
if str, ok := objects[i].(cfString); ok {
keys[i] = string(str)
} else {
panic(fmt.Errorf("dictionary@0x%x contains non-string key at index %d", off, i))
}
}
return &cfDictionary{
keys: keys,
values: objects[cnt:],
}
}
func (p *bplistParser) parseArrayAtOffset(off offset) *cfArray {
p.pushNestedObject(off)
defer p.popNestedObject()
// an array is just an object list
cnt, start := p.countForTagAtOffset(off)
return &cfArray{p.parseObjectListAtOffset(start, cnt)}
}
func newBplistParser(r io.ReadSeeker) *bplistParser {
return &bplistParser{reader: r}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/must.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/must.go | package plist
import (
"io"
"strconv"
)
type mustWriter struct {
io.Writer
}
func (w mustWriter) Write(p []byte) (int, error) {
n, err := w.Writer.Write(p)
if err != nil {
panic(err)
}
return n, nil
}
func mustParseInt(str string, base, bits int) int64 {
i, err := strconv.ParseInt(str, base, bits)
if err != nil {
panic(err)
}
return i
}
func mustParseUint(str string, base, bits int) uint64 {
i, err := strconv.ParseUint(str, base, bits)
if err != nil {
panic(err)
}
return i
}
func mustParseFloat(str string, bits int) float64 {
i, err := strconv.ParseFloat(str, bits)
if err != nil {
panic(err)
}
return i
}
func mustParseBool(str string) bool {
i, err := strconv.ParseBool(str)
if err != nil {
panic(err)
}
return i
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/doc.go | // Package plist implements encoding and decoding of Apple's "property list" format.
// Property lists come in three sorts: plain text (GNUStep and OpenStep), XML and binary.
// plist supports all of them.
// The mapping between property list and Go objects is described in the documentation for the Marshal and Unmarshal functions.
package plist
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/decode.go | cmd/vsphere-xcopy-volume-populator/vendor/howett.net/plist/decode.go | package plist
import (
"bytes"
"io"
"reflect"
"runtime"
)
type parser interface {
parseDocument() (cfValue, error)
}
// A Decoder reads a property list from an input stream.
type Decoder struct {
// the format of the most-recently-decoded property list
Format int
reader io.ReadSeeker
lax bool
}
// Decode works like Unmarshal, except it reads the decoder stream to find property list elements.
//
// After Decoding, the Decoder's Format field will be set to one of the plist format constants.
func (p *Decoder) Decode(v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
err = r.(error)
}
}()
header := make([]byte, 6)
p.reader.Read(header)
p.reader.Seek(0, 0)
var parser parser
var pval cfValue
if bytes.Equal(header, []byte("bplist")) {
parser = newBplistParser(p.reader)
pval, err = parser.parseDocument()
if err != nil {
// Had a bplist header, but still got an error: we have to die here.
return err
}
p.Format = BinaryFormat
} else {
parser = newXMLPlistParser(p.reader)
pval, err = parser.parseDocument()
if _, ok := err.(invalidPlistError); ok {
// Rewind: the XML parser might have exhausted the file.
p.reader.Seek(0, 0)
// We don't use parser here because we want the textPlistParser type
tp := newTextPlistParser(p.reader)
pval, err = tp.parseDocument()
if err != nil {
return err
}
p.Format = tp.format
if p.Format == OpenStepFormat {
// OpenStep property lists can only store strings,
// so we have to turn on lax mode here for the unmarshal step later.
p.lax = true
}
} else {
if err != nil {
return err
}
p.Format = XMLFormat
}
}
p.unmarshal(pval, reflect.ValueOf(v))
return
}
// NewDecoder returns a Decoder that reads property list elements from a stream reader, r.
// NewDecoder requires a Seekable stream for the purposes of file type detection.
func NewDecoder(r io.ReadSeeker) *Decoder {
return &Decoder{Format: InvalidFormat, reader: r, lax: false}
}
// Unmarshal parses a property list document and stores the result in the value pointed to by v.
//
// Unmarshal uses the inverse of the type encodings that Marshal uses, allocating heap-borne types as necessary.
//
// When given a nil pointer, Unmarshal allocates a new value for it to point to.
//
// To decode property list values into an interface value, Unmarshal decodes the property list into the concrete value contained
// in the interface value. If the interface value is nil, Unmarshal stores one of the following in the interface value:
//
// string, bool, uint64, float64
// plist.UID for "CoreFoundation Keyed Archiver UIDs" (convertible to uint64)
// []byte, for plist data
// []interface{}, for plist arrays
// map[string]interface{}, for plist dictionaries
//
// If a property list value is not appropriate for a given value type, Unmarshal aborts immediately and returns an error.
//
// As Go does not support 128-bit types, and we don't want to pretend we're giving the user integer types (as opposed to
// secretly passing them structs), Unmarshal will drop the high 64 bits of any 128-bit integers encoded in binary property lists.
// (This is important because CoreFoundation serializes some large 64-bit values as 128-bit values with an empty high half.)
//
// When Unmarshal encounters an OpenStep property list, it will enter a relaxed parsing mode: OpenStep property lists can only store
// plain old data as strings, so we will attempt to recover integer, floating-point, boolean and date values wherever they are necessary.
// (for example, if Unmarshal attempts to unmarshal an OpenStep property list into a time.Time, it will try to parse the string it
// receives as a time.)
//
// Unmarshal returns the detected property list format and an error, if any.
func Unmarshal(data []byte, v interface{}) (format int, err error) {
r := bytes.NewReader(data)
dec := NewDecoder(r)
err = dec.Decode(v)
format = dec.Format
return
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go | package api
import (
conditions "github.com/openshift/custom-resource-status/conditions/v1"
corev1 "k8s.io/api/core/v1"
)
// Phase is the current phase of the deployment
type Phase string
const (
// PhaseDeploying signals that the resources are being deployed
PhaseDeploying Phase = "Deploying"
// PhaseDeployed signals that the resources are successfully deployed
PhaseDeployed Phase = "Deployed"
// PhaseDeleting signals that the resources are being removed
PhaseDeleting Phase = "Deleting"
// PhaseDeleted signals that the resources are deleted
PhaseDeleted Phase = "Deleted"
// PhaseError signals that the deployment is in an error state
PhaseError Phase = "Error"
// PhaseUpgrading signals that the resources are being deployed
PhaseUpgrading Phase = "Upgrading"
// PhaseEmpty is an uninitialized phase
PhaseEmpty Phase = ""
)
// Status represents status of a operator configuration resource; must be inlined in the operator configuration resource status
type Status struct {
Phase Phase `json:"phase,omitempty"`
// A list of current conditions of the resource
Conditions []conditions.Condition `json:"conditions,omitempty" optional:"true"`
// The version of the resource as defined by the operator
OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"`
// The desired version of the resource
TargetVersion string `json:"targetVersion,omitempty" optional:"true"`
// The observed version of the resource
ObservedVersion string `json:"observedVersion,omitempty" optional:"true"`
}
// NodePlacement describes node scheduling configuration.
// +k8s:openapi-gen=true
type NodePlacement struct {
// nodeSelector is the node selector applied to the relevant kind of pods
// It specifies a map of key-value pairs: for the pod to be eligible to run on a node,
// the node must have each of the indicated key-value pairs as labels
// (it can have additional labels as well).
// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
// +kubebuilder:validation:Optional
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// affinity enables pod affinity/anti-affinity placement expanding the types of constraints
// that can be expressed with nodeSelector.
// affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector
// See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
// +kubebuilder:validation:Optional
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// tolerations is a list of tolerations applied to the relevant kind of pods
// See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.
// These are additional tolerations other than default ones.
// +kubebuilder:validation:Optional
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// DeepCopyInto is copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]conditions.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePlacement) DeepCopyInto(out *NodePlacement) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement.
func (in *NodePlacement) DeepCopy() *NodePlacement {
if in == nil {
return nil
}
out := new(NodePlacement)
in.DeepCopyInto(out)
return out
}
// SwaggerDoc provides documentation for NodePlacement
func (NodePlacement) SwaggerDoc() map[string]string {
return map[string]string{
"": "NodePlacement describes node scheduling configuration.",
"nodeSelector": "nodeSelector is the node selector applied to the relevant kind of pods\nIt specifies a map of key-value pairs: for the pod to be eligible to run on a node,\nthe node must have each of the indicated key-value pairs as labels\n(it can have additional labels as well).\nSee https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector\n+kubebuilder:validation:Optional\n+optional",
"affinity": "affinity enables pod affinity/anti-affinity placement expanding the types of constraints\nthat can be expressed with nodeSelector.\naffinity is going to be applied to the relevant kind of pods in parallel with nodeSelector\nSee https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity\n+kubebuilder:validation:Optional\n+optional",
"tolerations": "tolerations is a list of tolerations applied to the relevant kind of pods\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.\nThese are additional tolerations other than default ones.\n+kubebuilder:validation:Optional\n+optional",
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/register.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/register.go | package core
// GroupName is the group name use in this package
const GroupName = "kubevirt.io"
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/componentconfig.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/componentconfig.go | package v1
// This code is copied from
// https://github.com/kubevirt/controller-lifecycle-operator-sdk/blob/master/pkg/sdk/api/types.go
// in order to avoid dependency loops
import (
corev1 "k8s.io/api/core/v1"
)
// NodePlacement describes node scheduling configuration.
type NodePlacement struct {
// nodeSelector is the node selector applied to the relevant kind of pods
// It specifies a map of key-value pairs: for the pod to be eligible to run on a node,
// the node must have each of the indicated key-value pairs as labels
// (it can have additional labels as well).
// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
// +kubebuilder:validation:Optional
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// affinity enables pod affinity/anti-affinity placement expanding the types of constraints
// that can be expressed with nodeSelector.
// affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector
// See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
// +kubebuilder:validation:Optional
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// tolerations is a list of tolerations applied to the relevant kind of pods
// See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.
// These are additional tolerations other than default ones.
// +kubebuilder:validation:Optional
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
type ComponentConfig struct {
// nodePlacement describes scheduling configuration for specific
// KubeVirt components
//+optional
NodePlacement *NodePlacement `json:"nodePlacement,omitempty"`
// replicas indicates how many replicas should be created for each KubeVirt infrastructure
// component (like virt-api or virt-controller). Defaults to 2.
// WARNING: this is an advanced feature that prevents auto-scaling for core kubevirt components. Please use with caution!
//+optional
Replicas *uint8 `json:"replicas,omitempty"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/types_swagger_generated.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/types_swagger_generated.go | // Code generated by swagger-doc. DO NOT EDIT.
package v1
func (VirtualMachineInstance) SwaggerDoc() map[string]string {
return map[string]string{
"": "VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient\n+genclient:noStatus",
"spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.",
"status": "Status is the high level overview of how the VirtualMachineInstance is doing. It contains information available to controllers and users.",
}
}
func (VirtualMachineInstanceList) SwaggerDoc() map[string]string {
return map[string]string{
"": "VirtualMachineInstanceList is a list of VirtualMachines\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
}
}
func (VirtualMachineInstanceSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "VirtualMachineInstanceSpec is a description of a VirtualMachineInstance.",
"priorityClassName": "If specified, indicates the pod's priority.\nIf not specified, the pod priority will be default or zero if there is no\ndefault.\n+optional",
"domain": "Specification of the desired behavior of the VirtualMachineInstance on the host.",
"nodeSelector": "NodeSelector is a selector which must be true for the vmi to fit on a node.\nSelector which must match a node's labels for the vmi to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n+optional",
"affinity": "If affinity is specifies, obey all the affinity rules",
"schedulerName": "If specified, the VMI will be dispatched by specified scheduler.\nIf not specified, the VMI will be dispatched by default scheduler.\n+optional",
"tolerations": "If toleration is specified, obey all the toleration rules.",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of VMIs will be spread across a given topology\ndomains. K8s scheduler will schedule VMI pods in a way which abides by the constraints.\n+optional\n+patchMergeKey=topologyKey\n+patchStrategy=merge\n+listType=map\n+listMapKey=topologyKey\n+listMapKey=whenUnsatisfiable",
"evictionStrategy": "EvictionStrategy describes the strategy to follow when a node drain occurs.\nThe possible options are:\n- \"None\": No action will be taken, according to the specified 'RunStrategy' the VirtualMachine will be restarted or shutdown.\n- \"LiveMigrate\": the VirtualMachineInstance will be migrated instead of being shutdown.\n- \"LiveMigrateIfPossible\": the same as \"LiveMigrate\" but only if the VirtualMachine is Live-Migratable, otherwise it will behave as \"None\".\n- \"External\": the VirtualMachineInstance will be protected and `vmi.Status.EvacuationNodeName` will be set on eviction. This is mainly useful for cluster-api-provider-kubevirt (capk) which needs a way for VMI's to be blocked from eviction, yet signal capk that eviction has been called on the VMI so the capk controller can handle tearing the VMI down. Details can be found in the commit description https://github.com/kubevirt/kubevirt/commit/c1d77face705c8b126696bac9a3ee3825f27f1fa.\n+optional",
"startStrategy": "StartStrategy can be set to \"Paused\" if Virtual Machine should be started in paused state.\n\n+optional",
"terminationGracePeriodSeconds": "Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.",
"volumes": "List of volumes that can be mounted by disks belonging to the vmi.\n+kubebuilder:validation:MaxItems:=256",
"livenessProbe": "Periodic probe of VirtualMachineInstance liveness.\nVirtualmachineInstances will be stopped if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional",
"readinessProbe": "Periodic probe of VirtualMachineInstance service readiness.\nVirtualmachineInstances will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional",
"hostname": "Specifies the hostname of the vmi\nIf not specified, the hostname will be set to the name of the vmi, if dhcp or cloud-init is configured properly.\n+optional",
"subdomain": "If specified, the fully qualified vmi hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\".\nIf not specified, the vmi will not have a domainname at all. The DNS entry will resolve to the vmi,\nno matter if the vmi itself can pick up a hostname.\n+optional",
"networks": "List of networks that can be attached to a vm's virtual interface.\n+kubebuilder:validation:MaxItems:=256",
"dnsPolicy": "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'.\n+optional",
"dnsConfig": "Specifies the DNS parameters of a pod.\nParameters specified here will be merged to the generated DNS\nconfiguration based on DNSPolicy.\n+optional",
"accessCredentials": "Specifies a set of public keys to inject into the vm guest\n+listType=atomic\n+optional\n+kubebuilder:validation:MaxItems:=256",
"architecture": "Specifies the architecture of the vm guest you are attempting to run. Defaults to the compiled architecture of the KubeVirt components",
"resourceClaims": "ResourceClaims define which ResourceClaims must be allocated\nand reserved before the VMI, hence virt-launcher pod is allowed to start. The resources\nwill be made available to the domain which consumes them\nby name.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate in kubernetes\n https://kubernetes.io/docs/concepts/scheduling-eviction/dynamic-resource-allocation/\nThis field should only be configured if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.\nThis feature is in alpha.\n\n+listType=map\n+listMapKey=name\n+optional",
}
}
func (VirtualMachineInstancePhaseTransitionTimestamp) SwaggerDoc() map[string]string {
return map[string]string{
"": "VirtualMachineInstancePhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi",
"phase": "Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.",
"phaseTransitionTimestamp": "PhaseTransitionTimestamp is the timestamp of when the phase change occurred",
}
}
func (TopologyHints) SwaggerDoc() map[string]string {
return map[string]string{}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceStatus
// fields, keyed by JSON field name ("" documents the type itself).
func (VirtualMachineInstanceStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstanceStatus represents information about the status of a VirtualMachineInstance. Status may trail the actual\nstate of a system.",
		"nodeName": "NodeName is the name where the VirtualMachineInstance is currently running.",
		"reason": "A brief CamelCase message indicating details about why the VMI is in this state. e.g. 'NodeUnresponsive'\n+optional",
		"conditions": "Conditions are specific points in VirtualMachineInstance's pod runtime.",
		"phase": "Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.",
		"phaseTransitionTimestamps": "PhaseTransitionTimestamp is the timestamp of when the last phase change occurred\n+listType=atomic\n+optional",
		"interfaces": "Interfaces represent the details of available network interfaces.",
		"guestOSInfo": "Guest OS Information",
		"migrationState": "Represents the status of a live migration",
		"migrationMethod": "Represents the method using which the vmi can be migrated: live migration or block migration",
		"migrationTransport": "This represents the migration transport",
		"qosClass": "The Quality of Service (QOS) classification assigned to the virtual machine instance based on resource requirements\nSee PodQOSClass type for available QOS classes\nMore info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md\n+optional",
		"launcherContainerImageVersion": "LauncherContainerImageVersion indicates what container image is currently active for the vmi.",
		"evacuationNodeName": "EvacuationNodeName is used to track the eviction process of a VMI. It stores the name of the node that we want\nto evacuate. It is meant to be used by KubeVirt core components only and can't be set or modified by users.\n+optional",
		"activePods": "ActivePods is a mapping of pod UID to node name.\nIt is possible for multiple pods to be running for a single VMI during migration.",
		"volumeStatus": "VolumeStatus contains the statuses of all the volumes\n+optional\n+listType=atomic",
		"kernelBootStatus": "KernelBootStatus contains info about the kernelBootContainer\n+optional",
		"fsFreezeStatus": "FSFreezeStatus indicates whether a freeze operation was requested for the guest filesystem.\nIt will be set to \"frozen\" if the request was made, or unset otherwise.\nThis does not reflect the actual state of the guest filesystem.\n+optional",
		"topologyHints": "+optional",
		"virtualMachineRevisionName": "VirtualMachineRevisionName is used to get the vm revision of the vmi when doing\nan online vm snapshot\n+optional",
		"runtimeUser": "RuntimeUser is used to determine what user will be used in launcher\n+optional",
		"VSOCKCID": "VSOCKCID is used to track the allocated VSOCK CID in the VM.\n+optional",
		"selinuxContext": "SELinuxContext is the actual SELinux context of the virt-launcher pod\n+optional",
		"machine": "Machine shows the final resulting qemu machine type. This can be different\nthan the machine type selected in the spec, due to qemus machine type alias mechanism.\n+optional",
		"currentCPUTopology": "CurrentCPUTopology specifies the current CPU topology used by the VM workload.\nCurrent topology may differ from the desired topology in the spec while CPU hotplug\ntakes place.",
		// Typo fix in published doc text: "informations" -> "information".
		// NOTE(review): this file is generated; fix the source comment on the
		// Memory field of VirtualMachineInstanceStatus and rerun generators too.
		"memory": "Memory shows various information about the VirtualMachine memory.\n+optional",
		"migratedVolumes": "MigratedVolumes lists the source and destination volumes during the volume migration\n+listType=atomic\n+optional",
		"deviceStatus": "DeviceStatus reflects the state of devices requested in spec.domain.devices. This is an optional field available\nonly when DRA feature gate is enabled\nThis field will only be populated if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.\nThis feature is in alpha.\n+optional",
	}
}
// SwaggerDoc returns API documentation for DeviceStatus fields, keyed by
// JSON field name ("" documents the type itself).
func (DeviceStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DeviceStatus has the information of all devices allocated spec.domain.devices\n+k8s:openapi-gen=true",
		"gpuStatuses": "GPUStatuses reflects the state of GPUs requested in spec.domain.devices.gpus\n+listType=atomic\n+optional",
		"hostDeviceStatuses": "HostDeviceStatuses reflects the state of GPUs requested in spec.domain.devices.hostDevices\nDRA\n+listType=atomic\n+optional",
	}
}

// SwaggerDoc returns API documentation for DeviceStatusInfo fields, keyed by
// JSON field name.
func (DeviceStatusInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"name": "Name of the device as specified in spec.domain.devices.gpus.name or spec.domain.devices.hostDevices.name",
		"deviceResourceClaimStatus": "DeviceResourceClaimStatus reflects the DRA related information for the device",
	}
}
// SwaggerDoc returns API documentation for DeviceResourceClaimStatus fields,
// keyed by JSON field name ("" documents the type itself).
func (DeviceResourceClaimStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DeviceResourceClaimStatus has to be before SyncVMI call from virt-handler to virt-launcher",
		"name": "Name is the name of actual device on the host provisioned by the driver as reflected in resourceclaim.status\n+optional",
		"resourceClaimName": "ResourceClaimName is the name of the resource claims object used to provision this resource\n+optional",
		// Typo fix in published doc text: "copmonents" -> "components".
		// NOTE(review): generated file — also fix the source comment and rerun generators.
		"attributes": "Attributes are properties of the device that could be used by kubevirt and other components to learn more\nabout the device, like pciAddress or mdevUUID\n+optional",
	}
}
// SwaggerDoc returns API documentation for DeviceAttribute fields, keyed by
// JSON field name ("" documents the type itself).
func (DeviceAttribute) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DeviceAttribute must have exactly one field set.",
		"pciAddress": "PCIAddress is the PCIe bus address of the allocated device\n+optional",
		"mDevUUID": "MDevUUID is the mediated device uuid of the allocated device\n+optional",
	}
}

// SwaggerDoc returns API documentation for StorageMigratedVolumeInfo fields,
// keyed by JSON field name ("" documents the type itself).
func (StorageMigratedVolumeInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "StorageMigratedVolumeInfo tracks the information about the source and destination volumes during the volume migration",
		"volumeName": "VolumeName is the name of the volume that is being migrated",
		"sourcePVCInfo": "SourcePVCInfo contains the information about the source PVC",
		"destinationPVCInfo": "DestinationPVCInfo contains the information about the destination PVC",
	}
}
// SwaggerDoc returns API documentation for PersistentVolumeClaimInfo fields,
// keyed by JSON field name ("" documents the type itself).
func (PersistentVolumeClaimInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		// Typo fix in published doc text: "relavant" -> "relevant".
		// NOTE(review): generated file — also fix the source comment and rerun generators.
		"": "PersistentVolumeClaimInfo contains the relevant information virt-handler needs cached about a PVC",
		"claimName": "ClaimName is the name of the PVC",
		"accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+listType=atomic\n+optional",
		"volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional",
		"capacity": "Capacity represents the capacity set on the corresponding PVC status\n+optional",
		"requests": "Requests represents the resources requested by the corresponding PVC spec\n+optional",
		"preallocated": "Preallocated indicates if the PVC's storage is preallocated or not\n+optional",
		"filesystemOverhead": "Percentage of filesystem's size to be reserved when resizing the PVC\n+optional",
	}
}
// SwaggerDoc returns API documentation for VolumeStatus fields, keyed by
// JSON field name ("" documents the type itself).
func (VolumeStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VolumeStatus represents information about the status of volumes attached to the VirtualMachineInstance.",
		"name": "Name is the name of the volume",
		"target": "Target is the target name used when adding the volume to the VM, eg: vda",
		"phase": "Phase is the phase",
		"reason": "Reason is a brief description of why we are in the current hotplug volume phase",
		"message": "Message is a detailed message about the current hotplug volume phase",
		"persistentVolumeClaimInfo": "PersistentVolumeClaimInfo is information about the PVC that handler requires during start flow",
		"hotplugVolume": "If the volume is hotplug, this will contain the hotplug status.",
		"size": "Represents the size of the volume",
		"memoryDumpVolume": "If the volume is memorydump volume, this will contain the memorydump info.",
		"containerDiskVolume": "ContainerDiskVolume shows info about the containerdisk, if the volume is a containerdisk",
	}
}

// SwaggerDoc returns API documentation for KernelInfo fields, keyed by JSON field name.
func (KernelInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "KernelInfo show info about the kernel image",
		"checksum": "Checksum is the checksum of the kernel image",
	}
}

// SwaggerDoc returns API documentation for InitrdInfo fields, keyed by JSON field name.
func (InitrdInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "InitrdInfo show info about the initrd file",
		"checksum": "Checksum is the checksum of the initrd file",
	}
}

// SwaggerDoc returns API documentation for KernelBootStatus fields, keyed by JSON field name.
func (KernelBootStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "KernelBootStatus contains info about the kernelBootContainer",
		"kernelInfo": "KernelInfo show info about the kernel image",
		"initrdInfo": "InitrdInfo show info about the initrd file",
	}
}

// SwaggerDoc returns API documentation for DomainMemoryDumpInfo fields, keyed by JSON field name.
func (DomainMemoryDumpInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DomainMemoryDumpInfo represents the memory dump information",
		"startTimestamp": "StartTimestamp is the time when the memory dump started",
		"endTimestamp": "EndTimestamp is the time when the memory dump completed",
		"claimName": "ClaimName is the name of the pvc the memory was dumped to",
		"targetFileName": "TargetFileName is the name of the memory dump output",
	}
}

// SwaggerDoc returns API documentation for HotplugVolumeStatus fields, keyed by JSON field name.
func (HotplugVolumeStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "HotplugVolumeStatus represents the hotplug status of the volume",
		"attachPodName": "AttachPodName is the name of the pod used to attach the volume to the node.",
		"attachPodUID": "AttachPodUID is the UID of the pod used to attach the volume to the node.",
	}
}

// SwaggerDoc returns API documentation for ContainerDiskInfo fields, keyed by JSON field name.
func (ContainerDiskInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "ContainerDiskInfo shows info about the containerdisk",
		"checksum": "Checksum is the checksum of the rootdisk or kernel artifacts inside the containerdisk",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceCondition
// fields, keyed by JSON field name (only generator markers are published here).
func (VirtualMachineInstanceCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"lastProbeTime": "+nullable",
		"lastTransitionTime": "+nullable",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceMigrationCondition fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"lastProbeTime": "+nullable",
		"lastTransitionTime": "+nullable",
	}
}
// SwaggerDoc returns API documentation for
// VirtualMachineInstanceNetworkInterface fields, keyed by JSON field name.
func (VirtualMachineInstanceNetworkInterface) SwaggerDoc() map[string]string {
	return map[string]string{
		"ipAddress": "IP address of a Virtual Machine interface. It is always the first item of\nIPs",
		"mac": "Hardware address of a Virtual Machine interface",
		"name": "Name of the interface, corresponds to name of the network assigned to the interface",
		"ipAddresses": "List of all IP addresses of a Virtual Machine interface",
		"podInterfaceName": "PodInterfaceName represents the name of the pod network interface",
		"interfaceName": "The interface name inside the Virtual Machine",
		"infoSource": "Specifies the origin of the interface data collected. values: domain, guest-agent, multus-status.",
		"queueCount": "Specifies how many queues are allocated by MultiQueue",
		// Typo fix in published doc text: removed the stray backtick after "state".
		// NOTE(review): generated file — also fix the source comment and rerun generators.
		"linkState": "LinkState Reports the current operational link state. values: up, down.",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceGuestOSInfo
// fields, keyed by JSON field name.
func (VirtualMachineInstanceGuestOSInfo) SwaggerDoc() map[string]string {
	return map[string]string{
		"name": "Name of the Guest OS",
		"kernelRelease": "Guest OS Kernel Release",
		"version": "Guest OS Version",
		"prettyName": "Guest OS Pretty Name",
		"versionId": "Version ID of the Guest OS",
		"kernelVersion": "Kernel version of the Guest OS",
		"machine": "Machine type of the Guest OS",
		"id": "Guest OS Id",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceCommonMigrationState fields, keyed by JSON field name.
func (VirtualMachineInstanceCommonMigrationState) SwaggerDoc() map[string]string {
	return map[string]string{
		"node": "The source node that the VMI originated on",
		"pod": "The source pod that the VMI is originated on",
		"migrationUID": "The Source VirtualMachineInstanceMigration object associated with this migration",
		"domainName": "The name of the domain on the source libvirt domain",
		"domainNamespace": "Namespace used in the name of the source libvirt domain. Can be used to find and modify paths in the domain",
		"syncAddress": "The ip address/fqdn:port combination to use to synchronize the VMI with the target.",
		"persistentStatePVCName": "If the VMI being migrated uses persistent features (backend-storage), its source PVC name is saved here",
		"selinuxContext": "SELinuxContext is the actual SELinux context of the pod",
		"virtualMachineInstanceUID": "VirtualMachineInstanceUID is the UID of the target virtual machine instance",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceMigrationSourceState fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationSourceState) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "+k8s:openapi-gen=true",
		"nodeSelectors": "Node selectors needed by the target to start the receiving pod.",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceMigrationTargetState fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationTargetState) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "+k8s:openapi-gen=true",
		"domainReadyTimestamp": "The timestamp at which the target node detects the domain is active",
		"domainDetected": "The Target Node has seen the Domain Start Event",
		"nodeAddress": "The address of the target node to use for the migration",
		"directMigrationNodePorts": "The list of ports opened for live migration on the destination node",
		"attachmentPodUID": "The UID of the target attachment pod for hotplug volumes",
		"cpuSet": "If the VMI requires dedicated CPUs, this field will\nhold the dedicated CPU set on the target node\n+listType=atomic",
		"nodeTopology": "If the VMI requires dedicated CPUs, this field will\nhold the numa topology on the target node",
	}
}
// SwaggerDoc returns API documentation for
// VirtualMachineInstanceMigrationState fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationState) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "+k8s:openapi-gen=true",
		"startTimestamp": "The time the migration action began\n+nullable",
		"endTimestamp": "The time the migration action ended\n+nullable",
		"targetNodeDomainReadyTimestamp": "The timestamp at which the target node detects the domain is active",
		"targetNodeDomainDetected": "The Target Node has seen the Domain Start Event",
		"targetNodeAddress": "The address of the target node to use for the migration",
		"targetDirectMigrationNodePorts": "The list of ports opened for live migration on the destination node",
		"targetNode": "The target node that the VMI is moving to",
		"targetPod": "The target pod that the VMI is moving to",
		"targetAttachmentPodUID": "The UID of the target attachment pod for hotplug volumes",
		"sourceNode": "The source node that the VMI originated on",
		"completed": "Indicates the migration completed",
		"failed": "Indicates that the migration failed",
		"abortRequested": "Indicates that the migration has been requested to abort",
		"abortStatus": "Indicates the final status of the live migration abortion",
		"failureReason": "Contains the reason why the migration failed",
		"migrationUid": "The VirtualMachineInstanceMigration object associated with this migration",
		"mode": "Lets us know if the vmi is currently running pre or post copy migration",
		"migrationPolicyName": "Name of the migration policy. If string is empty, no policy is matched",
		"migrationConfiguration": "Migration configurations to apply",
		"targetCPUSet": "If the VMI requires dedicated CPUs, this field will\nhold the dedicated CPU set on the target node\n+listType=atomic",
		"targetNodeTopology": "If the VMI requires dedicated CPUs, this field will\nhold the numa topology on the target node",
		"sourcePersistentStatePVCName": "If the VMI being migrated uses persistent features (backend-storage), its source PVC name is saved here",
		"targetPersistentStatePVCName": "If the VMI being migrated uses persistent features (backend-storage), its target PVC name is saved here",
		"sourceState": "SourceState contains migration state managed by the source virt handler",
		"targetState": "TargetState contains migration state managed by the target virt handler",
		"migrationNetworkType": "The type of migration network, either 'pod' or 'migration'",
	}
}
// SwaggerDoc returns API documentation for VMISelector fields, keyed by JSON field name.
func (VMISelector) SwaggerDoc() map[string]string {
	return map[string]string{
		"name": "Name of the VirtualMachineInstance to migrate",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceReplicaSet
// fields, keyed by JSON field name ("" documents the type itself).
func (VirtualMachineInstanceReplicaSet) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient",
		"spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.",
		"status": "Status is the high level overview of how the VirtualMachineInstance is doing. It contains information available to controllers and users.\n+nullable",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceReplicaSetList
// fields, keyed by JSON field name.
func (VirtualMachineInstanceReplicaSetList) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VMIList is a list of VMIs\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceReplicaSetSpec
// fields, keyed by JSON field name.
func (VirtualMachineInstanceReplicaSetSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"replicas": "Number of desired pods. This is a pointer to distinguish between explicit\nzero and not specified. Defaults to 1.\n+optional",
		"selector": "Label selector for pods. Existing ReplicaSets whose pods are\nselected by this will be the ones affected by this deployment.",
		"template": "Template describes the pods that will be created.",
		"paused": "Indicates that the replica set is paused.\n+optional",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceReplicaSetStatus
// fields, keyed by JSON field name.
func (VirtualMachineInstanceReplicaSetStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).\n+optional",
		"readyReplicas": "The number of ready replicas for this replica set.\n+optional",
		"labelSelector": "Canonical form of the label selector for HPA which consumes it through the scale subresource.",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceReplicaSetCondition fields, keyed by JSON field name.
func (VirtualMachineInstanceReplicaSetCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"lastProbeTime": "+nullable",
		"lastTransitionTime": "+nullable",
	}
}

// SwaggerDoc returns API documentation for DataVolumeTemplateDummyStatus
// fields (none are defined for this type).
func (DataVolumeTemplateDummyStatus) SwaggerDoc() map[string]string {
	return map[string]string{}
}

// SwaggerDoc returns API documentation for DataVolumeTemplateSpec fields,
// keyed by JSON field name.
func (DataVolumeTemplateSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"spec": "DataVolumeSpec contains the DataVolume specification.",
		"status": "DataVolumeTemplateDummyStatus is here simply for backwards compatibility with\na previous API.\n+nullable\n+optional",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceTemplateSpec
// fields, keyed by JSON field name.
func (VirtualMachineInstanceTemplateSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"metadata": "+kubebuilder:pruning:PreserveUnknownFields\n+nullable",
		"spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceMigration
// fields, keyed by JSON field name ("" documents the type itself).
func (VirtualMachineInstanceMigration) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstanceMigration represents the object tracking a VMI's migration\nto another host in the cluster\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceMigrationList
// fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationList) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstanceMigrationList is a list of VirtualMachineMigrations\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceMigrationSpec
// fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"vmiName": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace",
		"addedNodeSelector": "AddedNodeSelector is an additional selector that can be used to\ncomplement a NodeSelector or NodeAffinity as set on the VM\nto restrict the set of allowed target nodes for a migration.\nIn case of key collisions, values set on the VM objects\nare going to be preserved to ensure that addedNodeSelector\ncan only restrict but not bypass constraints already set on the VM object.\n+optional",
		"sendTo": "If sendTo is specified, this VirtualMachineInstanceMigration will be considered the source",
		// Typo fix in published doc text: "receieve" -> "receive".
		// NOTE(review): generated file — also fix the source comment and rerun generators.
		"receive": "If receive is specified, this VirtualMachineInstanceMigration will be considered the target",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceMigrationSource
// fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"migrationID": "A unique identifier to identify this migration.",
		"connectURL": "The synchronization controller URL to connect to.",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstanceMigrationTarget
// fields, keyed by JSON field name.
func (VirtualMachineInstanceMigrationTarget) SwaggerDoc() map[string]string {
	return map[string]string{
		"migrationID": "A unique identifier to identify this migration.",
	}
}

// SwaggerDoc returns API documentation for
// VirtualMachineInstanceMigrationPhaseTransitionTimestamp fields, keyed by
// JSON field name ("" documents the type itself).
func (VirtualMachineInstanceMigrationPhaseTransitionTimestamp) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi",
		"phase": "Phase is the status of the VirtualMachineInstanceMigrationPhase in kubernetes world. It is not the VirtualMachineInstanceMigrationPhase status, but partially correlates to it.",
		"phaseTransitionTimestamp": "PhaseTransitionTimestamp is the timestamp of when the phase change occurred",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstanceMigrationStatus
// fields, keyed by JSON field name ("" documents the type itself).
func (VirtualMachineInstanceMigrationStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		// Typo fix in published doc text: "reprents" -> "represents".
		// NOTE(review): generated file — also fix the source comment and rerun generators.
		"": "VirtualMachineInstanceMigration represents information pertaining to a VMI's migration.",
		"phaseTransitionTimestamps": "PhaseTransitionTimestamp is the timestamp of when the last phase change occurred\n+listType=atomic\n+optional",
		"migrationState": "Represents the status of a live migration",
		"synchronizationAddresses": "The synchronization addresses one can use to connect to the synchronization controller, includes the port, if multiple\naddresses are available, the first one is reported in the synchronizationAddress field.\n+optional\n+listType=atomic",
	}
}
// SwaggerDoc returns API documentation for VirtualMachineInstancePreset
// fields, keyed by JSON field name ("" documents the type itself).
func (VirtualMachineInstancePreset) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "Deprecated for removal in v2, please use VirtualMachineInstanceType and VirtualMachinePreference instead.\n\nVirtualMachineInstancePreset defines a VMI spec.domain to be applied to all VMIs that match the provided label selector\nMore info: https://kubevirt.io/user-guide/virtual_machines/presets/#overrides\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient",
		"spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstancePresetList
// fields, keyed by JSON field name.
func (VirtualMachineInstancePresetList) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "VirtualMachineInstancePresetList is a list of VirtualMachinePresets\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
	}
}

// SwaggerDoc returns API documentation for VirtualMachineInstancePresetSpec
// fields, keyed by JSON field name.
func (VirtualMachineInstancePresetSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"selector": "Selector is a label query over a set of VMIs.\nRequired.",
		"domain": "Domain is the same object type as contained in VirtualMachineInstanceSpec",
	}
}
func (VirtualMachine) SwaggerDoc() map[string]string {
return map[string]string{
"": "VirtualMachine handles the VirtualMachines that are not running\nor are in a stopped state\nThe VirtualMachine contains the template to create the\nVirtualMachineInstance. It also mirrors the running state of the created\nVirtualMachineInstance in its status.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient",
"spec": "Spec contains the specification of VirtualMachineInstance created",
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/types.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/types.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package v1
/*
ATTENTION: Rerun code generators when comments on structs or fields are modified.
*/
import (
"encoding/json"
"fmt"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
// DefaultGracePeriodSeconds is the default VMI termination grace period, in
// seconds. NOTE(review): presumably applied when
// spec.terminationGracePeriodSeconds is unset — confirm against the
// controllers that consume it.
const DefaultGracePeriodSeconds int64 = 30
// VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.
//
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient
// +genclient:noStatus
type VirtualMachineInstance struct {
	// Standard Kubernetes type metadata (apiVersion/kind).
	metav1.TypeMeta `json:",inline"`
	// Standard Kubernetes object metadata (name, namespace, labels, ...).
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// VirtualMachineInstance Spec contains the VirtualMachineInstance specification.
	Spec VirtualMachineInstanceSpec `json:"spec" valid:"required"`
	// Status is the high level overview of how the VirtualMachineInstance is doing. It contains information available to controllers and users.
	Status VirtualMachineInstanceStatus `json:"status,omitempty"`
}
// MarshalBinary implements encoding.BinaryMarshaler by encoding the
// VirtualMachineInstance as JSON.
func (v *VirtualMachineInstance) MarshalBinary() (data []byte, err error) {
	return json.Marshal(*v)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler by decoding the
// VirtualMachineInstance from its JSON representation.
func (v *VirtualMachineInstance) UnmarshalBinary(data []byte) error {
	return json.Unmarshal(data, v)
}
// VirtualMachineInstanceList is a list of VirtualMachines
//
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type VirtualMachineInstanceList struct {
	// Standard Kubernetes type metadata (apiVersion/kind).
	metav1.TypeMeta `json:",inline"`
	// Standard Kubernetes list metadata (resourceVersion, continue, ...).
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of VirtualMachineInstances contained in this list.
	Items []VirtualMachineInstance `json:"items"`
}
// EvictionStrategy names how a VirtualMachineInstance reacts to node
// eviction/drain; see VirtualMachineInstanceSpec.EvictionStrategy for the
// accepted values ("None", "LiveMigrate", "LiveMigrateIfPossible", "External").
type EvictionStrategy string

// StartStrategy controls the initial run state of a started VirtualMachine;
// see VirtualMachineInstanceSpec.StartStrategy.
type StartStrategy string

const (
	// StartStrategyPaused starts the Virtual Machine in a paused state.
	StartStrategyPaused StartStrategy = "Paused"
)
// VirtualMachineInstanceSpec is a description of a VirtualMachineInstance.
type VirtualMachineInstanceSpec struct {
	// If specified, indicates the pod's priority.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty"`
	// Specification of the desired behavior of the VirtualMachineInstance on the host.
	Domain DomainSpec `json:"domain"`
	// NodeSelector is a selector which must be true for the vmi to fit on a node.
	// Selector which must match a node's labels for the vmi to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If affinity is specified, obey all the affinity rules
	Affinity *k8sv1.Affinity `json:"affinity,omitempty"`
	// If specified, the VMI will be dispatched by specified scheduler.
	// If not specified, the VMI will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty"`
	// If toleration is specified, obey all the toleration rules.
	Tolerations []k8sv1.Toleration `json:"tolerations,omitempty"`
	// TopologySpreadConstraints describes how a group of VMIs will be spread across a given topology
	// domains. K8s scheduler will schedule VMI pods in a way which abides by the constraints.
	// +optional
	// +patchMergeKey=topologyKey
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=topologyKey
	// +listMapKey=whenUnsatisfiable
	TopologySpreadConstraints []k8sv1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey"`
	// EvictionStrategy describes the strategy to follow when a node drain occurs.
	// The possible options are:
	// - "None": No action will be taken, according to the specified 'RunStrategy' the VirtualMachine will be restarted or shutdown.
	// - "LiveMigrate": the VirtualMachineInstance will be migrated instead of being shutdown.
	// - "LiveMigrateIfPossible": the same as "LiveMigrate" but only if the VirtualMachine is Live-Migratable, otherwise it will behave as "None".
	// - "External": the VirtualMachineInstance will be protected and `vmi.Status.EvacuationNodeName` will be set on eviction. This is mainly useful for cluster-api-provider-kubevirt (capk) which needs a way for VMI's to be blocked from eviction, yet signal capk that eviction has been called on the VMI so the capk controller can handle tearing the VMI down. Details can be found in the commit description https://github.com/kubevirt/kubevirt/commit/c1d77face705c8b126696bac9a3ee3825f27f1fa.
	// +optional
	EvictionStrategy *EvictionStrategy `json:"evictionStrategy,omitempty"`
	// StartStrategy can be set to "Paused" if Virtual Machine should be started in paused state.
	//
	// +optional
	StartStrategy *StartStrategy `json:"startStrategy,omitempty"`
	// Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
	// List of volumes that can be mounted by disks belonging to the vmi.
	// +kubebuilder:validation:MaxItems:=256
	Volumes []Volume `json:"volumes,omitempty"`
	// Periodic probe of VirtualMachineInstance liveness.
	// VirtualmachineInstances will be stopped if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty"`
	// Periodic probe of VirtualMachineInstance service readiness.
	// VirtualmachineInstances will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
	// Specifies the hostname of the vmi
	// If not specified, the hostname will be set to the name of the vmi, if dhcp or cloud-init is configured properly.
	// +optional
	Hostname string `json:"hostname,omitempty"`
	// If specified, the fully qualified vmi hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the vmi will not have a domainname at all. The DNS entry will resolve to the vmi,
	// no matter if the vmi itself can pick up a hostname.
	// +optional
	Subdomain string `json:"subdomain,omitempty"`
	// List of networks that can be attached to a vm's virtual interface.
	// +kubebuilder:validation:MaxItems:=256
	Networks []Network `json:"networks,omitempty"`
	// Set DNS policy for the pod.
	// Defaults to "ClusterFirst".
	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
	// To have DNS options set along with hostNetwork, you have to specify DNS policy
	// explicitly to 'ClusterFirstWithHostNet'.
	// +optional
	DNSPolicy k8sv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// Specifies the DNS parameters of a pod.
	// Parameters specified here will be merged to the generated DNS
	// configuration based on DNSPolicy.
	// +optional
	DNSConfig *k8sv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
	// Specifies a set of public keys to inject into the vm guest
	// +listType=atomic
	// +optional
	// +kubebuilder:validation:MaxItems:=256
	AccessCredentials []AccessCredential `json:"accessCredentials,omitempty"`
	// Specifies the architecture of the vm guest you are attempting to run. Defaults to the compiled architecture of the KubeVirt components
	Architecture string `json:"architecture,omitempty"`
	// ResourceClaims define which ResourceClaims must be allocated
	// and reserved before the VMI, hence virt-launcher pod is allowed to start. The resources
	// will be made available to the domain which consumes them
	// by name.
	//
	// This is an alpha field and requires enabling the
	// DynamicResourceAllocation feature gate in kubernetes
	// https://kubernetes.io/docs/concepts/scheduling-eviction/dynamic-resource-allocation/
	// This field should only be configured if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.
	// This feature is in alpha.
	//
	// +listType=map
	// +listMapKey=name
	// +optional
	ResourceClaims []k8sv1.PodResourceClaim `json:"resourceClaims,omitempty"`
}
// UnmarshalJSON decodes a VirtualMachineInstanceSpec from JSON and then runs
// each DNSConfig nameserver through sanitizeIP (defined elsewhere in this
// package). Entries whose sanitization fails are deliberately kept unmodified
// (best-effort normalization); only the JSON decoding error is propagated.
func (vmiSpec *VirtualMachineInstanceSpec) UnmarshalJSON(data []byte) error {
	// The alias type drops this UnmarshalJSON method, avoiding infinite recursion.
	type VMISpecAlias VirtualMachineInstanceSpec
	var vmiSpecAlias VMISpecAlias
	if err := json.Unmarshal(data, &vmiSpecAlias); err != nil {
		return err
	}
	if vmiSpecAlias.DNSConfig != nil {
		for i, ns := range vmiSpecAlias.DNSConfig.Nameservers {
			// Best-effort: sanitize errors are intentionally ignored.
			if sanitizedIP, err := sanitizeIP(ns); err == nil {
				vmiSpecAlias.DNSConfig.Nameservers[i] = sanitizedIP
			}
		}
	}
	*vmiSpec = VirtualMachineInstanceSpec(vmiSpecAlias)
	return nil
}
// VirtualMachineInstancePhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi
type VirtualMachineInstancePhaseTransitionTimestamp struct {
	// Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.
	Phase VirtualMachineInstancePhase `json:"phase,omitempty"`
	// PhaseTransitionTimestamp is the timestamp of when the phase change occurred
	PhaseTransitionTimestamp metav1.Time `json:"phaseTransitionTimestamp,omitempty"`
}
// TopologyHints holds topology-related hints reported for the VMI.
type TopologyHints struct {
	// TSCFrequency is the TSC (time stamp counter) frequency for the guest,
	// presumably in Hz — confirm against the code that populates it.
	TSCFrequency *int64 `json:"tscFrequency,omitempty"`
}
// VirtualMachineInstanceStatus represents information about the status of a VirtualMachineInstance. Status may trail the actual
// state of a system.
type VirtualMachineInstanceStatus struct {
	// NodeName is the name where the VirtualMachineInstance is currently running.
	NodeName string `json:"nodeName,omitempty"`
	// A brief CamelCase message indicating details about why the VMI is in this state. e.g. 'NodeUnresponsive'
	// +optional
	Reason string `json:"reason,omitempty"`
	// Conditions are specific points in VirtualMachineInstance's pod runtime.
	Conditions []VirtualMachineInstanceCondition `json:"conditions,omitempty"`
	// Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.
	Phase VirtualMachineInstancePhase `json:"phase,omitempty"`
	// PhaseTransitionTimestamp is the timestamp of when the last phase change occurred
	// +listType=atomic
	// +optional
	PhaseTransitionTimestamps []VirtualMachineInstancePhaseTransitionTimestamp `json:"phaseTransitionTimestamps,omitempty"`
	// Interfaces represent the details of available network interfaces.
	Interfaces []VirtualMachineInstanceNetworkInterface `json:"interfaces,omitempty"`
	// Guest OS Information
	GuestOSInfo VirtualMachineInstanceGuestOSInfo `json:"guestOSInfo,omitempty"`
	// Represents the status of a live migration
	MigrationState *VirtualMachineInstanceMigrationState `json:"migrationState,omitempty"`
	// Represents the method using which the vmi can be migrated: live migration or block migration
	MigrationMethod VirtualMachineInstanceMigrationMethod `json:"migrationMethod,omitempty"`
	// This represents the migration transport
	MigrationTransport VirtualMachineInstanceMigrationTransport `json:"migrationTransport,omitempty"`
	// The Quality of Service (QOS) classification assigned to the virtual machine instance based on resource requirements
	// See PodQOSClass type for available QOS classes
	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
	// +optional
	QOSClass *k8sv1.PodQOSClass `json:"qosClass,omitempty"`
	// LauncherContainerImageVersion indicates what container image is currently active for the vmi.
	LauncherContainerImageVersion string `json:"launcherContainerImageVersion,omitempty"`
	// EvacuationNodeName is used to track the eviction process of a VMI. It stores the name of the node that we want
	// to evacuate. It is meant to be used by KubeVirt core components only and can't be set or modified by users.
	// +optional
	EvacuationNodeName string `json:"evacuationNodeName,omitempty"`
	// ActivePods is a mapping of pod UID to node name.
	// It is possible for multiple pods to be running for a single VMI during migration.
	ActivePods map[types.UID]string `json:"activePods,omitempty"`
	// VolumeStatus contains the statuses of all the volumes
	// +optional
	// +listType=atomic
	VolumeStatus []VolumeStatus `json:"volumeStatus,omitempty"`
	// KernelBootStatus contains info about the kernelBootContainer
	// +optional
	KernelBootStatus *KernelBootStatus `json:"kernelBootStatus,omitempty"`
	// FSFreezeStatus indicates whether a freeze operation was requested for the guest filesystem.
	// It will be set to "frozen" if the request was made, or unset otherwise.
	// This does not reflect the actual state of the guest filesystem.
	// +optional
	FSFreezeStatus string `json:"fsFreezeStatus,omitempty"`
	// +optional
	TopologyHints *TopologyHints `json:"topologyHints,omitempty"`
	// VirtualMachineRevisionName is used to get the vm revision of the vmi when doing
	// an online vm snapshot
	// +optional
	VirtualMachineRevisionName string `json:"virtualMachineRevisionName,omitempty"`
	// RuntimeUser is used to determine what user will be used in launcher
	// +optional
	RuntimeUser uint64 `json:"runtimeUser"`
	// VSOCKCID is used to track the allocated VSOCK CID in the VM.
	// +optional
	VSOCKCID *uint32 `json:"VSOCKCID,omitempty"`
	// SELinuxContext is the actual SELinux context of the virt-launcher pod
	// +optional
	SelinuxContext string `json:"selinuxContext,omitempty"`
	// Machine shows the final resulting qemu machine type. This can be different
	// than the machine type selected in the spec, due to qemus machine type alias mechanism.
	// +optional
	Machine *Machine `json:"machine,omitempty"`
	// CurrentCPUTopology specifies the current CPU topology used by the VM workload.
	// Current topology may differ from the desired topology in the spec while CPU hotplug
	// takes place.
	CurrentCPUTopology *CPUTopology `json:"currentCPUTopology,omitempty"`
	// Memory shows various information about the VirtualMachine memory.
	// +optional
	Memory *MemoryStatus `json:"memory,omitempty"`
	// MigratedVolumes lists the source and destination volumes during the volume migration
	// +listType=atomic
	// +optional
	MigratedVolumes []StorageMigratedVolumeInfo `json:"migratedVolumes,omitempty"`
	// DeviceStatus reflects the state of devices requested in spec.domain.devices. This is an optional field available
	// only when DRA feature gate is enabled
	// This field will only be populated if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.
	// This feature is in alpha.
	// +optional
	DeviceStatus *DeviceStatus `json:"deviceStatus,omitempty"`
}
// DeviceStatus has the information of all devices allocated spec.domain.devices
// +k8s:openapi-gen=true
type DeviceStatus struct {
	// GPUStatuses reflects the state of GPUs requested in spec.domain.devices.gpus
	// +listType=atomic
	// +optional
	GPUStatuses []DeviceStatusInfo `json:"gpuStatuses,omitempty"`
	// HostDeviceStatuses reflects the state of host devices requested in spec.domain.devices.hostDevices
	// via DRA
	// +listType=atomic
	// +optional
	HostDeviceStatuses []DeviceStatusInfo `json:"hostDeviceStatuses,omitempty"`
}
// DeviceStatusInfo reports the allocation state of a single requested device.
type DeviceStatusInfo struct {
	// Name of the device as specified in spec.domain.devices.gpus.name or spec.domain.devices.hostDevices.name
	Name string `json:"name"`
	// DeviceResourceClaimStatus reflects the DRA related information for the device
	DeviceResourceClaimStatus *DeviceResourceClaimStatus `json:"deviceResourceClaimStatus,omitempty"`
}
// DeviceResourceClaimStatus holds the DRA (Dynamic Resource Allocation) claim
// details for a device; it has to be populated before the SyncVMI call from
// virt-handler to virt-launcher.
type DeviceResourceClaimStatus struct {
	// Name is the name of actual device on the host provisioned by the driver as reflected in resourceclaim.status
	// +optional
	Name *string `json:"name,omitempty"`
	// ResourceClaimName is the name of the resource claims object used to provision this resource
	// +optional
	ResourceClaimName *string `json:"resourceClaimName,omitempty"`
	// Attributes are properties of the device that could be used by kubevirt and other components to learn more
	// about the device, like pciAddress or mdevUUID
	// +optional
	Attributes *DeviceAttribute `json:"attributes,omitempty"`
}
// DeviceAttribute must have exactly one field set.
type DeviceAttribute struct {
	// PCIAddress is the PCIe bus address of the allocated device
	// +optional
	PCIAddress *string `json:"pciAddress,omitempty"`
	// MDevUUID is the mediated device uuid of the allocated device
	// +optional
	MDevUUID *string `json:"mDevUUID,omitempty"`
}
// StorageMigratedVolumeInfo tracks the information about the source and destination volumes during the volume migration
type StorageMigratedVolumeInfo struct {
	// VolumeName is the name of the volume that is being migrated
	VolumeName string `json:"volumeName"`
	// SourcePVCInfo contains the information about the source PVC
	SourcePVCInfo *PersistentVolumeClaimInfo `json:"sourcePVCInfo,omitempty" valid:"required"`
	// DestinationPVCInfo contains the information about the destination PVC
	DestinationPVCInfo *PersistentVolumeClaimInfo `json:"destinationPVCInfo,omitempty" valid:"required"`
}
// PersistentVolumeClaimInfo contains the relevant information virt-handler needs cached about a PVC
type PersistentVolumeClaimInfo struct {
	// ClaimName is the name of the PVC
	ClaimName string `json:"claimName,omitempty"`
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +listType=atomic
	// +optional
	AccessModes []k8sv1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
	// VolumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// +optional
	VolumeMode *k8sv1.PersistentVolumeMode `json:"volumeMode,omitempty"`
	// Capacity represents the capacity set on the corresponding PVC status
	// +optional
	Capacity k8sv1.ResourceList `json:"capacity,omitempty"`
	// Requests represents the resources requested by the corresponding PVC spec
	// +optional
	Requests k8sv1.ResourceList `json:"requests,omitempty"`
	// Preallocated indicates if the PVC's storage is preallocated or not
	// +optional
	Preallocated bool `json:"preallocated,omitempty"`
	// Percentage of filesystem's size to be reserved when resizing the PVC
	// +optional
	FilesystemOverhead *Percent `json:"filesystemOverhead,omitempty"`
}
// Percent is a string holding a value between 0 and 1 with at most three
// decimal places; note the validation pattern below also admits exactly "1",
// so the range is effectively [0,1].
// +kubebuilder:validation:Pattern=`^(0(?:\.\d{1,3})?|1)$`
type Percent string
// VolumeStatus represents information about the status of volumes attached to the VirtualMachineInstance.
type VolumeStatus struct {
	// Name is the name of the volume
	Name string `json:"name"`
	// Target is the target name used when adding the volume to the VM, eg: vda
	Target string `json:"target"`
	// Phase is the current phase of the volume (see VolumePhase for the possible values)
	Phase VolumePhase `json:"phase,omitempty"`
	// Reason is a brief description of why we are in the current hotplug volume phase
	Reason string `json:"reason,omitempty"`
	// Message is a detailed message about the current hotplug volume phase
	Message string `json:"message,omitempty"`
	// PersistentVolumeClaimInfo is information about the PVC that handler requires during start flow
	PersistentVolumeClaimInfo *PersistentVolumeClaimInfo `json:"persistentVolumeClaimInfo,omitempty"`
	// If the volume is hotplug, this will contain the hotplug status.
	HotplugVolume *HotplugVolumeStatus `json:"hotplugVolume,omitempty"`
	// Represents the size of the volume
	Size int64 `json:"size,omitempty"`
	// If the volume is memorydump volume, this will contain the memorydump info.
	MemoryDumpVolume *DomainMemoryDumpInfo `json:"memoryDumpVolume,omitempty"`
	// ContainerDiskVolume shows info about the containerdisk, if the volume is a containerdisk
	ContainerDiskVolume *ContainerDiskInfo `json:"containerDiskVolume,omitempty"`
}
// KernelInfo shows info about the kernel image
type KernelInfo struct {
	// Checksum is the checksum of the kernel image
	Checksum uint32 `json:"checksum,omitempty"`
}
// InitrdInfo shows info about the initrd file
type InitrdInfo struct {
	// Checksum is the checksum of the initrd file
	Checksum uint32 `json:"checksum,omitempty"`
}
// KernelBootStatus contains info about the kernelBootContainer
type KernelBootStatus struct {
	// KernelInfo shows info about the kernel image
	KernelInfo *KernelInfo `json:"kernelInfo,omitempty"`
	// InitrdInfo shows info about the initrd file
	InitrdInfo *InitrdInfo `json:"initrdInfo,omitempty"`
}
// DomainMemoryDumpInfo represents the memory dump information
type DomainMemoryDumpInfo struct {
	// StartTimestamp is the time when the memory dump started
	StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
	// EndTimestamp is the time when the memory dump completed
	EndTimestamp *metav1.Time `json:"endTimestamp,omitempty"`
	// ClaimName is the name of the pvc the memory was dumped to
	ClaimName string `json:"claimName,omitempty"`
	// TargetFileName is the name of the memory dump output
	TargetFileName string `json:"targetFileName,omitempty"`
}
// HotplugVolumeStatus represents the hotplug status of the volume
type HotplugVolumeStatus struct {
	// AttachPodName is the name of the pod used to attach the volume to the node.
	AttachPodName string `json:"attachPodName,omitempty"`
	// AttachPodUID is the UID of the pod used to attach the volume to the node.
	AttachPodUID types.UID `json:"attachPodUID,omitempty"`
}
// ContainerDiskInfo shows info about the containerdisk
type ContainerDiskInfo struct {
	// Checksum is the checksum of the rootdisk or kernel artifacts inside the containerdisk
	Checksum uint32 `json:"checksum,omitempty"`
}
// VolumePhase indicates the current phase of the hotplug process.
type VolumePhase string
const (
	// VolumePending means the Volume is pending and cannot be attached to the node yet.
	VolumePending VolumePhase = "Pending"
	// VolumeBound means the Volume is bound and can be attached to the node.
	VolumeBound VolumePhase = "Bound"
	// HotplugVolumeAttachedToNode means the volume has been attached to the node.
	HotplugVolumeAttachedToNode VolumePhase = "AttachedToNode"
	// HotplugVolumeMounted means the volume has been attached to the node and is mounted to the virt-launcher pod.
	HotplugVolumeMounted VolumePhase = "MountedToPod"
	// VolumeReady means the volume is ready to be used by the VirtualMachineInstance.
	VolumeReady VolumePhase = "Ready"
	// HotplugVolumeDetaching means the volume is being detached from the node, and the attachment pod is being removed.
	HotplugVolumeDetaching VolumePhase = "Detaching"
	// HotplugVolumeUnMounted means the volume has been unmounted from the virt-launcher pod.
	HotplugVolumeUnMounted VolumePhase = "UnMountedFromPod"
	// MemoryDumpVolumeCompleted means that the requested memory dump was completed and the dump is ready in the volume
	MemoryDumpVolumeCompleted VolumePhase = "MemoryDumpCompleted"
	// MemoryDumpVolumeInProgress means that the volume for the memory dump was attached, and now the command is being triggered
	MemoryDumpVolumeInProgress VolumePhase = "MemoryDumpInProgress"
	// MemoryDumpVolumeFailed means that the memory dump failed
	MemoryDumpVolumeFailed VolumePhase = "MemoryDumpFailed"
)
// IsScheduling reports whether the VMI is in the Scheduling phase.
func (v *VirtualMachineInstance) IsScheduling() bool {
	return v.Status.Phase == Scheduling
}
// IsScheduled reports whether the VMI is in the Scheduled phase.
func (v *VirtualMachineInstance) IsScheduled() bool {
	return v.Status.Phase == Scheduled
}
// IsRunning reports whether the VMI is in the Running phase.
func (v *VirtualMachineInstance) IsRunning() bool {
	return v.Status.Phase == Running
}
// IsMarkedForEviction reports whether an evacuation node name has been
// recorded, i.e. eviction of this VMI has been requested.
func (v *VirtualMachineInstance) IsMarkedForEviction() bool {
	return v.Status.EvacuationNodeName != ""
}
// IsMigratable reports whether the LiveMigratable condition is present and true.
func (v *VirtualMachineInstance) IsMigratable() bool {
	for i := range v.Status.Conditions {
		cond := &v.Status.Conditions[i]
		if cond.Type == VirtualMachineInstanceIsMigratable && cond.Status == k8sv1.ConditionTrue {
			return true
		}
	}
	return false
}
// IsBlockMigration reports whether the VMI migrates via block migration,
// either because the migration method says so or because volumes are being migrated.
func (v *VirtualMachineInstance) IsBlockMigration() bool {
	return v.Status.MigrationMethod == BlockMigration ||
		len(v.Status.MigratedVolumes) > 0
}
// IsFinal reports whether the VMI reached a terminal phase (Failed or Succeeded).
func (v *VirtualMachineInstance) IsFinal() bool {
	return v.Status.Phase == Failed || v.Status.Phase == Succeeded
}
// IsMarkedForDeletion reports whether the VMI has a deletion timestamp set.
func (v *VirtualMachineInstance) IsMarkedForDeletion() bool {
	return v.ObjectMeta.DeletionTimestamp != nil
}
// IsUnknown reports whether the VMI is in the Unknown phase.
func (v *VirtualMachineInstance) IsUnknown() bool {
	return v.Status.Phase == Unknown
}
// IsUnprocessed reports whether the VMI has not been processed yet
// (phase Pending or still unset).
func (v *VirtualMachineInstance) IsUnprocessed() bool {
	return v.Status.Phase == Pending || v.Status.Phase == VmPhaseUnset
}
// IsCPUDedicated reports whether CPU pinning (dedicated CPU placement) has been requested.
func (v *VirtualMachineInstance) IsCPUDedicated() bool {
	return v.Spec.Domain.CPU != nil && v.Spec.Domain.CPU.DedicatedCPUPlacement
}
// IsBootloaderEFI reports whether the firmware is configured to boot via EFI.
func (v *VirtualMachineInstance) IsBootloaderEFI() bool {
	return v.Spec.Domain.Firmware != nil && v.Spec.Domain.Firmware.Bootloader != nil &&
		v.Spec.Domain.Firmware.Bootloader.EFI != nil
}
// WantsToHaveQOSGuaranteed checks if cpu and memory limits and requests are identical on the VMI.
// This is the indicator that people want a VMI with QOS of guaranteed
func (v *VirtualMachineInstance) WantsToHaveQOSGuaranteed() bool {
	resources := v.Spec.Domain.Resources
	return !resources.Requests.Memory().IsZero() && resources.Requests.Memory().Cmp(*resources.Limits.Memory()) == 0 &&
		!resources.Requests.Cpu().IsZero() && resources.Requests.Cpu().Cmp(*resources.Limits.Cpu()) == 0
}
// ShouldStartPaused returns true if VMI should be started in paused state
func (v *VirtualMachineInstance) ShouldStartPaused() bool {
	return v.Spec.StartStrategy != nil && *v.Spec.StartStrategy == StartStrategyPaused
}
// IsRealtimeEnabled reports whether realtime CPU tuning was requested in the spec.
func (v *VirtualMachineInstance) IsRealtimeEnabled() bool {
	return v.Spec.Domain.CPU != nil && v.Spec.Domain.CPU.Realtime != nil
}
// IsHighPerformanceVMI returns true if the VMI is considered as high performance.
// A VMI is considered as high performance if one of the following is true:
// - the vmi requests a dedicated cpu
// - the realtime flag is enabled
// - the vmi requests hugepages
func (v *VirtualMachineInstance) IsHighPerformanceVMI() bool {
	cpu := v.Spec.Domain.CPU
	if cpu != nil && (cpu.DedicatedCPUPlacement || cpu.Realtime != nil) {
		return true
	}
	mem := v.Spec.Domain.Memory
	return mem != nil && mem.Hugepages != nil
}
// IsMigrationSource reports whether the target-side migration state is fully
// populated (sync and node addresses set); judging by the fields checked this
// appears tied to decentralized migration — confirm with callers.
func (v *VirtualMachineInstance) IsMigrationSource() bool {
	// Can use this after being fully synchronized.
	return v.Status.MigrationState != nil && v.Status.MigrationState.TargetState != nil && v.Status.MigrationState.TargetState.SyncAddress != nil && v.Status.MigrationState.TargetState.NodeAddress != nil
}
// IsMigrationTarget reports whether the VMI carries the CreateMigrationTarget
// annotation set to "true".
func (v *VirtualMachineInstance) IsMigrationTarget() bool {
	return v.GetAnnotations()[CreateMigrationTarget] == "true"
}
// IsWaitingForSync reports whether the VMI is in the WaitingForSync phase.
func (v *VirtualMachineInstance) IsWaitingForSync() bool {
	return v.Status.Phase == WaitingForSync
}
// IsMigrationTargetNodeLabelSet reports whether the migration target node name
// label is present; it is set after the VMI is fully synced.
func (v *VirtualMachineInstance) IsMigrationTargetNodeLabelSet() bool {
	_, ok := v.Labels[MigrationTargetNodeNameLabel]
	return ok
}
// IsMigrationSourceSynchronized reports whether the source-side migration state
// is fully populated. Assumed to only be called during decentralized live migration.
func (v *VirtualMachineInstance) IsMigrationSourceSynchronized() bool {
	return v.Status.MigrationState != nil && v.Status.MigrationState.SourceState != nil &&
		v.Status.MigrationState.TargetState != nil &&
		v.Status.MigrationState.SourceState.MigrationUID != "" &&
		v.Status.MigrationState.SourceState.Pod != "" &&
		v.Status.MigrationState.SourceState.NodeSelectors != nil &&
		v.Status.MigrationState.SourceState.Node != ""
}
// IsMigrationCompleted reports whether the current migration state is marked completed.
func (v *VirtualMachineInstance) IsMigrationCompleted() bool {
	return v.Status.MigrationState != nil && v.Status.MigrationState.Completed
}
// IsMigrationSynchronized reports whether the migration's state has been
// synchronized on this VMI. For a decentralized migration both source and
// target states must exist and carry a migration UID — which can only happen
// once both sides have synchronized at least once. For a local migration it is
// enough that the migration state exists, i.e. the migration resource was created.
func (v *VirtualMachineInstance) IsMigrationSynchronized(migration *VirtualMachineInstanceMigration) bool {
	ms := v.Status.MigrationState
	if !migration.IsDecentralized() {
		return ms != nil
	}
	return ms != nil && ms.SourceState != nil &&
		ms.TargetState != nil &&
		ms.SourceState.MigrationUID != "" &&
		ms.TargetState.MigrationUID != ""
}
// IsTargetPreparing reports whether the migration target is being prepared.
// Decentralized: the synchronized target state must name a pod and a node
// (IsMigrationSynchronized guards the nil checks via short-circuit).
// Local: the migration state must match the migration's UID and name a target node.
func (v *VirtualMachineInstance) IsTargetPreparing(migration *VirtualMachineInstanceMigration) bool {
	if migration.IsDecentralized() {
		return v.IsMigrationSynchronized(migration) &&
			v.Status.MigrationState.TargetState.Pod != "" &&
			v.Status.MigrationState.TargetState.Node != ""
	}
	return v.Status.MigrationState != nil && v.Status.MigrationState.MigrationUID == migration.UID &&
		v.Status.MigrationState.TargetNode != ""
}
// IsDecentralizedMigration reports whether the migration is decentralized:
// both source and target states exist and exactly one of the two carries a
// sync address.
func (v *VirtualMachineInstance) IsDecentralizedMigration() bool {
	return v.Status.MigrationState != nil &&
		v.Status.MigrationState.TargetState != nil &&
		v.Status.MigrationState.SourceState != nil &&
		((v.Status.MigrationState.SourceState.SyncAddress == nil && v.Status.MigrationState.TargetState.SyncAddress != nil) ||
			(v.Status.MigrationState.SourceState.SyncAddress != nil && v.Status.MigrationState.TargetState.SyncAddress == nil))
}
// VirtualMachineInstanceConditionType is the type of a condition reported on a VMI.
type VirtualMachineInstanceConditionType string
// These are valid conditions of VMIs.
const (
// Provisioning means, a VMI depends on DataVolumes which are in Pending/WaitForFirstConsumer status,
// and some actions are taken to provision the PVCs for the DataVolumes
VirtualMachineInstanceProvisioning VirtualMachineInstanceConditionType = "Provisioning"
// Ready means the VMI is able to service requests and should be added to the
// load balancing pools of all matching services.
VirtualMachineInstanceReady VirtualMachineInstanceConditionType = "Ready"
// If there happens any error while trying to synchronize the VirtualMachineInstance with the Domain,
// this is reported as false.
VirtualMachineInstanceSynchronized VirtualMachineInstanceConditionType = "Synchronized"
// If the VMI was paused by the user, this is reported as true.
VirtualMachineInstancePaused VirtualMachineInstanceConditionType = "Paused"
// Reflects whether the QEMU guest agent is connected through the channel
VirtualMachineInstanceAgentConnected VirtualMachineInstanceConditionType = "AgentConnected"
// Reflects whether the QEMU guest agent updated access credentials successfully
VirtualMachineInstanceAccessCredentialsSynchronized VirtualMachineInstanceConditionType = "AccessCredentialsSynchronized"
// Reflects whether the QEMU guest agent is connected through the channel
VirtualMachineInstanceUnsupportedAgent VirtualMachineInstanceConditionType = "AgentVersionNotSupported"
// Indicates whether the VMI is live migratable
VirtualMachineInstanceIsMigratable VirtualMachineInstanceConditionType = "LiveMigratable"
// Indicates that the VMI is in progress of Hot vCPU Plug/UnPlug
VirtualMachineInstanceVCPUChange VirtualMachineInstanceConditionType = "HotVCPUChange"
// Indicates that the VMI is hot(un)plugging memory
VirtualMachineInstanceMemoryChange VirtualMachineInstanceConditionType = "HotMemoryChange"
// Indicates that the VMI has an updates in its volume set
VirtualMachineInstanceVolumesChange VirtualMachineInstanceConditionType = "VolumesChange"
// Summarizes that all the DataVolumes attached to the VMI are Ready or not
VirtualMachineInstanceDataVolumesReady VirtualMachineInstanceConditionType = "DataVolumesReady"
// Indicates whether the VMI is live migratable
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/register.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/register.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2019 Red Hat, Inc.
*
*/
package v1
import (
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/pointer"
"kubevirt.io/api/core"
)
// SubresourceGroupName is the API group under which KubeVirt subresources are served.
const SubresourceGroupName = "subresources.kubevirt.io"
// KubeVirtClientGoSchemeRegistrationVersionEnvVar names an environment variable
// presumably consulted during client-go scheme registration — confirm usage in
// kubevirt client-go.
const KubeVirtClientGoSchemeRegistrationVersionEnvVar = "KUBEVIRT_CLIENT_GO_SCHEME_REGISTRATION_VERSION"
var (
	// ApiLatestVersion is the newest served API version.
	ApiLatestVersion = "v1"
	// ApiSupportedWebhookVersions lists the API versions supported by the webhooks.
	ApiSupportedWebhookVersions = []string{"v1alpha3", "v1"}
	// ApiStorageVersion is the version objects are persisted as.
	ApiStorageVersion = "v1"
	// ApiSupportedVersions enumerates the served CRD versions; v1alpha3 is
	// still served but deprecated.
	ApiSupportedVersions = []extv1.CustomResourceDefinitionVersion{
		{
			Name:    "v1",
			Served:  true,
			Storage: true,
		},
		{
			Name:               "v1alpha3",
			Served:             true,
			Storage:            false,
			Deprecated:         true,
			DeprecationWarning: pointer.String("kubevirt.io/v1alpha3 is now deprecated and will be removed in a future release."),
		},
	}
)
var (
	// GroupVersion is the latest group version for the KubeVirt api
	GroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiLatestVersion}
	// SchemeGroupVersion mirrors GroupVersion (conventional name expected by generated code).
	SchemeGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiLatestVersion}
	// StorageGroupVersion is the group version our api is persisted internally as
	StorageGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiStorageVersion}
	// GroupVersions is group version list used to register these objects
	// The preferred group version is the first item in the list.
	GroupVersions = []schema.GroupVersion{{Group: core.GroupName, Version: "v1"}, {Group: core.GroupName, Version: "v1alpha3"}}
	// SubresourceGroupVersions is group version list used to register these objects
	// The preferred group version is the first item in the list.
	SubresourceGroupVersions = []schema.GroupVersion{{Group: SubresourceGroupName, Version: ApiLatestVersion}, {Group: SubresourceGroupName, Version: "v1alpha3"}}
	// SubresourceStorageGroupVersion is the group version our api is persisted internally as
	SubresourceStorageGroupVersion = schema.GroupVersion{Group: SubresourceGroupName, Version: ApiStorageVersion}
)
var (
	// GroupVersionKind constants for every KubeVirt top-level kind.
	VirtualMachineInstanceGroupVersionKind           = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstance"}
	VirtualMachineInstanceReplicaSetGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstanceReplicaSet"}
	VirtualMachineInstancePresetGroupVersionKind     = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstancePreset"}
	VirtualMachineGroupVersionKind                   = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachine"}
	VirtualMachineInstanceMigrationGroupVersionKind  = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstanceMigration"}
	KubeVirtGroupVersionKind                         = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "KubeVirt"}
)
var (
	// SchemeBuilder registers all KubeVirt types under the latest GroupVersion;
	// AddToScheme is the conventional entry point consumed by client schemes.
	SchemeBuilder = runtime.NewSchemeBuilder(AddKnownTypesGenerator([]schema.GroupVersion{GroupVersion}))
	AddToScheme   = SchemeBuilder.AddToScheme
)
// AddKnownTypesGenerator returns a SchemeBuilder-compatible function that
// registers every KubeVirt top-level type (and its List type) under each of
// the given group versions, including the meta v1 types for each group version.
func AddKnownTypesGenerator(groupVersions []schema.GroupVersion) func(scheme *runtime.Scheme) error {
	// Adds the list of known types to api.Scheme.
	return func(scheme *runtime.Scheme) error {
		for _, groupVersion := range groupVersions {
			scheme.AddKnownTypes(groupVersion,
				&VirtualMachineInstance{},
				&VirtualMachineInstanceList{},
				&VirtualMachineInstanceReplicaSet{},
				&VirtualMachineInstanceReplicaSetList{},
				&VirtualMachineInstancePreset{},
				&VirtualMachineInstancePresetList{},
				&VirtualMachineInstanceMigration{},
				&VirtualMachineInstanceMigrationList{},
				&VirtualMachine{},
				&VirtualMachineList{},
				&KubeVirt{},
				&KubeVirtList{},
			)
			metav1.AddToGroupVersion(scheme, groupVersion)
		}
		return nil
	}
}
// Resource qualifies an unqualified resource name with the KubeVirt group,
// returning the corresponding GroupResource.
func Resource(resource string) schema.GroupResource {
	qualified := GroupVersion.WithResource(resource)
	return qualified.GroupResource()
}
// Kind qualifies an unqualified kind name with the KubeVirt group, returning
// the corresponding GroupKind.
func Kind(kind string) schema.GroupKind {
	qualified := SchemeGroupVersion.WithKind(kind)
	return qualified.GroupKind()
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/defaults.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/defaults.go | package v1
import (
"github.com/google/uuid"
"k8s.io/apimachinery/pkg/types"
)
// _true and _false are shared *bool singletons used by the defaulters below.
// NOTE(review): _false is not referenced anywhere in this file — confirm it is
// used elsewhere before removing.
var _true = t(true)
var _false = t(false)
// SetDefaults_HPETTimer enables the HPET timer when no explicit choice was made.
func SetDefaults_HPETTimer(obj *HPETTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_PITTimer enables the PIT timer when no explicit choice was made.
func SetDefaults_PITTimer(obj *PITTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_KVMTimer enables the KVM timer when no explicit choice was made.
func SetDefaults_KVMTimer(obj *KVMTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_HypervTimer enables the Hyper-V timer when no explicit choice was made.
func SetDefaults_HypervTimer(obj *HypervTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_RTCTimer enables the RTC timer when no explicit choice was made.
func SetDefaults_RTCTimer(obj *RTCTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_FeatureState enables the feature when no explicit choice was made.
func SetDefaults_FeatureState(obj *FeatureState) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_SyNICTimer enables the SyNIC timer, and its Direct sub-feature
// when present, unless explicitly set.
func SetDefaults_SyNICTimer(obj *SyNICTimer) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
	if obj.Direct != nil && obj.Direct.Enabled == nil {
		obj.Direct.Enabled = _true
	}
}
// SetDefaults_FeatureAPIC enables the APIC feature when no explicit choice was made.
func SetDefaults_FeatureAPIC(obj *FeatureAPIC) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_FeatureVendorID enables the vendor-ID feature when no explicit choice was made.
func SetDefaults_FeatureVendorID(obj *FeatureVendorID) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
}
// SetDefaults_DiskDevice makes a plain Disk target the default when neither
// disk, CD-ROM nor LUN was specified.
func SetDefaults_DiskDevice(obj *DiskDevice) {
	if obj.Disk == nil &&
		obj.CDRom == nil &&
		obj.LUN == nil {
		obj.Disk = &DiskTarget{}
	}
}
// SetDefaults_CDRomTarget defaults a CD-ROM to read-only with a closed tray.
func SetDefaults_CDRomTarget(obj *CDRomTarget) {
	if obj.ReadOnly == nil {
		obj.ReadOnly = _true
	}
	if obj.Tray == "" {
		obj.Tray = TrayStateClosed
	}
}
// SetDefaults_FeatureSpinlocks enables the spinlocks feature by default and,
// when enabled with no retry count given, defaults the retries to 4096.
func SetDefaults_FeatureSpinlocks(obj *FeatureSpinlocks) {
	if obj.Enabled == nil {
		obj.Enabled = _true
	}
	if *obj.Enabled == *_true && obj.Retries == nil {
		obj.Retries = ui32(4096)
	}
}
// SetDefaults_Firmware assigns a fresh random UUID when none was provided.
func SetDefaults_Firmware(obj *Firmware) {
	if obj.UUID == "" {
		obj.UUID = types.UID(uuid.NewString())
	}
}
// SetDefaults_VirtualMachineInstance applies VMI-wide defaults: it ensures the
// Firmware and Features structs exist, defaults every disk and input device,
// and applies probe defaults to the readiness and liveness probes.
func SetDefaults_VirtualMachineInstance(obj *VirtualMachineInstance) {
	if obj.Spec.Domain.Firmware == nil {
		obj.Spec.Domain.Firmware = &Firmware{}
	}
	if obj.Spec.Domain.Features == nil {
		obj.Spec.Domain.Features = &Features{}
	}
	setDefaults_Disk(obj)
	setDefaults_Input(obj)
	SetDefaults_Probe(obj.Spec.ReadinessProbe)
	SetDefaults_Probe(obj.Spec.LivenessProbe)
}
// setDefaults_Disk applies SetDefaults_DiskDevice to every disk of the VMI.
func setDefaults_Disk(obj *VirtualMachineInstance) {
	for i := range obj.Spec.Domain.Devices.Disks {
		disk := &obj.Spec.Domain.Devices.Disks[i].DiskDevice
		SetDefaults_DiskDevice(disk)
	}
}
// setDefaults_Input defaults every input device to a USB tablet.
func setDefaults_Input(obj *VirtualMachineInstance) {
	for i := range obj.Spec.Domain.Devices.Inputs {
		input := &obj.Spec.Domain.Devices.Inputs[i]
		if input.Bus == "" {
			input.Bus = InputBusUSB
		}
		if input.Type == "" {
			input.Type = InputTypeTablet
		}
	}
}
// SetDefaults_Probe applies the standard Kubernetes probe defaults — timeout
// 1s, period 10s, success threshold 1, failure threshold 3 — to any field
// that is unset (or non-positive). A nil probe is left untouched.
func SetDefaults_Probe(probe *Probe) {
	if probe == nil {
		return
	}
	if probe.TimeoutSeconds < 1 {
		probe.TimeoutSeconds = 1
	}
	if probe.PeriodSeconds < 1 {
		probe.PeriodSeconds = 10
	}
	if probe.SuccessThreshold < 1 {
		probe.SuccessThreshold = 1
	}
	if probe.FailureThreshold < 1 {
		probe.FailureThreshold = 3
	}
}
func DefaultBridgeNetworkInterface() *Interface {
iface := &Interface{
Name: "default",
InterfaceBindingMethod: InterfaceBindingMethod{
Bridge: &InterfaceBridge{},
},
}
return iface
}
// DefaultMasqueradeNetworkInterface returns the canonical "default"
// interface bound via masquerade NAT.
func DefaultMasqueradeNetworkInterface() *Interface {
	return &Interface{
		Name: "default",
		InterfaceBindingMethod: InterfaceBindingMethod{
			Masquerade: &InterfaceMasquerade{},
		},
	}
}
// DefaultPodNetwork returns the canonical "default" network backed by the
// pod network.
func DefaultPodNetwork() *Network {
	return &Network{
		Name: "default",
		NetworkSource: NetworkSource{
			Pod: &PodNetwork{},
		},
	}
}
// t returns a pointer to a copy of the given bool.
func t(v bool) *bool {
	b := v
	return &b
}
// ui32 returns a pointer to a copy of the given uint32.
func ui32(v uint32) *uint32 {
	u := v
	return &u
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
This file is part of the KubeVirt project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright The KubeVirt Authors.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&VirtualMachine{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachine(obj.(*VirtualMachine))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstance{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstance(obj.(*VirtualMachineInstance))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceList{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstanceList(obj.(*VirtualMachineInstanceList))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstancePreset{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstancePreset(obj.(*VirtualMachineInstancePreset))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstancePresetList{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstancePresetList(obj.(*VirtualMachineInstancePresetList))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceReplicaSet{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstanceReplicaSet(obj.(*VirtualMachineInstanceReplicaSet))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceReplicaSetList{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineInstanceReplicaSetList(obj.(*VirtualMachineInstanceReplicaSetList))
	})
	scheme.AddTypeDefaultingFunc(&VirtualMachineList{}, func(obj interface{}) {
		SetObjectDefaults_VirtualMachineList(obj.(*VirtualMachineList))
	})
	return nil
}
// SetObjectDefaults_VirtualMachine applies all nested defaulters to a
// VirtualMachine: the embedded template spec and any pending hotplug
// volume requests.
func SetObjectDefaults_VirtualMachine(in *VirtualMachine) {
	if tmpl := in.Spec.Template; tmpl != nil {
		vmDefaultDomain(&tmpl.Spec.Domain)
		if tmpl.Spec.LivenessProbe != nil {
			SetDefaults_Probe(tmpl.Spec.LivenessProbe)
		}
		if tmpl.Spec.ReadinessProbe != nil {
			SetDefaults_Probe(tmpl.Spec.ReadinessProbe)
		}
	}
	for i := range in.Status.VolumeRequests {
		opts := in.Status.VolumeRequests[i].AddVolumeOptions
		if opts != nil && opts.Disk != nil {
			vmDefaultDisk(opts.Disk)
		}
	}
}

// vmDefaultDomain defaults every nested structure of a DomainSpec.
func vmDefaultDomain(domain *DomainSpec) {
	if domain.Firmware != nil {
		SetDefaults_Firmware(domain.Firmware)
	}
	if domain.Clock != nil && domain.Clock.Timer != nil {
		vmDefaultTimer(domain.Clock.Timer)
	}
	if domain.Features != nil {
		vmDefaultFeatures(domain.Features)
	}
	for i := range domain.Devices.Disks {
		vmDefaultDisk(&domain.Devices.Disks[i])
	}
	for i := range domain.Devices.GPUs {
		gpu := &domain.Devices.GPUs[i]
		if gpu.VirtualGPUOptions != nil && gpu.VirtualGPUOptions.Display != nil && gpu.VirtualGPUOptions.Display.RamFB != nil {
			SetDefaults_FeatureState(gpu.VirtualGPUOptions.Display.RamFB)
		}
	}
}

// vmDefaultTimer defaults each timer that is present.
func vmDefaultTimer(timer *Timer) {
	if timer.HPET != nil {
		SetDefaults_HPETTimer(timer.HPET)
	}
	if timer.KVM != nil {
		SetDefaults_KVMTimer(timer.KVM)
	}
	if timer.PIT != nil {
		SetDefaults_PITTimer(timer.PIT)
	}
	if timer.RTC != nil {
		SetDefaults_RTCTimer(timer.RTC)
	}
	if timer.Hyperv != nil {
		SetDefaults_HypervTimer(timer.Hyperv)
	}
}

// vmDefaultFeatures defaults the feature tree. Each defaulter mutates
// only its own struct, so the ordering is not significant.
func vmDefaultFeatures(features *Features) {
	SetDefaults_FeatureState(&features.ACPI)
	if features.APIC != nil {
		SetDefaults_FeatureAPIC(features.APIC)
	}
	if hv := features.Hyperv; hv != nil {
		for _, state := range []*FeatureState{
			hv.Relaxed, hv.VAPIC, hv.VPIndex, hv.Runtime, hv.SyNIC,
			hv.Reset, hv.Frequencies, hv.Reenlightenment, hv.TLBFlush,
			hv.IPI, hv.EVMCS,
		} {
			if state != nil {
				SetDefaults_FeatureState(state)
			}
		}
		if hv.Spinlocks != nil {
			SetDefaults_FeatureSpinlocks(hv.Spinlocks)
		}
		if hv.SyNICTimer != nil {
			SetDefaults_SyNICTimer(hv.SyNICTimer)
			if hv.SyNICTimer.Direct != nil {
				SetDefaults_FeatureState(hv.SyNICTimer.Direct)
			}
		}
		if hv.VendorID != nil {
			SetDefaults_FeatureVendorID(hv.VendorID)
		}
	}
	if features.SMM != nil {
		SetDefaults_FeatureState(features.SMM)
	}
	if features.Pvspinlock != nil {
		SetDefaults_FeatureState(features.Pvspinlock)
	}
}

// vmDefaultDisk defaults a single disk device and its optional block-size
// matching hints.
func vmDefaultDisk(disk *Disk) {
	SetDefaults_DiskDevice(&disk.DiskDevice)
	if disk.DiskDevice.CDRom != nil {
		SetDefaults_CDRomTarget(disk.DiskDevice.CDRom)
	}
	if disk.BlockSize != nil && disk.BlockSize.MatchVolume != nil {
		SetDefaults_FeatureState(disk.BlockSize.MatchVolume)
	}
}
// SetObjectDefaults_VirtualMachineInstance applies the top-level VMI
// defaulter first and then all nested defaulters.
func SetObjectDefaults_VirtualMachineInstance(in *VirtualMachineInstance) {
	SetDefaults_VirtualMachineInstance(in)
	vmiDefaultDomain(&in.Spec.Domain)
	if in.Spec.LivenessProbe != nil {
		SetDefaults_Probe(in.Spec.LivenessProbe)
	}
	if in.Spec.ReadinessProbe != nil {
		SetDefaults_Probe(in.Spec.ReadinessProbe)
	}
}

// vmiDefaultDomain defaults every nested structure of a DomainSpec.
func vmiDefaultDomain(domain *DomainSpec) {
	if domain.Firmware != nil {
		SetDefaults_Firmware(domain.Firmware)
	}
	if domain.Clock != nil && domain.Clock.Timer != nil {
		vmiDefaultTimer(domain.Clock.Timer)
	}
	if domain.Features != nil {
		vmiDefaultFeatures(domain.Features)
	}
	for i := range domain.Devices.Disks {
		vmiDefaultDisk(&domain.Devices.Disks[i])
	}
	for i := range domain.Devices.GPUs {
		gpu := &domain.Devices.GPUs[i]
		if gpu.VirtualGPUOptions != nil && gpu.VirtualGPUOptions.Display != nil && gpu.VirtualGPUOptions.Display.RamFB != nil {
			SetDefaults_FeatureState(gpu.VirtualGPUOptions.Display.RamFB)
		}
	}
}

// vmiDefaultTimer defaults each timer that is present.
func vmiDefaultTimer(timer *Timer) {
	if timer.HPET != nil {
		SetDefaults_HPETTimer(timer.HPET)
	}
	if timer.KVM != nil {
		SetDefaults_KVMTimer(timer.KVM)
	}
	if timer.PIT != nil {
		SetDefaults_PITTimer(timer.PIT)
	}
	if timer.RTC != nil {
		SetDefaults_RTCTimer(timer.RTC)
	}
	if timer.Hyperv != nil {
		SetDefaults_HypervTimer(timer.Hyperv)
	}
}

// vmiDefaultFeatures defaults the feature tree. Each defaulter mutates
// only its own struct, so the ordering is not significant.
func vmiDefaultFeatures(features *Features) {
	SetDefaults_FeatureState(&features.ACPI)
	if features.APIC != nil {
		SetDefaults_FeatureAPIC(features.APIC)
	}
	if hv := features.Hyperv; hv != nil {
		for _, state := range []*FeatureState{
			hv.Relaxed, hv.VAPIC, hv.VPIndex, hv.Runtime, hv.SyNIC,
			hv.Reset, hv.Frequencies, hv.Reenlightenment, hv.TLBFlush,
			hv.IPI, hv.EVMCS,
		} {
			if state != nil {
				SetDefaults_FeatureState(state)
			}
		}
		if hv.Spinlocks != nil {
			SetDefaults_FeatureSpinlocks(hv.Spinlocks)
		}
		if hv.SyNICTimer != nil {
			SetDefaults_SyNICTimer(hv.SyNICTimer)
			if hv.SyNICTimer.Direct != nil {
				SetDefaults_FeatureState(hv.SyNICTimer.Direct)
			}
		}
		if hv.VendorID != nil {
			SetDefaults_FeatureVendorID(hv.VendorID)
		}
	}
	if features.SMM != nil {
		SetDefaults_FeatureState(features.SMM)
	}
	if features.Pvspinlock != nil {
		SetDefaults_FeatureState(features.Pvspinlock)
	}
}

// vmiDefaultDisk defaults a single disk device and its optional
// block-size matching hints.
func vmiDefaultDisk(disk *Disk) {
	SetDefaults_DiskDevice(&disk.DiskDevice)
	if disk.DiskDevice.CDRom != nil {
		SetDefaults_CDRomTarget(disk.DiskDevice.CDRom)
	}
	if disk.BlockSize != nil && disk.BlockSize.MatchVolume != nil {
		SetDefaults_FeatureState(disk.BlockSize.MatchVolume)
	}
}
// SetObjectDefaults_VirtualMachineInstanceList defaults every item in the
// list in place.
func SetObjectDefaults_VirtualMachineInstanceList(in *VirtualMachineInstanceList) {
	items := in.Items
	for i := range items {
		SetObjectDefaults_VirtualMachineInstance(&items[i])
	}
}
// SetObjectDefaults_VirtualMachineInstancePreset applies all nested
// defaulters to a preset's optional domain spec.
func SetObjectDefaults_VirtualMachineInstancePreset(in *VirtualMachineInstancePreset) {
	if in.Spec.Domain != nil {
		presetDefaultDomain(in.Spec.Domain)
	}
}

// presetDefaultDomain defaults every nested structure of a DomainSpec.
func presetDefaultDomain(domain *DomainSpec) {
	if domain.Firmware != nil {
		SetDefaults_Firmware(domain.Firmware)
	}
	if domain.Clock != nil && domain.Clock.Timer != nil {
		presetDefaultTimer(domain.Clock.Timer)
	}
	if domain.Features != nil {
		presetDefaultFeatures(domain.Features)
	}
	for i := range domain.Devices.Disks {
		presetDefaultDisk(&domain.Devices.Disks[i])
	}
	for i := range domain.Devices.GPUs {
		gpu := &domain.Devices.GPUs[i]
		if gpu.VirtualGPUOptions != nil && gpu.VirtualGPUOptions.Display != nil && gpu.VirtualGPUOptions.Display.RamFB != nil {
			SetDefaults_FeatureState(gpu.VirtualGPUOptions.Display.RamFB)
		}
	}
}

// presetDefaultTimer defaults each timer that is present.
func presetDefaultTimer(timer *Timer) {
	if timer.HPET != nil {
		SetDefaults_HPETTimer(timer.HPET)
	}
	if timer.KVM != nil {
		SetDefaults_KVMTimer(timer.KVM)
	}
	if timer.PIT != nil {
		SetDefaults_PITTimer(timer.PIT)
	}
	if timer.RTC != nil {
		SetDefaults_RTCTimer(timer.RTC)
	}
	if timer.Hyperv != nil {
		SetDefaults_HypervTimer(timer.Hyperv)
	}
}

// presetDefaultFeatures defaults the feature tree. Each defaulter mutates
// only its own struct, so the ordering is not significant.
func presetDefaultFeatures(features *Features) {
	SetDefaults_FeatureState(&features.ACPI)
	if features.APIC != nil {
		SetDefaults_FeatureAPIC(features.APIC)
	}
	if hv := features.Hyperv; hv != nil {
		for _, state := range []*FeatureState{
			hv.Relaxed, hv.VAPIC, hv.VPIndex, hv.Runtime, hv.SyNIC,
			hv.Reset, hv.Frequencies, hv.Reenlightenment, hv.TLBFlush,
			hv.IPI, hv.EVMCS,
		} {
			if state != nil {
				SetDefaults_FeatureState(state)
			}
		}
		if hv.Spinlocks != nil {
			SetDefaults_FeatureSpinlocks(hv.Spinlocks)
		}
		if hv.SyNICTimer != nil {
			SetDefaults_SyNICTimer(hv.SyNICTimer)
			if hv.SyNICTimer.Direct != nil {
				SetDefaults_FeatureState(hv.SyNICTimer.Direct)
			}
		}
		if hv.VendorID != nil {
			SetDefaults_FeatureVendorID(hv.VendorID)
		}
	}
	if features.SMM != nil {
		SetDefaults_FeatureState(features.SMM)
	}
	if features.Pvspinlock != nil {
		SetDefaults_FeatureState(features.Pvspinlock)
	}
}

// presetDefaultDisk defaults a single disk device and its optional
// block-size matching hints.
func presetDefaultDisk(disk *Disk) {
	SetDefaults_DiskDevice(&disk.DiskDevice)
	if disk.DiskDevice.CDRom != nil {
		SetDefaults_CDRomTarget(disk.DiskDevice.CDRom)
	}
	if disk.BlockSize != nil && disk.BlockSize.MatchVolume != nil {
		SetDefaults_FeatureState(disk.BlockSize.MatchVolume)
	}
}
// SetObjectDefaults_VirtualMachineInstancePresetList defaults every item
// in the list in place.
func SetObjectDefaults_VirtualMachineInstancePresetList(in *VirtualMachineInstancePresetList) {
	items := in.Items
	for i := range items {
		SetObjectDefaults_VirtualMachineInstancePreset(&items[i])
	}
}
// SetObjectDefaults_VirtualMachineInstanceReplicaSet applies all nested
// defaulters to the replica set's optional VMI template.
func SetObjectDefaults_VirtualMachineInstanceReplicaSet(in *VirtualMachineInstanceReplicaSet) {
	tmpl := in.Spec.Template
	if tmpl == nil {
		return
	}
	vmirsDefaultDomain(&tmpl.Spec.Domain)
	if tmpl.Spec.LivenessProbe != nil {
		SetDefaults_Probe(tmpl.Spec.LivenessProbe)
	}
	if tmpl.Spec.ReadinessProbe != nil {
		SetDefaults_Probe(tmpl.Spec.ReadinessProbe)
	}
}

// vmirsDefaultDomain defaults every nested structure of a DomainSpec.
func vmirsDefaultDomain(domain *DomainSpec) {
	if domain.Firmware != nil {
		SetDefaults_Firmware(domain.Firmware)
	}
	if domain.Clock != nil && domain.Clock.Timer != nil {
		vmirsDefaultTimer(domain.Clock.Timer)
	}
	if domain.Features != nil {
		vmirsDefaultFeatures(domain.Features)
	}
	for i := range domain.Devices.Disks {
		vmirsDefaultDisk(&domain.Devices.Disks[i])
	}
	for i := range domain.Devices.GPUs {
		gpu := &domain.Devices.GPUs[i]
		if gpu.VirtualGPUOptions != nil && gpu.VirtualGPUOptions.Display != nil && gpu.VirtualGPUOptions.Display.RamFB != nil {
			SetDefaults_FeatureState(gpu.VirtualGPUOptions.Display.RamFB)
		}
	}
}

// vmirsDefaultTimer defaults each timer that is present.
func vmirsDefaultTimer(timer *Timer) {
	if timer.HPET != nil {
		SetDefaults_HPETTimer(timer.HPET)
	}
	if timer.KVM != nil {
		SetDefaults_KVMTimer(timer.KVM)
	}
	if timer.PIT != nil {
		SetDefaults_PITTimer(timer.PIT)
	}
	if timer.RTC != nil {
		SetDefaults_RTCTimer(timer.RTC)
	}
	if timer.Hyperv != nil {
		SetDefaults_HypervTimer(timer.Hyperv)
	}
}

// vmirsDefaultFeatures defaults the feature tree. Each defaulter mutates
// only its own struct, so the ordering is not significant.
func vmirsDefaultFeatures(features *Features) {
	SetDefaults_FeatureState(&features.ACPI)
	if features.APIC != nil {
		SetDefaults_FeatureAPIC(features.APIC)
	}
	if hv := features.Hyperv; hv != nil {
		for _, state := range []*FeatureState{
			hv.Relaxed, hv.VAPIC, hv.VPIndex, hv.Runtime, hv.SyNIC,
			hv.Reset, hv.Frequencies, hv.Reenlightenment, hv.TLBFlush,
			hv.IPI, hv.EVMCS,
		} {
			if state != nil {
				SetDefaults_FeatureState(state)
			}
		}
		if hv.Spinlocks != nil {
			SetDefaults_FeatureSpinlocks(hv.Spinlocks)
		}
		if hv.SyNICTimer != nil {
			SetDefaults_SyNICTimer(hv.SyNICTimer)
			if hv.SyNICTimer.Direct != nil {
				SetDefaults_FeatureState(hv.SyNICTimer.Direct)
			}
		}
		if hv.VendorID != nil {
			SetDefaults_FeatureVendorID(hv.VendorID)
		}
	}
	if features.SMM != nil {
		SetDefaults_FeatureState(features.SMM)
	}
	if features.Pvspinlock != nil {
		SetDefaults_FeatureState(features.Pvspinlock)
	}
}

// vmirsDefaultDisk defaults a single disk device and its optional
// block-size matching hints.
func vmirsDefaultDisk(disk *Disk) {
	SetDefaults_DiskDevice(&disk.DiskDevice)
	if disk.DiskDevice.CDRom != nil {
		SetDefaults_CDRomTarget(disk.DiskDevice.CDRom)
	}
	if disk.BlockSize != nil && disk.BlockSize.MatchVolume != nil {
		SetDefaults_FeatureState(disk.BlockSize.MatchVolume)
	}
}
// SetObjectDefaults_VirtualMachineInstanceReplicaSetList defaults every
// item in the list in place.
func SetObjectDefaults_VirtualMachineInstanceReplicaSetList(in *VirtualMachineInstanceReplicaSetList) {
	items := in.Items
	for i := range items {
		SetObjectDefaults_VirtualMachineInstanceReplicaSet(&items[i])
	}
}
// SetObjectDefaults_VirtualMachineList defaults every item in the list in
// place.
func SetObjectDefaults_VirtualMachineList(in *VirtualMachineList) {
	items := in.Items
	for i := range items {
		SetObjectDefaults_VirtualMachine(&items[i])
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
This file is part of the KubeVirt project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright The KubeVirt Authors.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
)
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *ACPI) DeepCopyInto(out *ACPI) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *ACPI) DeepCopy() *ACPI {
	if in == nil {
		return nil
	}
	var cp ACPI
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *AccessCredential) DeepCopyInto(out *AccessCredential) {
	*out = *in
	// Pointer fields get their own allocations so out does not alias in.
	if in.SSHPublicKey != nil {
		out.SSHPublicKey = new(SSHPublicKeyAccessCredential)
		in.SSHPublicKey.DeepCopyInto(out.SSHPublicKey)
	}
	if in.UserPassword != nil {
		out.UserPassword = new(UserPasswordAccessCredential)
		in.UserPassword.DeepCopyInto(out.UserPassword)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *AccessCredential) DeepCopy() *AccessCredential {
	if in == nil {
		return nil
	}
	var cp AccessCredential
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *AccessCredentialSecretSource) DeepCopyInto(out *AccessCredentialSecretSource) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *AccessCredentialSecretSource) DeepCopy() *AccessCredentialSecretSource {
	if in == nil {
		return nil
	}
	var cp AccessCredentialSecretSource
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *AddVolumeOptions) DeepCopyInto(out *AddVolumeOptions) {
	*out = *in
	if in.Disk != nil {
		out.Disk = new(Disk)
		in.Disk.DeepCopyInto(out.Disk)
	}
	if in.VolumeSource != nil {
		out.VolumeSource = new(HotplugVolumeSource)
		in.VolumeSource.DeepCopyInto(out.VolumeSource)
	}
	if in.DryRun != nil {
		out.DryRun = make([]string, len(in.DryRun))
		copy(out.DryRun, in.DryRun)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *AddVolumeOptions) DeepCopy() *AddVolumeOptions {
	if in == nil {
		return nil
	}
	var cp AddVolumeOptions
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *ArchConfiguration) DeepCopyInto(out *ArchConfiguration) {
	*out = *in
	if in.Amd64 != nil {
		out.Amd64 = new(ArchSpecificConfiguration)
		in.Amd64.DeepCopyInto(out.Amd64)
	}
	if in.Arm64 != nil {
		out.Arm64 = new(ArchSpecificConfiguration)
		in.Arm64.DeepCopyInto(out.Arm64)
	}
	if in.Ppc64le != nil {
		out.Ppc64le = new(ArchSpecificConfiguration)
		in.Ppc64le.DeepCopyInto(out.Ppc64le)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *ArchConfiguration) DeepCopy() *ArchConfiguration {
	if in == nil {
		return nil
	}
	var cp ArchConfiguration
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *ArchSpecificConfiguration) DeepCopyInto(out *ArchSpecificConfiguration) {
	*out = *in
	if in.EmulatedMachines != nil {
		out.EmulatedMachines = make([]string, len(in.EmulatedMachines))
		copy(out.EmulatedMachines, in.EmulatedMachines)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *ArchSpecificConfiguration) DeepCopy() *ArchSpecificConfiguration {
	if in == nil {
		return nil
	}
	var cp ArchSpecificConfiguration
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *AuthorizedKeysFile) DeepCopyInto(out *AuthorizedKeysFile) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *AuthorizedKeysFile) DeepCopy() *AuthorizedKeysFile {
	if in == nil {
		return nil
	}
	var cp AuthorizedKeysFile
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *BIOS) DeepCopyInto(out *BIOS) {
	*out = *in
	if in.UseSerial != nil {
		v := *in.UseSerial
		out.UseSerial = &v
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *BIOS) DeepCopy() *BIOS {
	if in == nil {
		return nil
	}
	var cp BIOS
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *BlockSize) DeepCopyInto(out *BlockSize) {
	*out = *in
	if in.Custom != nil {
		v := *in.Custom
		out.Custom = &v
	}
	if in.MatchVolume != nil {
		out.MatchVolume = new(FeatureState)
		in.MatchVolume.DeepCopyInto(out.MatchVolume)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *BlockSize) DeepCopy() *BlockSize {
	if in == nil {
		return nil
	}
	var cp BlockSize
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *Bootloader) DeepCopyInto(out *Bootloader) {
	*out = *in
	if in.BIOS != nil {
		out.BIOS = new(BIOS)
		in.BIOS.DeepCopyInto(out.BIOS)
	}
	if in.EFI != nil {
		out.EFI = new(EFI)
		in.EFI.DeepCopyInto(out.EFI)
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *Bootloader) DeepCopy() *Bootloader {
	if in == nil {
		return nil
	}
	var cp Bootloader
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *CDRomTarget) DeepCopyInto(out *CDRomTarget) {
	*out = *in
	if in.ReadOnly != nil {
		v := *in.ReadOnly
		out.ReadOnly = &v
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *CDRomTarget) DeepCopy() *CDRomTarget {
	if in == nil {
		return nil
	}
	var cp CDRomTarget
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *CPU) DeepCopyInto(out *CPU) {
	*out = *in
	if in.Features != nil {
		out.Features = make([]CPUFeature, len(in.Features))
		copy(out.Features, in.Features)
	}
	if in.NUMA != nil {
		out.NUMA = new(NUMA)
		in.NUMA.DeepCopyInto(out.NUMA)
	}
	if in.Realtime != nil {
		v := *in.Realtime
		out.Realtime = &v
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *CPU) DeepCopy() *CPU {
	if in == nil {
		return nil
	}
	var cp CPU
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *CPUFeature) DeepCopyInto(out *CPUFeature) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *CPUFeature) DeepCopy() *CPUFeature {
	if in == nil {
		return nil
	}
	var cp CPUFeature
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *CPUTopology) DeepCopyInto(out *CPUTopology) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *CPUTopology) DeepCopy() *CPUTopology {
	if in == nil {
		return nil
	}
	var cp CPUTopology
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *CertConfig) DeepCopyInto(out *CertConfig) {
	*out = *in
	if in.Duration != nil {
		d := *in.Duration
		out.Duration = &d
	}
	if in.RenewBefore != nil {
		d := *in.RenewBefore
		out.RenewBefore = &d
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *CertConfig) DeepCopy() *CertConfig {
	if in == nil {
		return nil
	}
	var cp CertConfig
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *Chassis) DeepCopyInto(out *Chassis) {
	// A value assignment is a complete copy here, matching the
	// generated implementation.
	*out = *in
}

// DeepCopy returns a newly allocated copy of the receiver, or nil when
// the receiver is nil.
func (in *Chassis) DeepCopy() *Chassis {
	if in == nil {
		return nil
	}
	var cp Chassis
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *ClaimRequest) DeepCopyInto(out *ClaimRequest) {
	*out = *in
	if in.ClaimName != nil {
		s := *in.ClaimName
		out.ClaimName = &s
	}
	if in.RequestName != nil {
		s := *in.RequestName
		out.RequestName = &s
	}
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver is nil.
func (in *ClaimRequest) DeepCopy() *ClaimRequest {
	if in == nil {
		return nil
	}
	var cp ClaimRequest
	in.DeepCopyInto(&cp)
	return &cp
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientPassthroughDevices) DeepCopyInto(out *ClientPassthroughDevices) {
	// No pointer/slice/map fields: assignment alone is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPassthroughDevices.
func (in *ClientPassthroughDevices) DeepCopy() *ClientPassthroughDevices {
	if in == nil {
		return nil
	}
	clone := &ClientPassthroughDevices{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Clock) DeepCopyInto(out *Clock) {
	*out = *in
	// ClockOffset is an embedded value that itself contains pointers; deep-copy it.
	in.ClockOffset.DeepCopyInto(&out.ClockOffset)
	if in.Timer != nil {
		out.Timer = new(Timer)
		in.Timer.DeepCopyInto(out.Timer)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clock.
func (in *Clock) DeepCopy() *Clock {
	if in == nil {
		return nil
	}
	clone := &Clock{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClockOffset) DeepCopyInto(out *ClockOffset) {
	*out = *in
	// UTC needs a recursive deep copy (it contains a pointer field);
	// Timezone is pointer-to-value, so dereferencing assignment suffices.
	if in.UTC != nil {
		out.UTC = new(ClockOffsetUTC)
		in.UTC.DeepCopyInto(out.UTC)
	}
	if in.Timezone != nil {
		out.Timezone = new(ClockOffsetTimezone)
		*out.Timezone = *in.Timezone
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClockOffset.
func (in *ClockOffset) DeepCopy() *ClockOffset {
	if in == nil {
		return nil
	}
	clone := &ClockOffset{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClockOffsetUTC) DeepCopyInto(out *ClockOffsetUTC) {
	*out = *in
	if in.OffsetSeconds != nil {
		out.OffsetSeconds = new(int)
		*out.OffsetSeconds = *in.OffsetSeconds
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClockOffsetUTC.
func (in *ClockOffsetUTC) DeepCopy() *ClockOffsetUTC {
	if in == nil {
		return nil
	}
	clone := &ClockOffsetUTC{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudInitConfigDriveSource) DeepCopyInto(out *CloudInitConfigDriveSource) {
	*out = *in
	// LocalObjectReference is a plain value type, so a dereferencing copy is deep.
	if in.UserDataSecretRef != nil {
		out.UserDataSecretRef = new(corev1.LocalObjectReference)
		*out.UserDataSecretRef = *in.UserDataSecretRef
	}
	if in.NetworkDataSecretRef != nil {
		out.NetworkDataSecretRef = new(corev1.LocalObjectReference)
		*out.NetworkDataSecretRef = *in.NetworkDataSecretRef
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitConfigDriveSource.
func (in *CloudInitConfigDriveSource) DeepCopy() *CloudInitConfigDriveSource {
	if in == nil {
		return nil
	}
	clone := &CloudInitConfigDriveSource{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudInitNoCloudSource) DeepCopyInto(out *CloudInitNoCloudSource) {
	*out = *in
	// LocalObjectReference is a plain value type, so a dereferencing copy is deep.
	if in.UserDataSecretRef != nil {
		out.UserDataSecretRef = new(corev1.LocalObjectReference)
		*out.UserDataSecretRef = *in.UserDataSecretRef
	}
	if in.NetworkDataSecretRef != nil {
		out.NetworkDataSecretRef = new(corev1.LocalObjectReference)
		*out.NetworkDataSecretRef = *in.NetworkDataSecretRef
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitNoCloudSource.
func (in *CloudInitNoCloudSource) DeepCopy() *CloudInitNoCloudSource {
	if in == nil {
		return nil
	}
	clone := &CloudInitNoCloudSource{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterProfilerRequest) DeepCopyInto(out *ClusterProfilerRequest) {
	// No reference-typed fields: assignment alone is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProfilerRequest.
func (in *ClusterProfilerRequest) DeepCopy() *ClusterProfilerRequest {
	if in == nil {
		return nil
	}
	clone := &ClusterProfilerRequest{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterProfilerResults) DeepCopyInto(out *ClusterProfilerResults) {
	*out = *in
	if in.ComponentResults != nil {
		// Build a fresh map and deep-copy every ProfilerResult value.
		results := make(map[string]ProfilerResult, len(in.ComponentResults))
		for key, val := range in.ComponentResults {
			results[key] = *val.DeepCopy()
		}
		out.ComponentResults = results
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProfilerResults.
func (in *ClusterProfilerResults) DeepCopy() *ClusterProfilerResults {
	if in == nil {
		return nil
	}
	clone := &ClusterProfilerResults{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonInstancetypesDeployment) DeepCopyInto(out *CommonInstancetypesDeployment) {
	*out = *in
	if in.Enabled != nil {
		out.Enabled = new(bool)
		*out.Enabled = *in.Enabled
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonInstancetypesDeployment.
func (in *CommonInstancetypesDeployment) DeepCopy() *CommonInstancetypesDeployment {
	if in == nil {
		return nil
	}
	clone := &CommonInstancetypesDeployment{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentConfig) DeepCopyInto(out *ComponentConfig) {
	*out = *in
	// NodePlacement contains nested reference types, so recurse into it.
	if in.NodePlacement != nil {
		out.NodePlacement = new(NodePlacement)
		in.NodePlacement.DeepCopyInto(out.NodePlacement)
	}
	if in.Replicas != nil {
		out.Replicas = new(byte)
		*out.Replicas = *in.Replicas
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfig.
func (in *ComponentConfig) DeepCopy() *ComponentConfig {
	if in == nil {
		return nil
	}
	clone := &ComponentConfig{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigDriveSSHPublicKeyAccessCredentialPropagation) DeepCopyInto(out *ConfigDriveSSHPublicKeyAccessCredentialPropagation) {
	// No reference-typed fields: assignment alone is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigDriveSSHPublicKeyAccessCredentialPropagation.
func (in *ConfigDriveSSHPublicKeyAccessCredentialPropagation) DeepCopy() *ConfigDriveSSHPublicKeyAccessCredentialPropagation {
	if in == nil {
		return nil
	}
	clone := &ConfigDriveSSHPublicKeyAccessCredentialPropagation{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
	*out = *in
	out.LocalObjectReference = in.LocalObjectReference
	if in.Optional != nil {
		out.Optional = new(bool)
		*out.Optional = *in.Optional
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource.
func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource {
	if in == nil {
		return nil
	}
	clone := &ConfigMapVolumeSource{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerDiskInfo) DeepCopyInto(out *ContainerDiskInfo) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDiskInfo.
func (in *ContainerDiskInfo) DeepCopy() *ContainerDiskInfo {
	if in == nil {
		return nil
	}
	clone := &ContainerDiskInfo{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerDiskSource) DeepCopyInto(out *ContainerDiskSource) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDiskSource.
func (in *ContainerDiskSource) DeepCopy() *ContainerDiskSource {
	if in == nil {
		return nil
	}
	clone := &ContainerDiskSource{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevisionRef) DeepCopyInto(out *ControllerRevisionRef) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionRef.
func (in *ControllerRevisionRef) DeepCopy() *ControllerRevisionRef {
	if in == nil {
		return nil
	}
	clone := &ControllerRevisionRef{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomBlockSize) DeepCopyInto(out *CustomBlockSize) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBlockSize.
func (in *CustomBlockSize) DeepCopy() *CustomBlockSize {
	if in == nil {
		return nil
	}
	clone := &CustomBlockSize{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomProfile) DeepCopyInto(out *CustomProfile) {
	*out = *in
	if in.LocalhostProfile != nil {
		out.LocalhostProfile = new(string)
		*out.LocalhostProfile = *in.LocalhostProfile
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomProfile.
func (in *CustomProfile) DeepCopy() *CustomProfile {
	if in == nil {
		return nil
	}
	clone := &CustomProfile{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) {
	*out = *in
	// Patches contains only value elements, so a slice copy is deep.
	if in.Patches != nil {
		out.Patches = make([]CustomizeComponentsPatch, len(in.Patches))
		copy(out.Patches, in.Patches)
	}
	if in.Flags != nil {
		out.Flags = new(Flags)
		in.Flags.DeepCopyInto(out.Flags)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponents.
func (in *CustomizeComponents) DeepCopy() *CustomizeComponents {
	if in == nil {
		return nil
	}
	clone := &CustomizeComponents{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomizeComponentsPatch) DeepCopyInto(out *CustomizeComponentsPatch) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponentsPatch.
func (in *CustomizeComponentsPatch) DeepCopy() *CustomizeComponentsPatch {
	if in == nil {
		return nil
	}
	clone := &CustomizeComponentsPatch{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DHCPOptions) DeepCopyInto(out *DHCPOptions) {
	*out = *in
	// Both slices hold value elements, so copy into fresh backing arrays.
	if in.NTPServers != nil {
		out.NTPServers = make([]string, len(in.NTPServers))
		copy(out.NTPServers, in.NTPServers)
	}
	if in.PrivateOptions != nil {
		out.PrivateOptions = make([]DHCPPrivateOptions, len(in.PrivateOptions))
		copy(out.PrivateOptions, in.PrivateOptions)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPOptions.
func (in *DHCPOptions) DeepCopy() *DHCPOptions {
	if in == nil {
		return nil
	}
	clone := &DHCPOptions{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DHCPPrivateOptions) DeepCopyInto(out *DHCPPrivateOptions) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPPrivateOptions.
func (in *DHCPPrivateOptions) DeepCopy() *DHCPPrivateOptions {
	if in == nil {
		return nil
	}
	clone := &DHCPPrivateOptions{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSource) DeepCopyInto(out *DataVolumeSource) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSource.
func (in *DataVolumeSource) DeepCopy() *DataVolumeSource {
	if in == nil {
		return nil
	}
	clone := &DataVolumeSource{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeTemplateDummyStatus) DeepCopyInto(out *DataVolumeTemplateDummyStatus) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeTemplateDummyStatus.
func (in *DataVolumeTemplateDummyStatus) DeepCopy() *DataVolumeTemplateDummyStatus {
	if in == nil {
		return nil
	}
	clone := &DataVolumeTemplateDummyStatus{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeTemplateSpec) DeepCopyInto(out *DataVolumeTemplateSpec) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta and Spec carry reference types; recurse into both.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	if in.Status != nil {
		out.Status = new(DataVolumeTemplateDummyStatus)
		*out.Status = *in.Status
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeTemplateSpec.
func (in *DataVolumeTemplateSpec) DeepCopy() *DataVolumeTemplateSpec {
	if in == nil {
		return nil
	}
	clone := &DataVolumeTemplateSpec{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeprecatedInterfaceMacvtap) DeepCopyInto(out *DeprecatedInterfaceMacvtap) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedInterfaceMacvtap.
func (in *DeprecatedInterfaceMacvtap) DeepCopy() *DeprecatedInterfaceMacvtap {
	if in == nil {
		return nil
	}
	clone := &DeprecatedInterfaceMacvtap{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeprecatedInterfacePasst) DeepCopyInto(out *DeprecatedInterfacePasst) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedInterfacePasst.
func (in *DeprecatedInterfacePasst) DeepCopy() *DeprecatedInterfacePasst {
	if in == nil {
		return nil
	}
	clone := &DeprecatedInterfacePasst{}
	in.DeepCopyInto(clone)
	return clone
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeprecatedInterfaceSlirp) DeepCopyInto(out *DeprecatedInterfaceSlirp) {
	// Value fields only: assignment is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedInterfaceSlirp.
func (in *DeprecatedInterfaceSlirp) DeepCopy() *DeprecatedInterfaceSlirp {
	if in == nil {
		return nil
	}
	clone := &DeprecatedInterfaceSlirp{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeveloperConfiguration) DeepCopyInto(out *DeveloperConfiguration) {
	*out = *in
	if in.FeatureGates != nil {
		out.FeatureGates = make([]string, len(in.FeatureGates))
		copy(out.FeatureGates, in.FeatureGates)
	}
	if in.NodeSelectors != nil {
		selectors := make(map[string]string, len(in.NodeSelectors))
		for key, val := range in.NodeSelectors {
			selectors[key] = val
		}
		out.NodeSelectors = selectors
	}
	if in.MinimumClusterTSCFrequency != nil {
		out.MinimumClusterTSCFrequency = new(int64)
		*out.MinimumClusterTSCFrequency = *in.MinimumClusterTSCFrequency
	}
	// DiskVerification and LogVerbosity contain nested references; recurse.
	if in.DiskVerification != nil {
		out.DiskVerification = new(DiskVerification)
		in.DiskVerification.DeepCopyInto(out.DiskVerification)
	}
	if in.LogVerbosity != nil {
		out.LogVerbosity = new(LogVerbosity)
		in.LogVerbosity.DeepCopyInto(out.LogVerbosity)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConfiguration.
func (in *DeveloperConfiguration) DeepCopy() *DeveloperConfiguration {
	if in == nil {
		return nil
	}
	clone := &DeveloperConfiguration{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) {
	*out = *in
	if in.PCIAddress != nil {
		out.PCIAddress = new(string)
		*out.PCIAddress = *in.PCIAddress
	}
	if in.MDevUUID != nil {
		out.MDevUUID = new(string)
		*out.MDevUUID = *in.MDevUUID
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute.
func (in *DeviceAttribute) DeepCopy() *DeviceAttribute {
	if in == nil {
		return nil
	}
	clone := &DeviceAttribute{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceResourceClaimStatus) DeepCopyInto(out *DeviceResourceClaimStatus) {
	*out = *in
	if in.Name != nil {
		out.Name = new(string)
		*out.Name = *in.Name
	}
	if in.ResourceClaimName != nil {
		out.ResourceClaimName = new(string)
		*out.ResourceClaimName = *in.ResourceClaimName
	}
	// Attributes itself contains pointer fields, so recurse rather than assign.
	if in.Attributes != nil {
		out.Attributes = new(DeviceAttribute)
		in.Attributes.DeepCopyInto(out.Attributes)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceResourceClaimStatus.
func (in *DeviceResourceClaimStatus) DeepCopy() *DeviceResourceClaimStatus {
	if in == nil {
		return nil
	}
	clone := &DeviceResourceClaimStatus{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceStatus) DeepCopyInto(out *DeviceStatus) {
	*out = *in
	// Each slice element carries nested pointers, so deep-copy element-wise.
	if in.GPUStatuses != nil {
		out.GPUStatuses = make([]DeviceStatusInfo, len(in.GPUStatuses))
		for i := range in.GPUStatuses {
			in.GPUStatuses[i].DeepCopyInto(&out.GPUStatuses[i])
		}
	}
	if in.HostDeviceStatuses != nil {
		out.HostDeviceStatuses = make([]DeviceStatusInfo, len(in.HostDeviceStatuses))
		for i := range in.HostDeviceStatuses {
			in.HostDeviceStatuses[i].DeepCopyInto(&out.HostDeviceStatuses[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceStatus.
func (in *DeviceStatus) DeepCopy() *DeviceStatus {
	if in == nil {
		return nil
	}
	clone := &DeviceStatus{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceStatusInfo) DeepCopyInto(out *DeviceStatusInfo) {
	*out = *in
	if in.DeviceResourceClaimStatus != nil {
		out.DeviceResourceClaimStatus = new(DeviceResourceClaimStatus)
		in.DeviceResourceClaimStatus.DeepCopyInto(out.DeviceResourceClaimStatus)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceStatusInfo.
func (in *DeviceStatusInfo) DeepCopy() *DeviceStatusInfo {
	if in == nil {
		return nil
	}
	clone := &DeviceStatusInfo{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Devices) DeepCopyInto(out *Devices) {
*out = *in
if in.UseVirtioTransitional != nil {
in, out := &in.UseVirtioTransitional, &out.UseVirtioTransitional
*out = new(bool)
**out = **in
}
if in.Disks != nil {
in, out := &in.Disks, &out.Disks
*out = make([]Disk, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Watchdog != nil {
in, out := &in.Watchdog, &out.Watchdog
*out = new(Watchdog)
(*in).DeepCopyInto(*out)
}
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]Interface, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Input, len(*in))
copy(*out, *in)
}
if in.AutoattachPodInterface != nil {
in, out := &in.AutoattachPodInterface, &out.AutoattachPodInterface
*out = new(bool)
**out = **in
}
if in.AutoattachGraphicsDevice != nil {
in, out := &in.AutoattachGraphicsDevice, &out.AutoattachGraphicsDevice
*out = new(bool)
**out = **in
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go | // Code generated by swagger-doc. DO NOT EDIT.
package v1
// SwaggerDoc returns the swagger documentation strings for HostDisk,
// keyed by JSON field name; the "" key documents the type itself.
func (HostDisk) SwaggerDoc() map[string]string {
	return map[string]string{
		"":         "Represents a disk created on the cluster level",
		"path":     "The path to HostDisk image located on the cluster",
		"type":     "Contains information if disk.img exists or should be created\nallowed options are 'Disk' and 'DiskOrCreate'",
		"capacity": "Capacity of the sparse disk\n+optional",
		"shared":   "Shared indicate whether the path is shared between nodes",
	}
}

// SwaggerDoc returns the swagger documentation strings for ConfigMapVolumeSource,
// keyed by JSON field name; the "" key documents the type itself.
func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":            "ConfigMapVolumeSource adapts a ConfigMap into a volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes/#configmap",
		"optional":    "Specify whether the ConfigMap or it's keys must be defined\n+optional",
		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for SecretVolumeSource,
// keyed by JSON field name; the "" key documents the type itself.
func (SecretVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":            "SecretVolumeSource adapts a Secret into a volume.",
		"secretName":  "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
		"optional":    "Specify whether the Secret or it's keys must be defined\n+optional",
		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for DownwardAPIVolumeSource,
// keyed by JSON field name; the "" key documents the type itself.
func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":            "DownwardAPIVolumeSource represents a volume containing downward API info.",
		"fields":      "Fields is a list of downward API volume file\n+optional",
		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for ServiceAccountVolumeSource,
// keyed by JSON field name; the "" key documents the type itself.
func (ServiceAccountVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                   "ServiceAccountVolumeSource adapts a ServiceAccount into a volume.",
		"serviceAccountName": "Name of the service account in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
	}
}

// SwaggerDoc returns the swagger documentation strings for DownwardMetricsVolumeSource;
// the "" key documents the type itself.
func (DownwardMetricsVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DownwardMetricsVolumeSource adds a very small disk to VMIs which contains a limited view of host and guest\nmetrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.",
	}
}

// SwaggerDoc returns the swagger documentation strings for SysprepSource,
// keyed by JSON field name; the "" key documents the type itself.
func (SysprepSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":          "Represents a Sysprep volume source.",
		"secret":    "Secret references a k8s Secret that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.\n+ optional",
		"configMap": "ConfigMap references a ConfigMap that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.\n+ optional",
	}
}
// SwaggerDoc returns the swagger documentation strings for CloudInitNoCloudSource,
// keyed by JSON field name; the "" key documents the type itself.
func (CloudInitNoCloudSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                     "Represents a cloud-init nocloud user data source.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html",
		"secretRef":            "UserDataSecretRef references a k8s secret that contains NoCloud userdata.\n+ optional",
		"userDataBase64":       "UserDataBase64 contains NoCloud cloud-init userdata as a base64 encoded string.\n+ optional",
		"userData":             "UserData contains NoCloud inline cloud-init userdata.\n+ optional",
		"networkDataSecretRef": "NetworkDataSecretRef references a k8s secret that contains NoCloud networkdata.\n+ optional",
		"networkDataBase64":    "NetworkDataBase64 contains NoCloud cloud-init networkdata as a base64 encoded string.\n+ optional",
		"networkData":          "NetworkData contains NoCloud inline cloud-init networkdata.\n+ optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for CloudInitConfigDriveSource,
// keyed by JSON field name; the "" key documents the type itself.
func (CloudInitConfigDriveSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                     "Represents a cloud-init config drive user data source.\nMore info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html",
		"secretRef":            "UserDataSecretRef references a k8s secret that contains config drive userdata.\n+ optional",
		"userDataBase64":       "UserDataBase64 contains config drive cloud-init userdata as a base64 encoded string.\n+ optional",
		"userData":             "UserData contains config drive inline cloud-init userdata.\n+ optional",
		"networkDataSecretRef": "NetworkDataSecretRef references a k8s secret that contains config drive networkdata.\n+ optional",
		"networkDataBase64":    "NetworkDataBase64 contains config drive cloud-init networkdata as a base64 encoded string.\n+ optional",
		"networkData":          "NetworkData contains config drive inline cloud-init networkdata.\n+ optional",
	}
}
// SwaggerDoc returns the swagger documentation strings for DomainSpec,
// keyed by JSON field name.
func (DomainSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"resources":       "Resources describes the Compute Resources required by this vmi.",
		"cpu":             "CPU allow specified the detailed CPU topology inside the vmi.\n+optional",
		"memory":          "Memory allow specifying the VMI memory features.\n+optional",
		"machine":         "Machine type.\n+optional",
		"firmware":        "Firmware.\n+optional",
		"clock":           "Clock sets the clock and timers of the vmi.\n+optional",
		"features":        "Features like acpi, apic, hyperv, smm.\n+optional",
		"devices":         "Devices allows adding disks, network interfaces, and others",
		"ioThreadsPolicy": "Controls whether or not disks will share IOThreads.\nOmitting IOThreadsPolicy disables use of IOThreads.\nOne of: shared, auto, supplementalPool\n+optional",
		"ioThreads":       "IOThreads specifies the IOThreads options.\n+optional",
		"chassis":         "Chassis specifies the chassis info passed to the domain.\n+optional",
		"launchSecurity":  "Launch Security setting of the vmi.\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for Chassis;
// the "" key documents the type itself.
func (Chassis) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "Chassis specifies the chassis info passed to the domain.",
	}
}

// SwaggerDoc returns the swagger documentation strings for Bootloader,
// keyed by JSON field name; the "" key documents the type itself.
func (Bootloader) SwaggerDoc() map[string]string {
	return map[string]string{
		"":     "Represents the firmware blob used to assist in the domain creation process.\nUsed for setting the QEMU BIOS file path for the libvirt domain.",
		"bios": "If set (default), BIOS will be used.\n+optional",
		"efi":  "If set, EFI will be used instead of BIOS.\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for BIOS,
// keyed by JSON field name; the "" key documents the type itself.
func (BIOS) SwaggerDoc() map[string]string {
	return map[string]string{
		"":          "If set (default), BIOS will be used.",
		"useSerial": "If set, the BIOS output will be transmitted over serial\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for EFI,
// keyed by JSON field name; the "" key documents the type itself.
func (EFI) SwaggerDoc() map[string]string {
	return map[string]string{
		"":           "If set, EFI will be used instead of BIOS.",
		"secureBoot": "If set, SecureBoot will be enabled and the OVMF roms will be swapped for\nSecureBoot-enabled ones.\nRequires SMM to be enabled.\nDefaults to true\n+optional",
		"persistent": "If set to true, Persistent will persist the EFI NVRAM across reboots.\nDefaults to false\n+optional",
	}
}
// SwaggerDoc returns the swagger documentation strings for KernelBootContainer,
// keyed by JSON field name; the "" key documents the type itself.
func (KernelBootContainer) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                "If set, the VM will be booted from the defined kernel / initrd.",
		"image":           "Image that contains initrd / kernel files.",
		"imagePullSecret": "ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.\n+optional",
		"imagePullPolicy": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional",
		"kernelPath":      "The fully-qualified path to the kernel image in the host OS\n+optional",
		"initrdPath":      "the fully-qualified path to the ramdisk image in the host OS\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for KernelBoot,
// keyed by JSON field name; the "" key documents the type itself.
func (KernelBoot) SwaggerDoc() map[string]string {
	return map[string]string{
		"":           "Represents the firmware blob used to assist in the kernel boot process.\nUsed for setting the kernel, initrd and command line arguments",
		"kernelArgs": "Arguments to be passed to the kernel at boot time",
		"container":  "Container defines the container that containes kernel artifacts",
	}
}

// SwaggerDoc returns the swagger documentation strings for ResourceRequirements,
// keyed by JSON field name.
func (ResourceRequirements) SwaggerDoc() map[string]string {
	return map[string]string{
		"requests":                "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
		"limits":                  "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
		"overcommitGuestOverhead": "Don't ask the scheduler to take the guest-management overhead into account. Instead\nput the overhead only into the container's memory limit. This can lead to crashes if\nall memory is in use on a node. Defaults to false.",
	}
}

// SwaggerDoc returns the swagger documentation strings for CPU,
// keyed by JSON field name; the "" key documents the type itself.
func (CPU) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                      "CPU allows specifying the CPU topology.",
		"cores":                 "Cores specifies the number of cores inside the vmi.\nMust be a value greater or equal 1.",
		"sockets":               "Sockets specifies the number of sockets inside the vmi.\nMust be a value greater or equal 1.",
		"maxSockets":            "MaxSockets specifies the maximum amount of sockets that can\nbe hotplugged",
		"threads":               "Threads specifies the number of threads inside the vmi.\nMust be a value greater or equal 1.",
		"model":                 "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/tree/master/src/cpu_map.\nIt is possible to specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nDefaults to host-model.\n+optional",
		"features":              "Features specifies the CPU features list inside the VMI.\n+optional",
		"dedicatedCpuPlacement": "DedicatedCPUPlacement requests the scheduler to place the VirtualMachineInstance on a node\nwith enough dedicated pCPUs and pin the vCPUs to it.\n+optional",
		"numa":                  "NUMA allows specifying settings for the guest NUMA topology\n+optional",
		"isolateEmulatorThread": "IsolateEmulatorThread requests one more dedicated pCPU to be allocated for the VMI to place\nthe emulator thread on it.\n+optional",
		"realtime":              "Realtime instructs the virt-launcher to tune the VMI for lower latency, optional for real time workloads\n+optional",
	}
}
func (Realtime) SwaggerDoc() map[string]string {
return map[string]string{
"": "Realtime holds the tuning knobs specific for realtime workloads.",
"mask": "Mask defines the vcpu mask expression that defines which vcpus are used for realtime. Format matches libvirt's expressions.\nExample: \"0-3,^1\",\"0,2,3\",\"2-3\"\n+optional",
}
}
func (NUMAGuestMappingPassthrough) SwaggerDoc() map[string]string {
return map[string]string{
"": "NUMAGuestMappingPassthrough instructs kubevirt to model numa topology which is compatible with the CPU pinning on the guest.\nThis will result in a subset of the node numa topology being passed through, ensuring that virtual numa nodes and their memory\nnever cross boundaries coming from the node numa mapping.",
}
}
func (NUMA) SwaggerDoc() map[string]string {
return map[string]string{
"guestMappingPassthrough": "GuestMappingPassthrough will create an efficient guest topology based on host CPUs exclusively assigned to a pod.\nThe created topology ensures that memory and CPUs on the virtual numa nodes never cross boundaries of host numa nodes.\n+optional",
}
}
func (CPUFeature) SwaggerDoc() map[string]string {
return map[string]string{
"": "CPUFeature allows specifying a CPU feature.",
"name": "Name of the CPU feature",
"policy": "Policy is the CPU feature attribute which can have the following attributes:\nforce - The virtual CPU will claim the feature is supported regardless of it being supported by host CPU.\nrequire - Guest creation will fail unless the feature is supported by the host CPU or the hypervisor is able to emulate it.\noptional - The feature will be supported by virtual CPU if and only if it is supported by host CPU.\ndisable - The feature will not be supported by virtual CPU.\nforbid - Guest creation will fail if the feature is supported by host CPU.\nDefaults to require\n+optional",
}
}
func (Memory) SwaggerDoc() map[string]string {
return map[string]string{
"": "Memory allows specifying the VirtualMachineInstance memory features.",
"hugepages": "Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.\n+optional",
"guest": "Guest allows to specifying the amount of memory which is visible inside the Guest OS.\nThe Guest must lie between Requests and Limits from the resources section.\nDefaults to the requested memory in the resources section if not specified.\n+ optional",
"maxGuest": "MaxGuest allows to specify the maximum amount of memory which is visible inside the Guest OS.\nThe delta between MaxGuest and Guest is the amount of memory that can be hot(un)plugged.",
}
}
func (MemoryStatus) SwaggerDoc() map[string]string {
return map[string]string{
"guestAtBoot": "GuestAtBoot specifies with how much memory the VirtualMachine intiallly booted with.\n+optional",
"guestCurrent": "GuestCurrent specifies how much memory is currently available for the VirtualMachine.\n+optional",
"guestRequested": "GuestRequested specifies how much memory was requested (hotplug) for the VirtualMachine.\n+optional",
}
}
func (Hugepages) SwaggerDoc() map[string]string {
return map[string]string{
"": "Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.",
"pageSize": "PageSize specifies the hugepage size, for x86_64 architecture valid values are 1Gi and 2Mi.",
}
}
func (Machine) SwaggerDoc() map[string]string {
return map[string]string{
"type": "QEMU machine type is the actual chipset of the VirtualMachineInstance.\n+optional",
}
}
func (Firmware) SwaggerDoc() map[string]string {
return map[string]string{
"uuid": "UUID reported by the vmi bios.\nDefaults to a random generated uid.",
"bootloader": "Settings to control the bootloader that is used.\n+optional",
"serial": "The system-serial-number in SMBIOS",
"kernelBoot": "Settings to set the kernel for booting.\n+optional",
"acpi": "Information that can be set in the ACPI table",
}
}
func (ACPI) SwaggerDoc() map[string]string {
return map[string]string{
"slicNameRef": "SlicNameRef should match the volume name of a secret object. The data in the secret should\nbe a binary blob that follows the ACPI SLIC standard, see:\nhttps://learn.microsoft.com/en-us/previous-versions/windows/hardware/design/dn653305(v=vs.85)",
"msdmNameRef": "Similar to SlicNameRef, another ACPI entry that is used in more recent Windows versions.\nThe above points to the spec of MSDM too.",
}
}
func (Devices) SwaggerDoc() map[string]string {
return map[string]string{
"useVirtioTransitional": "Fall back to legacy virtio 0.9 support if virtio bus is selected on devices.\nThis is helpful for old machines like CentOS6 or RHEL6 which\ndo not understand virtio_non_transitional (virtio 1.0).",
"disableHotplug": "DisableHotplug disabled the ability to hotplug disks.",
"disks": "Disks describes disks, cdroms and luns which are connected to the vmi.\n+kubebuilder:validation:MaxItems:=256",
"watchdog": "Watchdog describes a watchdog device which can be added to the vmi.",
"interfaces": "Interfaces describe network interfaces which are added to the vmi.\n+kubebuilder:validation:MaxItems:=256",
"inputs": "Inputs describe input devices",
"autoattachPodInterface": "Whether to attach a pod network interface. Defaults to true.",
"autoattachGraphicsDevice": "Whether to attach the default graphics device or not.\nVNC will not be available if set to false. Defaults to true.",
"autoattachSerialConsole": "Whether to attach the default virtio-serial console or not.\nSerial console access will not be available if set to false. Defaults to true.",
"logSerialConsole": "Whether to log the auto-attached default serial console or not.\nSerial console logs will be collect to a file and then streamed from a named `guest-console-log`.\nNot relevant if autoattachSerialConsole is disabled.\nDefaults to cluster wide setting on VirtualMachineOptions.",
"autoattachMemBalloon": "Whether to attach the Memory balloon device with default period.\nPeriod can be adjusted in virt-config.\nDefaults to true.\n+optional",
"autoattachInputDevice": "Whether to attach an Input Device.\nDefaults to false.\n+optional",
"autoattachVSOCK": "Whether to attach the VSOCK CID to the VM or not.\nVSOCK access will be available if set to true. Defaults to false.",
"rng": "Whether to have random number generator from host\n+optional",
"blockMultiQueue": "Whether or not to enable virtio multi-queue for block devices.\nDefaults to false.\n+optional",
"networkInterfaceMultiqueue": "If specified, virtual network interfaces configured with a virtio bus will also enable the vhost multiqueue feature for network devices. The number of queues created depends on additional factors of the VirtualMachineInstance, like the number of guest CPUs.\n+optional",
"gpus": "Whether to attach a GPU device to the vmi.\n+optional\n+listType=atomic",
"downwardMetrics": "DownwardMetrics creates a virtio serials for exposing the downward metrics to the vmi.\n+optional",
"panicDevices": "PanicDevices provides additional crash information when a guest crashes.\n+optional\n+listtype=atomic",
"filesystems": "Filesystems describes filesystem which is connected to the vmi.\n+optional\n+listType=atomic",
"hostDevices": "Whether to attach a host device to the vmi.\n+optional\n+listType=atomic",
"clientPassthrough": "To configure and access client devices such as redirecting USB\n+optional",
"sound": "Whether to emulate a sound device.\n+optional",
"tpm": "Whether to emulate a TPM device.\n+optional",
"video": "Video describes the video device configuration for the vmi.\n+optional",
}
}
func (ClientPassthroughDevices) SwaggerDoc() map[string]string {
return map[string]string{
"": "Represent a subset of client devices that can be accessed by VMI. At the\nmoment only, USB devices using Usbredir's library and tooling. Another fit\nwould be a smartcard with libcacard.\n\nThe struct is currently empty as there is no immediate request for\nuser-facing APIs. This structure simply turns on USB redirection of\nUsbClientPassthroughMaxNumberOf devices.",
}
}
func (SoundDevice) SwaggerDoc() map[string]string {
return map[string]string{
"": "Represents the user's configuration to emulate sound cards in the VMI.",
"name": "User's defined name for this sound device",
"model": "We only support ich9 or ac97.\nIf SoundDevice is not set: No sound card is emulated.\nIf SoundDevice is set but Model is not: ich9\n+optional",
}
}
func (TPMDevice) SwaggerDoc() map[string]string {
return map[string]string{
"enabled": "Enabled allows a user to explicitly disable the vTPM even when one is enabled by a preference referenced by the VirtualMachine\nDefaults to True",
"persistent": "Persistent indicates the state of the TPM device should be kept accross reboots\nDefaults to false",
}
}
func (VideoDevice) SwaggerDoc() map[string]string {
return map[string]string{
"type": "Type specifies the video device type (e.g., virtio, vga, bochs, ramfb).\nIf not specified, the default is architecture-dependent (VGA for BIOS-based VMs, Bochs for EFI-based VMs on AMD64; virtio for Arm and s390x).\n+optional",
}
}
func (Input) SwaggerDoc() map[string]string {
return map[string]string{
"bus": "Bus indicates the bus of input device to emulate.\nSupported values: virtio, usb.",
"type": "Type indicated the type of input device.\nSupported values: tablet.",
"name": "Name is the device name",
}
}
func (Filesystem) SwaggerDoc() map[string]string {
return map[string]string{
"name": "Name is the device name",
"virtiofs": "Virtiofs is supported",
}
}
func (FilesystemVirtiofs) SwaggerDoc() map[string]string {
return map[string]string{}
}
func (DownwardMetrics) SwaggerDoc() map[string]string {
return map[string]string{}
}
func (GPU) SwaggerDoc() map[string]string {
return map[string]string{
"name": "Name of the GPU device as exposed by a device plugin",
"deviceName": "DeviceName is the name of the device provisioned by device-plugins",
"tag": "If specified, the virtual network interface address and its tag will be provided to the guest via config drive\n+optional",
}
}
func (ClaimRequest) SwaggerDoc() map[string]string {
return map[string]string{
"claimName": "ClaimName needs to be provided from the list vmi.spec.resourceClaims[].name where this\ndevice is allocated\n+optional",
"requestName": "RequestName needs to be provided from resourceClaim.spec.devices.requests[].name where this\ndevice is requested\n+optional",
}
}
func (VGPUOptions) SwaggerDoc() map[string]string {
return map[string]string{}
}
func (VGPUDisplayOptions) SwaggerDoc() map[string]string {
return map[string]string{
"enabled": "Enabled determines if a display addapter backed by a vGPU should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
"ramFB": "Enables a boot framebuffer, until the guest OS loads a real GPU driver\nDefaults to true.\n+optional",
}
}
func (PanicDevice) SwaggerDoc() map[string]string {
return map[string]string{
"model": "Model specifies what type of panic device is provided.\nThe panic model used when this attribute is missing depends on the hypervisor and guest arch.\nOne of: isa, hyperv, pvpanic.\n+optional",
}
}
func (HostDevice) SwaggerDoc() map[string]string {
return map[string]string{
"deviceName": "DeviceName is the name of the device provisioned by device-plugins",
"tag": "If specified, the virtual network interface address and its tag will be provided to the guest via config drive\n+optional",
}
}
func (Disk) SwaggerDoc() map[string]string {
return map[string]string{
"name": "Name is the device name",
"bootOrder": "BootOrder is an integer value > 0, used to determine ordering of boot devices.\nLower values take precedence.\nEach disk or interface that has a boot order must have a unique value.\nDisks without a boot order are not tried if a disk with a boot order exists.\n+optional",
"serial": "Serial provides the ability to specify a serial number for the disk device.\n+optional",
"dedicatedIOThread": "dedicatedIOThread indicates this disk should have an exclusive IO Thread.\nEnabling this implies useIOThreads = true.\nDefaults to false.\n+optional",
"cache": "Cache specifies which kvm disk cache mode should be used.\nSupported values are:\nnone: Guest I/O not cached on the host, but may be kept in a disk cache.\nwritethrough: Guest I/O cached on the host but written through to the physical medium. Slowest but with most guarantees.\nwriteback: Guest I/O cached on the host.\nDefaults to none if the storage supports O_DIRECT, otherwise writethrough.\n+optional",
"io": "IO specifies which QEMU disk IO mode should be used.\nSupported values are: native, default, threads.\n+optional",
"tag": "If specified, disk address and its tag will be provided to the guest via config drive metadata\n+optional",
"blockSize": "If specified, the virtual disk will be presented with the given block sizes.\n+optional",
"shareable": "If specified the disk is made sharable and multiple write from different VMs are permitted\n+optional",
"errorPolicy": "If specified, it can change the default error policy (stop) for the disk\n+optional",
}
}
func (CustomBlockSize) SwaggerDoc() map[string]string {
return map[string]string{
"": "CustomBlockSize represents the desired logical and physical block size for a VM disk.",
}
}
func (BlockSize) SwaggerDoc() map[string]string {
return map[string]string{
"": "BlockSize provides the option to change the block size presented to the VM for a disk.\nOnly one of its members may be specified.",
}
}
func (DiskDevice) SwaggerDoc() map[string]string {
return map[string]string{
"": "Represents the target of a volume to mount.\nOnly one of its members may be specified.",
"disk": "Attach a volume as a disk to the vmi.",
"lun": "Attach a volume as a LUN to the vmi.",
"cdrom": "Attach a volume as a cdrom to the vmi.",
}
}
func (DiskTarget) SwaggerDoc() map[string]string {
return map[string]string{
"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi, usb.",
"readonly": "ReadOnly.\nDefaults to false.",
"pciAddress": "If specified, the virtual disk will be placed on the guests pci address with the specified PCI address. For example: 0000:81:01.10\n+optional",
}
}
func (LaunchSecurity) SwaggerDoc() map[string]string {
return map[string]string{
"sev": "AMD Secure Encrypted Virtualization (SEV).",
}
}
func (SEV) SwaggerDoc() map[string]string {
return map[string]string{
"policy": "Guest policy flags as defined in AMD SEV API specification.\nNote: due to security reasons it is not allowed to enable guest debugging. Therefore NoDebug flag is not exposed to users and is always true.",
"attestation": "If specified, run the attestation process for a vmi.\n+optional",
"session": "Base64 encoded session blob.",
"dhCert": "Base64 encoded guest owner's Diffie-Hellman key.",
}
}
func (SEVPolicy) SwaggerDoc() map[string]string {
return map[string]string{
"encryptedState": "SEV-ES is required.\nDefaults to false.\n+optional",
}
}
func (SEVAttestation) SwaggerDoc() map[string]string {
return map[string]string{}
}
func (LunTarget) SwaggerDoc() map[string]string {
return map[string]string{
"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi.",
"readonly": "ReadOnly.\nDefaults to false.",
"reservation": "Reservation indicates if the disk needs to support the persistent reservation for the SCSI disk",
}
}
func (CDRomTarget) SwaggerDoc() map[string]string {
return map[string]string{
"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi.",
"readonly": "ReadOnly.\nDefaults to true.",
"tray": "Tray indicates if the tray of the device is open or closed.\nAllowed values are \"open\" and \"closed\".\nDefaults to closed.\n+optional",
}
}
func (Volume) SwaggerDoc() map[string]string {
return map[string]string{
"": "Volume represents a named volume in a vmi.",
"name": "Volume's name.\nMust be a DNS_LABEL and unique within the vmi.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
}
}
func (VolumeSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "Represents the source of a volume to mount.\nOnly one of its members may be specified.",
"hostDisk": "HostDisk represents a disk created on the cluster level\n+optional",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
"cloudInitNoCloud": "CloudInitNoCloud represents a cloud-init NoCloud user-data source.\nThe NoCloud data will be added as a disk to the vmi. A proper cloud-init installation is required inside the guest.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html\n+optional",
"cloudInitConfigDrive": "CloudInitConfigDrive represents a cloud-init Config Drive user-data source.\nThe Config Drive data will be added as a disk to the vmi. A proper cloud-init installation is required inside the guest.\nMore info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html\n+optional",
"sysprep": "Represents a Sysprep volume source.\n+optional",
"containerDisk": "ContainerDisk references a docker image, embedding a qcow or raw disk.\nMore info: https://kubevirt.gitbooks.io/user-guide/registry-disk.html\n+optional",
"ephemeral": "Ephemeral is a special volume source that \"wraps\" specified source and provides copy-on-write image on top of it.\n+optional",
"emptyDisk": "EmptyDisk represents a temporary disk which shares the vmis lifecycle.\nMore info: https://kubevirt.gitbooks.io/user-guide/disks-and-volumes.html\n+optional",
"dataVolume": "DataVolume represents the dynamic creation a PVC for this volume as well as\nthe process of populating that PVC with a disk image.\n+optional",
"configMap": "ConfigMapSource represents a reference to a ConfigMap in the same namespace.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/\n+optional",
"secret": "SecretVolumeSource represents a reference to a secret data in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/configuration/secret/\n+optional",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional",
"serviceAccount": "ServiceAccountVolumeSource represents a reference to a service account.\nThere can only be one volume of this type!\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/\n+optional",
"downwardMetrics": "DownwardMetrics adds a very small disk to VMIs which contains a limited view of host and guest\nmetrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.",
"memoryDump": "MemoryDump is attached to the virt launcher and is populated with a memory dump of the vmi",
}
}
func (HotplugVolumeSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "HotplugVolumeSource Represents the source of a volume to mount which are capable\nof being hotplugged on a live running VMI.\nOnly one of its members may be specified.",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
"dataVolume": "DataVolume represents the dynamic creation a PVC for this volume as well as\nthe process of populating that PVC with a disk image.\n+optional",
}
}
func (DataVolumeSource) SwaggerDoc() map[string]string {
return map[string]string{
"name": "Name of both the DataVolume and the PVC in the same namespace.",
"hotpluggable": "Hotpluggable indicates whether the volume can be hotplugged and hotunplugged.\n+optional",
}
}
func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
"hotpluggable": "Hotpluggable indicates whether the volume can be hotplugged and hotunplugged.\n+optional",
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/doc.go | // +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
// +groupName=kubevirt.io
// +versionName=v1
// Package v1 is the v1 version of the API.
package v1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/sanitizers.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/sanitizers.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2022 Red Hat, Inc.
*
*/
package v1
import (
"fmt"
netutils "k8s.io/utils/net"
)
func sanitizeIP(address string) (string, error) {
sanitizedAddress := netutils.ParseIPSloppy(address)
if sanitizedAddress == nil {
return "", fmt.Errorf("not a valid IP address")
}
return sanitizedAddress.String(), nil
}
func sanitizeCIDR(cidr string) (string, error) {
ip, net, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return "", err
}
netMaskSize, _ := net.Mask.Size()
return fmt.Sprintf("%s/%d", ip.String(), netMaskSize), nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/schema.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/api/core/v1/schema.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017, 2018 Red Hat, Inc.
*
*/
package v1
import (
"encoding/json"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
)
type IOThreadsPolicy string
const (
IOThreadsPolicyShared IOThreadsPolicy = "shared"
IOThreadsPolicyAuto IOThreadsPolicy = "auto"
IOThreadsPolicySupplementalPool IOThreadsPolicy = "supplementalPool"
CPUModeHostPassthrough = "host-passthrough"
CPUModeHostModel = "host-model"
DefaultCPUModel = CPUModeHostModel
)
const HotplugDiskDir = "/var/run/kubevirt/hotplug-disks/"
type DiskErrorPolicy string
const (
DiskErrorPolicyStop DiskErrorPolicy = "stop"
DiskErrorPolicyIgnore DiskErrorPolicy = "ignore"
DiskErrorPolicyReport DiskErrorPolicy = "report"
DiskErrorPolicyEnospace DiskErrorPolicy = "enospace"
)
type PanicDeviceModel string
const (
Hyperv PanicDeviceModel = "hyperv"
Isa PanicDeviceModel = "isa"
Pvpanic PanicDeviceModel = "pvpanic"
)
/*
ATTENTION: Rerun code generators when comments on structs or fields are modified.
*/
// Represents a disk created on the cluster level
type HostDisk struct {
// The path to HostDisk image located on the cluster
Path string `json:"path"`
// Contains information if disk.img exists or should be created
// allowed options are 'Disk' and 'DiskOrCreate'
Type HostDiskType `json:"type"`
// Capacity of the sparse disk
// +optional
Capacity resource.Quantity `json:"capacity,omitempty"`
// Shared indicate whether the path is shared between nodes
Shared *bool `json:"shared,omitempty"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
// More info: https://kubernetes.io/docs/concepts/storage/volumes/#configmap
type ConfigMapVolumeSource struct {
v1.LocalObjectReference `json:",inline"`
// Specify whether the ConfigMap or it's keys must be defined
// +optional
Optional *bool `json:"optional,omitempty"`
// The volume label of the resulting disk inside the VMI.
// Different bootstrapping mechanisms require different values.
// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
// +optional
VolumeLabel string `json:"volumeLabel,omitempty"`
}
// SecretVolumeSource adapts a Secret into a volume.
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
SecretName string `json:"secretName,omitempty"`
// Specify whether the Secret or it's keys must be defined
// +optional
Optional *bool `json:"optional,omitempty"`
// The volume label of the resulting disk inside the VMI.
// Different bootstrapping mechanisms require different values.
// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
// +optional
VolumeLabel string `json:"volumeLabel,omitempty"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
type DownwardAPIVolumeSource struct {
// Fields is a list of downward API volume file
// +optional
Fields []v1.DownwardAPIVolumeFile `json:"fields,omitempty"`
// The volume label of the resulting disk inside the VMI.
// Different bootstrapping mechanisms require different values.
// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
// +optional
VolumeLabel string `json:"volumeLabel,omitempty"`
}
// ServiceAccountVolumeSource adapts a ServiceAccount into a volume.
type ServiceAccountVolumeSource struct {
// Name of the service account in the pod's namespace to use.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
ServiceAccountName string `json:"serviceAccountName,omitempty"`
}
// DownwardMetricsVolumeSource adds a very small disk to VMIs which contains a limited view of host and guest
// metrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.
type DownwardMetricsVolumeSource struct {
}
// Represents a Sysprep volume source.
type SysprepSource struct {
// Secret references a k8s Secret that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.
// + optional
Secret *v1.LocalObjectReference `json:"secret,omitempty"`
// ConfigMap references a ConfigMap that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.
// + optional
ConfigMap *v1.LocalObjectReference `json:"configMap,omitempty"`
}
// Represents a cloud-init nocloud user data source.
// More info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
type CloudInitNoCloudSource struct {
// UserDataSecretRef references a k8s secret that contains NoCloud userdata.
// + optional
UserDataSecretRef *v1.LocalObjectReference `json:"secretRef,omitempty"`
// UserDataBase64 contains NoCloud cloud-init userdata as a base64 encoded string.
// + optional
UserDataBase64 string `json:"userDataBase64,omitempty"`
// UserData contains NoCloud inline cloud-init userdata.
// + optional
UserData string `json:"userData,omitempty"`
// NetworkDataSecretRef references a k8s secret that contains NoCloud networkdata.
// + optional
NetworkDataSecretRef *v1.LocalObjectReference `json:"networkDataSecretRef,omitempty"`
// NetworkDataBase64 contains NoCloud cloud-init networkdata as a base64 encoded string.
// + optional
NetworkDataBase64 string `json:"networkDataBase64,omitempty"`
// NetworkData contains NoCloud inline cloud-init networkdata.
// + optional
NetworkData string `json:"networkData,omitempty"`
}
// Represents a cloud-init config drive user data source.
// More info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html
type CloudInitConfigDriveSource struct {
// UserDataSecretRef references a k8s secret that contains config drive userdata.
// + optional
UserDataSecretRef *v1.LocalObjectReference `json:"secretRef,omitempty"`
// UserDataBase64 contains config drive cloud-init userdata as a base64 encoded string.
// + optional
UserDataBase64 string `json:"userDataBase64,omitempty"`
// UserData contains config drive inline cloud-init userdata.
// + optional
UserData string `json:"userData,omitempty"`
// NetworkDataSecretRef references a k8s secret that contains config drive networkdata.
// + optional
NetworkDataSecretRef *v1.LocalObjectReference `json:"networkDataSecretRef,omitempty"`
// NetworkDataBase64 contains config drive cloud-init networkdata as a base64 encoded string.
// + optional
NetworkDataBase64 string `json:"networkDataBase64,omitempty"`
// NetworkData contains config drive inline cloud-init networkdata.
// + optional
NetworkData string `json:"networkData,omitempty"`
}
type DomainSpec struct {
// Resources describes the Compute Resources required by this vmi.
Resources ResourceRequirements `json:"resources,omitempty"`
// CPU allow specified the detailed CPU topology inside the vmi.
// +optional
CPU *CPU `json:"cpu,omitempty"`
// Memory allow specifying the VMI memory features.
// +optional
Memory *Memory `json:"memory,omitempty"`
// Machine type.
// +optional
Machine *Machine `json:"machine,omitempty"`
// Firmware.
// +optional
Firmware *Firmware `json:"firmware,omitempty"`
// Clock sets the clock and timers of the vmi.
// +optional
Clock *Clock `json:"clock,omitempty"`
// Features like acpi, apic, hyperv, smm.
// +optional
Features *Features `json:"features,omitempty"`
// Devices allows adding disks, network interfaces, and others
Devices Devices `json:"devices"`
// Controls whether or not disks will share IOThreads.
// Omitting IOThreadsPolicy disables use of IOThreads.
// One of: shared, auto, supplementalPool
// +optional
IOThreadsPolicy *IOThreadsPolicy `json:"ioThreadsPolicy,omitempty"`
// IOThreads specifies the IOThreads options.
// +optional
IOThreads *DiskIOThreads `json:"ioThreads,omitempty"`
// Chassis specifies the chassis info passed to the domain.
// +optional
Chassis *Chassis `json:"chassis,omitempty"`
// Launch Security setting of the vmi.
// +optional
LaunchSecurity *LaunchSecurity `json:"launchSecurity,omitempty"`
}
// Chassis specifies the chassis info passed to the domain.
type Chassis struct {
Manufacturer string `json:"manufacturer,omitempty"`
Version string `json:"version,omitempty"`
Serial string `json:"serial,omitempty"`
Asset string `json:"asset,omitempty"`
Sku string `json:"sku,omitempty"`
}
// Bootloader represents the firmware blob used to assist in the domain creation process.
// Used for setting the QEMU BIOS file path for the libvirt domain.
type Bootloader struct {
	// If set (default), BIOS will be used.
	// +optional
	BIOS *BIOS `json:"bios,omitempty"`
	// If set, EFI will be used instead of BIOS.
	// +optional
	EFI *EFI `json:"efi,omitempty"`
}

// BIOS is used when BIOS firmware (the default) should boot the domain.
type BIOS struct {
	// If set, the BIOS output will be transmitted over serial
	// +optional
	UseSerial *bool `json:"useSerial,omitempty"`
}

// EFI is used when EFI firmware should boot the domain instead of BIOS.
type EFI struct {
	// If set, SecureBoot will be enabled and the OVMF roms will be swapped for
	// SecureBoot-enabled ones.
	// Requires SMM to be enabled.
	// Defaults to true
	// +optional
	SecureBoot *bool `json:"secureBoot,omitempty"`
	// If set to true, Persistent will persist the EFI NVRAM across reboots.
	// Defaults to false
	// +optional
	Persistent *bool `json:"persistent,omitempty"`
}
// KernelBootContainer is used when the VM should be booted from kernel / initrd
// artifacts shipped inside a container image.
type KernelBootContainer struct {
	// Image that contains initrd / kernel files.
	Image string `json:"image"`
	// ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.
	//+optional
	ImagePullSecret string `json:"imagePullSecret,omitempty"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
	// The fully-qualified path to the kernel image in the host OS
	//+optional
	KernelPath string `json:"kernelPath,omitempty"`
	// The fully-qualified path to the ramdisk image in the host OS
	//+optional
	InitrdPath string `json:"initrdPath,omitempty"`
}

// KernelBoot represents the firmware blob used to assist in the kernel boot process.
// Used for setting the kernel, initrd and command line arguments
type KernelBoot struct {
	// Arguments to be passed to the kernel at boot time
	KernelArgs string `json:"kernelArgs,omitempty"`
	// Container defines the container that contains kernel artifacts
	Container *KernelBootContainer `json:"container,omitempty"`
}
// ResourceRequirements describes the Compute Resources required by the vmi.
type ResourceRequirements struct {
	// Requests is a description of the initial vmi resources.
	// Valid resource keys are "memory" and "cpu".
	// +optional
	Requests v1.ResourceList `json:"requests,omitempty"`
	// Limits describes the maximum amount of compute resources allowed.
	// Valid resource keys are "memory" and "cpu".
	// +optional
	Limits v1.ResourceList `json:"limits,omitempty"`
	// Don't ask the scheduler to take the guest-management overhead into account. Instead
	// put the overhead only into the container's memory limit. This can lead to crashes if
	// all memory is in use on a node. Defaults to false.
	OvercommitGuestOverhead bool `json:"overcommitGuestOverhead,omitempty"`
}
// CPU allows specifying the CPU topology.
type CPU struct {
	// Cores specifies the number of cores inside the vmi.
	// Must be a value greater than or equal to 1.
	Cores uint32 `json:"cores,omitempty"`
	// Sockets specifies the number of sockets inside the vmi.
	// Must be a value greater than or equal to 1.
	Sockets uint32 `json:"sockets,omitempty"`
	// MaxSockets specifies the maximum amount of sockets that can
	// be hotplugged
	MaxSockets uint32 `json:"maxSockets,omitempty"`
	// Threads specifies the number of threads inside the vmi.
	// Must be a value greater than or equal to 1.
	Threads uint32 `json:"threads,omitempty"`
	// Model specifies the CPU model inside the VMI.
	// List of available models https://github.com/libvirt/libvirt/tree/master/src/cpu_map.
	// It is possible to specify special cases like "host-passthrough" to get the same CPU as the node
	// and "host-model" to get CPU closest to the node one.
	// Defaults to host-model.
	// +optional
	Model string `json:"model,omitempty"`
	// Features specifies the CPU features list inside the VMI.
	// +optional
	Features []CPUFeature `json:"features,omitempty"`
	// DedicatedCPUPlacement requests the scheduler to place the VirtualMachineInstance on a node
	// with enough dedicated pCPUs and pin the vCPUs to it.
	// +optional
	DedicatedCPUPlacement bool `json:"dedicatedCpuPlacement,omitempty"`
	// NUMA allows specifying settings for the guest NUMA topology
	// +optional
	NUMA *NUMA `json:"numa,omitempty"`
	// IsolateEmulatorThread requests one more dedicated pCPU to be allocated for the VMI to place
	// the emulator thread on it.
	// +optional
	IsolateEmulatorThread bool `json:"isolateEmulatorThread,omitempty"`
	// Realtime instructs the virt-launcher to tune the VMI for lower latency, optional for real time workloads
	// +optional
	Realtime *Realtime `json:"realtime,omitempty"`
}
// Realtime holds the tuning knobs specific for realtime workloads.
type Realtime struct {
	// Mask defines the vcpu mask expression that defines which vcpus are used for realtime. Format matches libvirt's expressions.
	// Example: "0-3,^1","0,2,3","2-3"
	// +optional
	Mask string `json:"mask,omitempty"`
}

// NUMAGuestMappingPassthrough instructs kubevirt to model a numa topology which is compatible with the CPU pinning on the guest.
// This will result in a subset of the node numa topology being passed through, ensuring that virtual numa nodes and their memory
// never cross boundaries coming from the node numa mapping.
type NUMAGuestMappingPassthrough struct {
}

// NUMA allows specifying settings for the guest NUMA topology.
type NUMA struct {
	// GuestMappingPassthrough will create an efficient guest topology based on host CPUs exclusively assigned to a pod.
	// The created topology ensures that memory and CPUs on the virtual numa nodes never cross boundaries of host numa nodes.
	// +optional
	GuestMappingPassthrough *NUMAGuestMappingPassthrough `json:"guestMappingPassthrough,omitempty"`
}
// CPUFeature allows specifying a CPU feature.
type CPUFeature struct {
	// Name of the CPU feature
	Name string `json:"name"`
	// Policy is the CPU feature attribute which can have the following values:
	// force - The virtual CPU will claim the feature is supported regardless of it being supported by host CPU.
	// require - Guest creation will fail unless the feature is supported by the host CPU or the hypervisor is able to emulate it.
	// optional - The feature will be supported by virtual CPU if and only if it is supported by host CPU.
	// disable - The feature will not be supported by virtual CPU.
	// forbid - Guest creation will fail if the feature is supported by host CPU.
	// Defaults to require
	// +optional
	Policy string `json:"policy,omitempty"`
}
// Memory allows specifying the VirtualMachineInstance memory features.
type Memory struct {
	// Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.
	// +optional
	Hugepages *Hugepages `json:"hugepages,omitempty"`
	// Guest allows specifying the amount of memory which is visible inside the Guest OS.
	// The Guest must lie between Requests and Limits from the resources section.
	// Defaults to the requested memory in the resources section if not specified.
	// +optional
	Guest *resource.Quantity `json:"guest,omitempty"`
	// MaxGuest allows to specify the maximum amount of memory which is visible inside the Guest OS.
	// The delta between MaxGuest and Guest is the amount of memory that can be hot(un)plugged.
	MaxGuest *resource.Quantity `json:"maxGuest,omitempty"`
}

// MemoryStatus reports the observed memory figures of the VirtualMachine.
type MemoryStatus struct {
	// GuestAtBoot specifies how much memory the VirtualMachine initially booted with.
	// +optional
	GuestAtBoot *resource.Quantity `json:"guestAtBoot,omitempty"`
	// GuestCurrent specifies how much memory is currently available for the VirtualMachine.
	// +optional
	GuestCurrent *resource.Quantity `json:"guestCurrent,omitempty"`
	// GuestRequested specifies how much memory was requested (hotplug) for the VirtualMachine.
	// +optional
	GuestRequested *resource.Quantity `json:"guestRequested,omitempty"`
}

// Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.
type Hugepages struct {
	// PageSize specifies the hugepage size, for x86_64 architecture valid values are 1Gi and 2Mi.
	PageSize string `json:"pageSize,omitempty"`
}
// Machine specifies the machine type of the VirtualMachineInstance.
type Machine struct {
	// QEMU machine type is the actual chipset of the VirtualMachineInstance.
	// +optional
	Type string `json:"type"`
}
// Firmware groups the firmware-related settings of the vmi.
type Firmware struct {
	// UUID reported by the vmi bios.
	// Defaults to a random generated uid.
	UUID types.UID `json:"uuid,omitempty"`
	// Settings to control the bootloader that is used.
	// +optional
	Bootloader *Bootloader `json:"bootloader,omitempty"`
	// The system-serial-number in SMBIOS
	Serial string `json:"serial,omitempty"`
	// Settings to set the kernel for booting.
	// +optional
	KernelBoot *KernelBoot `json:"kernelBoot,omitempty"`
	// Information that can be set in the ACPI table
	ACPI *ACPI `json:"acpi,omitempty"`
}

// ACPI holds references to secrets whose data is exposed to the guest via ACPI tables.
type ACPI struct {
	// SlicNameRef should match the volume name of a secret object. The data in the secret should
	// be a binary blob that follows the ACPI SLIC standard, see:
	// https://learn.microsoft.com/en-us/previous-versions/windows/hardware/design/dn653305(v=vs.85)
	SlicNameRef string `json:"slicNameRef,omitempty"`
	// Similar to SlicNameRef, another ACPI entry that is used in more recent Windows versions.
	// The above points to the spec of MSDM too.
	MsdmNameRef string `json:"msdmNameRef,omitempty"`
}
// Devices allows adding disks, network interfaces, and other devices to the vmi.
type Devices struct {
	// Fall back to legacy virtio 0.9 support if virtio bus is selected on devices.
	// This is helpful for old machines like CentOS6 or RHEL6 which
	// do not understand virtio_non_transitional (virtio 1.0).
	UseVirtioTransitional *bool `json:"useVirtioTransitional,omitempty"`
	// DisableHotplug disables the ability to hotplug disks.
	DisableHotplug bool `json:"disableHotplug,omitempty"`
	// Disks describes disks, cdroms and luns which are connected to the vmi.
	// +kubebuilder:validation:MaxItems:=256
	Disks []Disk `json:"disks,omitempty"`
	// Watchdog describes a watchdog device which can be added to the vmi.
	Watchdog *Watchdog `json:"watchdog,omitempty"`
	// Interfaces describe network interfaces which are added to the vmi.
	// +kubebuilder:validation:MaxItems:=256
	Interfaces []Interface `json:"interfaces,omitempty"`
	// Inputs describe input devices
	Inputs []Input `json:"inputs,omitempty"`
	// Whether to attach a pod network interface. Defaults to true.
	AutoattachPodInterface *bool `json:"autoattachPodInterface,omitempty"`
	// Whether to attach the default graphics device or not.
	// VNC will not be available if set to false. Defaults to true.
	AutoattachGraphicsDevice *bool `json:"autoattachGraphicsDevice,omitempty"`
	// Whether to attach the default virtio-serial console or not.
	// Serial console access will not be available if set to false. Defaults to true.
	AutoattachSerialConsole *bool `json:"autoattachSerialConsole,omitempty"`
	// Whether to log the auto-attached default serial console or not.
	// Serial console logs will be collected to a file and then streamed from a named `guest-console-log`.
	// Not relevant if autoattachSerialConsole is disabled.
	// Defaults to cluster wide setting on VirtualMachineOptions.
	LogSerialConsole *bool `json:"logSerialConsole,omitempty"`
	// Whether to attach the Memory balloon device with default period.
	// Period can be adjusted in virt-config.
	// Defaults to true.
	// +optional
	AutoattachMemBalloon *bool `json:"autoattachMemBalloon,omitempty"`
	// Whether to attach an Input Device.
	// Defaults to false.
	// +optional
	AutoattachInputDevice *bool `json:"autoattachInputDevice,omitempty"`
	// Whether to attach the VSOCK CID to the VM or not.
	// VSOCK access will be available if set to true. Defaults to false.
	AutoattachVSOCK *bool `json:"autoattachVSOCK,omitempty"`
	// Whether to have random number generator from host
	// +optional
	Rng *Rng `json:"rng,omitempty"`
	// Whether or not to enable virtio multi-queue for block devices.
	// Defaults to false.
	// +optional
	BlockMultiQueue *bool `json:"blockMultiQueue,omitempty"`
	// If specified, virtual network interfaces configured with a virtio bus will also enable the vhost multiqueue feature for network devices. The number of queues created depends on additional factors of the VirtualMachineInstance, like the number of guest CPUs.
	// +optional
	NetworkInterfaceMultiQueue *bool `json:"networkInterfaceMultiqueue,omitempty"`
	// Whether to attach a GPU device to the vmi.
	// +optional
	// +listType=atomic
	GPUs []GPU `json:"gpus,omitempty"`
	// DownwardMetrics creates a virtio serial device for exposing the downward metrics to the vmi.
	// +optional
	DownwardMetrics *DownwardMetrics `json:"downwardMetrics,omitempty"`
	// PanicDevices provides additional crash information when a guest crashes.
	// +optional
	// +listType=atomic
	PanicDevices []PanicDevice `json:"panicDevices,omitempty"`
	// Filesystems describes filesystem which is connected to the vmi.
	// +optional
	// +listType=atomic
	Filesystems []Filesystem `json:"filesystems,omitempty"`
	// Whether to attach a host device to the vmi.
	// +optional
	// +listType=atomic
	HostDevices []HostDevice `json:"hostDevices,omitempty"`
	// To configure and access client devices such as redirecting USB
	// +optional
	ClientPassthrough *ClientPassthroughDevices `json:"clientPassthrough,omitempty"`
	// Whether to emulate a sound device.
	// +optional
	Sound *SoundDevice `json:"sound,omitempty"`
	// Whether to emulate a TPM device.
	// +optional
	TPM *TPMDevice `json:"tpm,omitempty"`
	// Video describes the video device configuration for the vmi.
	// +optional
	Video *VideoDevice `json:"video,omitempty"`
}
// ClientPassthroughDevices represents a subset of client devices that can be
// accessed by VMI. At the moment, only USB devices using Usbredir's library and
// tooling. Another fit would be a smartcard with libcacard.
//
// The struct is currently empty as there is no immediate request for
// user-facing APIs. This structure simply turns on USB redirection of
// UsbClientPassthroughMaxNumberOf devices.
type ClientPassthroughDevices struct {
}

// UsbClientPassthroughMaxNumberOf represents the upper limit allowed by QEMU + KubeVirt.
const (
	UsbClientPassthroughMaxNumberOf = 4
)

// SoundDevice represents the user's configuration to emulate sound cards in the VMI.
type SoundDevice struct {
	// User's defined name for this sound device
	Name string `json:"name"`
	// We only support ich9 or ac97.
	// If SoundDevice is not set: No sound card is emulated.
	// If SoundDevice is set but Model is not: ich9
	// +optional
	Model string `json:"model,omitempty"`
}
// TPMDevice configures the emulated TPM of the vmi.
type TPMDevice struct {
	// Enabled allows a user to explicitly disable the vTPM even when one is enabled by a preference referenced by the VirtualMachine
	// Defaults to True
	Enabled *bool `json:"enabled,omitempty"`
	// Persistent indicates the state of the TPM device should be kept across reboots
	// Defaults to false
	Persistent *bool `json:"persistent,omitempty"`
}

// VideoDevice configures the emulated video device of the vmi.
type VideoDevice struct {
	// Type specifies the video device type (e.g., virtio, vga, bochs, ramfb).
	// If not specified, the default is architecture-dependent (VGA for BIOS-based VMs, Bochs for EFI-based VMs on AMD64; virtio for Arm and s390x).
	// +optional
	Type string `json:"type,omitempty"`
}
// InputBus is the bus over which an input device is attached.
type InputBus string

const (
	InputBusUSB    InputBus = "usb"
	InputBusVirtio InputBus = "virtio"
)

// InputType is the kind of input device that is emulated.
type InputType string

const (
	InputTypeTablet   InputType = "tablet"
	InputTypeKeyboard InputType = "keyboard"
)

// Input configures an input device of the vmi.
type Input struct {
	// Bus indicates the bus of input device to emulate.
	// Supported values: virtio, usb.
	Bus InputBus `json:"bus,omitempty"`
	// Type indicates the type of input device.
	// Supported values: tablet.
	// NOTE(review): InputTypeKeyboard is also declared above; confirm whether "keyboard" is accepted here.
	Type InputType `json:"type"`
	// Name is the device name
	Name string `json:"name"`
}
// Filesystem defines a filesystem that is connected to the vmi.
type Filesystem struct {
	// Name is the device name
	Name string `json:"name"`
	// Virtiofs is supported
	Virtiofs *FilesystemVirtiofs `json:"virtiofs"`
}

// FilesystemVirtiofs selects virtiofs for a Filesystem. It carries no options.
type FilesystemVirtiofs struct{}

// DownwardMetrics enables the downward metrics device. It carries no options.
type DownwardMetrics struct{}
// GPU configures a GPU device that is attached to the vmi.
type GPU struct {
	// Name of the GPU device as exposed by a device plugin
	Name string `json:"name"`
	// DeviceName is the name of the device provisioned by device-plugins
	DeviceName string `json:"deviceName,omitempty"`
	// ClaimRequest provides the ClaimName from vmi.spec.resourceClaims[].name and
	// requestName from resourceClaim.spec.devices.requests[].name
	// This field should only be configured if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.
	// This feature is in alpha.
	*ClaimRequest `json:",inline"`
	// VirtualGPUOptions holds the tuning options of the virtual GPU.
	VirtualGPUOptions *VGPUOptions `json:"virtualGPUOptions,omitempty"`
	// If specified, the virtual network interface address and its tag will be provided to the guest via config drive
	// +optional
	Tag string `json:"tag,omitempty"`
}

// ClaimRequest references a device allocated through a resource claim (DRA).
type ClaimRequest struct {
	// ClaimName needs to be provided from the list vmi.spec.resourceClaims[].name where this
	// device is allocated
	// +optional
	ClaimName *string `json:"claimName,omitempty"`
	// RequestName needs to be provided from resourceClaim.spec.devices.requests[].name where this
	// device is requested
	// +optional
	RequestName *string `json:"requestName,omitempty"`
}
// VGPUOptions holds the tuning options of a virtual GPU.
type VGPUOptions struct {
	// Display holds the display-related options of the virtual GPU.
	Display *VGPUDisplayOptions `json:"display,omitempty"`
}

// VGPUDisplayOptions holds the display-related options of a virtual GPU.
type VGPUDisplayOptions struct {
	// Enabled determines if a display adapter backed by a vGPU should be enabled or disabled on the guest.
	// Defaults to true.
	// +optional
	Enabled *bool `json:"enabled,omitempty"`
	// Enables a boot framebuffer, until the guest OS loads a real GPU driver
	// Defaults to true.
	// +optional
	RamFB *FeatureState `json:"ramFB,omitempty"`
}
// PanicDevice configures a panic device, which provides additional crash information when a guest crashes.
type PanicDevice struct {
	// Model specifies what type of panic device is provided.
	// The panic model used when this attribute is missing depends on the hypervisor and guest arch.
	// One of: isa, hyperv, pvpanic.
	// +optional
	Model *PanicDeviceModel `json:"model,omitempty"`
}
// HostDevice configures a host device that is attached to the vmi.
type HostDevice struct {
	// Name is the device name
	Name string `json:"name"`
	// DeviceName is the name of the device provisioned by device-plugins
	DeviceName string `json:"deviceName,omitempty"`
	// ClaimRequest provides the ClaimName from vmi.spec.resourceClaims[].name and
	// requestName from resourceClaim.spec.devices.requests[].name
	// This field requires a DRA feature gate to be enabled.
	// This field should only be configured if one of the feature-gates GPUsWithDRA or HostDevicesWithDRA is enabled.
	// This feature is in alpha.
	*ClaimRequest `json:",inline"`
	// If specified, the virtual network interface address and its tag will be provided to the guest via config drive
	// +optional
	Tag string `json:"tag,omitempty"`
}
// Disk specifies a disk, cdrom or lun device that is connected to the vmi.
type Disk struct {
	// Name is the device name
	Name string `json:"name"`
	// DiskDevice specifies as which device the disk should be added to the guest.
	// Defaults to Disk.
	DiskDevice `json:",inline"`
	// BootOrder is an integer value > 0, used to determine ordering of boot devices.
	// Lower values take precedence.
	// Each disk or interface that has a boot order must have a unique value.
	// Disks without a boot order are not tried if a disk with a boot order exists.
	// +optional
	BootOrder *uint `json:"bootOrder,omitempty"`
	// Serial provides the ability to specify a serial number for the disk device.
	// +optional
	Serial string `json:"serial,omitempty"`
	// DedicatedIOThread indicates this disk should have an exclusive IO Thread.
	// Enabling this implies useIOThreads = true.
	// Defaults to false.
	// +optional
	DedicatedIOThread *bool `json:"dedicatedIOThread,omitempty"`
	// Cache specifies which kvm disk cache mode should be used.
	// Supported values are:
	// none: Guest I/O not cached on the host, but may be kept in a disk cache.
	// writethrough: Guest I/O cached on the host but written through to the physical medium. Slowest but with most guarantees.
	// writeback: Guest I/O cached on the host.
	// Defaults to none if the storage supports O_DIRECT, otherwise writethrough.
	// +optional
	Cache DriverCache `json:"cache,omitempty"`
	// IO specifies which QEMU disk IO mode should be used.
	// Supported values are: native, default, threads.
	// +optional
	IO DriverIO `json:"io,omitempty"`
	// If specified, disk address and its tag will be provided to the guest via config drive metadata
	// +optional
	Tag string `json:"tag,omitempty"`
	// If specified, the virtual disk will be presented with the given block sizes.
	// +optional
	BlockSize *BlockSize `json:"blockSize,omitempty"`
	// If specified, the disk is made shareable and multiple writes from different VMs are permitted
	// +optional
	Shareable *bool `json:"shareable,omitempty"`
	// If specified, it can change the default error policy (stop) for the disk
	// +optional
	ErrorPolicy *DiskErrorPolicy `json:"errorPolicy,omitempty"`
}
// CustomBlockSize represents the desired logical and physical block size for a VM disk.
type CustomBlockSize struct {
	// Logical block size presented to the VM.
	Logical uint `json:"logical"`
	// Physical block size presented to the VM.
	Physical uint `json:"physical"`
}

// BlockSize provides the option to change the block size presented to the VM for a disk.
// Only one of its members may be specified.
type BlockSize struct {
	Custom *CustomBlockSize `json:"custom,omitempty"`
	MatchVolume *FeatureState `json:"matchVolume,omitempty"`
}
// DiskDevice represents the target of a volume to mount.
// Only one of its members may be specified.
type DiskDevice struct {
	// Attach a volume as a disk to the vmi.
	Disk *DiskTarget `json:"disk,omitempty"`
	// Attach a volume as a LUN to the vmi.
	LUN *LunTarget `json:"lun,omitempty"`
	// Attach a volume as a cdrom to the vmi.
	CDRom *CDRomTarget `json:"cdrom,omitempty"`
}

// DiskBus is the bus type a disk device is attached to.
type DiskBus string

const (
	DiskBusSCSI   DiskBus = "scsi"
	DiskBusSATA   DiskBus = "sata"
	DiskBusVirtio DiskBus = VirtIO
	DiskBusUSB    DiskBus = "usb"
)

// DiskTarget specifies how a volume is exposed as a regular disk to the vmi.
type DiskTarget struct {
	// Bus indicates the type of disk device to emulate.
	// supported values: virtio, sata, scsi, usb.
	Bus DiskBus `json:"bus,omitempty"`
	// ReadOnly.
	// Defaults to false.
	ReadOnly bool `json:"readonly,omitempty"`
	// If specified, the virtual disk will be placed on the guests pci address with the specified PCI address. For example: 0000:81:01.10
	// +optional
	PciAddress string `json:"pciAddress,omitempty"`
}
// LaunchSecurity holds the confidential-computing settings of the vmi.
type LaunchSecurity struct {
	// AMD Secure Encrypted Virtualization (SEV).
	SEV *SEV `json:"sev,omitempty"`
}

// SEV holds the AMD Secure Encrypted Virtualization settings of the vmi.
type SEV struct {
	// Guest policy flags as defined in AMD SEV API specification.
	// Note: due to security reasons it is not allowed to enable guest debugging. Therefore NoDebug flag is not exposed to users and is always true.
	Policy *SEVPolicy `json:"policy,omitempty"`
	// If specified, run the attestation process for a vmi.
	// +optional
	Attestation *SEVAttestation `json:"attestation,omitempty"`
	// Base64 encoded session blob.
	Session string `json:"session,omitempty"`
	// Base64 encoded guest owner's Diffie-Hellman key.
	DHCert string `json:"dhCert,omitempty"`
}

// SEVPolicy holds the SEV guest policy flags.
type SEVPolicy struct {
	// SEV-ES is required.
	// Defaults to false.
	// +optional
	EncryptedState *bool `json:"encryptedState,omitempty"`
}

// SEVAttestation requests the SEV attestation process to run for the vmi. It carries no options.
type SEVAttestation struct {
}
type LunTarget struct {
// Bus indicates the type of disk device to emulate.
// supported values: virtio, sata, scsi.
Bus DiskBus `json:"bus,omitempty"`
// ReadOnly.
// Defaults to false.
ReadOnly bool `json:"readonly,omitempty"`
// Reservation indicates if the disk needs to support the persistent reservation for the SCSI disk
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go | package core
const (
// GroupName to hold the string name for the cdi project
GroupName = "cdi.kubevirt.io"
)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go | /*
Copyright 2021 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// this has to be here otherwise informer-gen doesn't recognize it
// see https://github.com/kubernetes/code-generator/issues/59
// +genclient:nonNamespaced
// Deprecated for removal in v1.
//
// ObjectTransfer is the cluster scoped object transfer resource
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:resource:shortName=ot;ots,scope=Cluster
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase of the ObjectTransfer"
// +kubebuilder:subresource:status
type ObjectTransfer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ObjectTransferSpec `json:"spec"`
// +optional
Status ObjectTransferStatus `json:"status"`
}
// TransferSource is the source of a ObjectTransfer
type TransferSource struct {
// +optional
APIVersion string `json:"apiVersion,omitempty"`
Kind string `json:"kind"`
Namespace string `json:"namespace"`
Name string `json:"name"`
RequiredAnnotations map[string]string `json:"requiredAnnotations,omitempty"`
}
// TransferTarget is the target of an ObjectTransfer
type TransferTarget struct {
Namespace *string `json:"namespace,omitempty"`
Name *string `json:"name,omitempty"`
}
// ObjectTransferSpec specifies the source/target of the transfer
type ObjectTransferSpec struct {
Source TransferSource `json:"source"`
Target TransferTarget `json:"target"`
ParentName *string `json:"parentName,omitempty"`
}
// ObjectTransferPhase is the phase of the ObjectTransfer
type ObjectTransferPhase string
const (
// ObjectTransferEmpty is the empty transfer phase
ObjectTransferEmpty ObjectTransferPhase = ""
// ObjectTransferPending is the pending transfer phase
ObjectTransferPending ObjectTransferPhase = "Pending"
// ObjectTransferRunning is the running transfer phase
ObjectTransferRunning ObjectTransferPhase = "Running"
// ObjectTransferComplete is the complete transfer phase
ObjectTransferComplete ObjectTransferPhase = "Complete"
// ObjectTransferError is the (terminal) error transfer phase
ObjectTransferError ObjectTransferPhase = "Error"
)
// ObjectTransferConditionType is the type of ObjectTransferCondition
type ObjectTransferConditionType string
const (
// ObjectTransferConditionComplete is the "complete" condition
ObjectTransferConditionComplete ObjectTransferConditionType = "Complete"
)
// ObjectTransferCondition contains condition data
type ObjectTransferCondition struct {
Type ObjectTransferConditionType `json:"type"`
Status corev1.ConditionStatus `json:"status"`
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"`
Reason string `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
}
// ObjectTransferStatus is the status of the ObjectTransfer
type ObjectTransferStatus struct {
// Data is a place for intermediary state. Or anything really.
Data map[string]string `json:"data,omitempty"`
// Phase is the current phase of the transfer
Phase ObjectTransferPhase `json:"phase,omitempty"`
Conditions []ObjectTransferCondition `json:"conditions,omitempty"`
}
// ObjectTransferList provides the needed parameters to do request a list of ObjectTransfers from the system
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectTransferList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items provides a list of ObjectTransfers
Items []ObjectTransfer `json:"items"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2018 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDI) DeepCopyInto(out *CDI) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDI.
func (in *CDI) DeepCopy() *CDI {
if in == nil {
return nil
}
out := new(CDI)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDI) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// NOTE(review): this section is deepcopy-gen output (vendored). The code is
// kept byte-identical so a future regeneration produces no diff; only review
// comments are added. The pattern is uniform: DeepCopyInto shallow-copies the
// receiver, then replaces each pointer/slice field with a fresh deep copy.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDICertConfig) DeepCopyInto(out *CDICertConfig) {
	// shallow copy of all fields; pointer fields are re-pointed below
	*out = *in
	if in.CA != nil {
		// shadow in/out with pointers to the field so the generic
		// allocate-and-deep-copy lines below target just this field
		in, out := &in.CA, &out.CA
		*out = new(CertConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Server != nil {
		in, out := &in.Server, &out.Server
		*out = new(CertConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Client != nil {
		in, out := &in.Client, &out.Client
		*out = new(CertConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDICertConfig.
func (in *CDICertConfig) DeepCopy() *CDICertConfig {
	if in == nil {
		return nil
	}
	out := new(CDICertConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIConfig) DeepCopyInto(out *CDIConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfig.
func (in *CDIConfig) DeepCopy() *CDIConfig {
	if in == nil {
		return nil
	}
	out := new(CDIConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDIConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIConfigList) DeepCopyInto(out *CDIConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CDIConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigList.
func (in *CDIConfigList) DeepCopy() *CDIConfigList {
	if in == nil {
		return nil
	}
	out := new(CDIConfigList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDIConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIConfigSpec) DeepCopyInto(out *CDIConfigSpec) {
	*out = *in
	if in.UploadProxyURLOverride != nil {
		in, out := &in.UploadProxyURLOverride, &out.UploadProxyURLOverride
		*out = new(string)
		**out = **in
	}
	if in.ImportProxy != nil {
		in, out := &in.ImportProxy, &out.ImportProxy
		*out = new(ImportProxy)
		(*in).DeepCopyInto(*out)
	}
	if in.ScratchSpaceStorageClass != nil {
		in, out := &in.ScratchSpaceStorageClass, &out.ScratchSpaceStorageClass
		*out = new(string)
		**out = **in
	}
	if in.PodResourceRequirements != nil {
		in, out := &in.PodResourceRequirements, &out.PodResourceRequirements
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.FeatureGates != nil {
		in, out := &in.FeatureGates, &out.FeatureGates
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.FilesystemOverhead != nil {
		in, out := &in.FilesystemOverhead, &out.FilesystemOverhead
		*out = new(FilesystemOverhead)
		(*in).DeepCopyInto(*out)
	}
	if in.Preallocation != nil {
		in, out := &in.Preallocation, &out.Preallocation
		*out = new(bool)
		**out = **in
	}
	if in.InsecureRegistries != nil {
		in, out := &in.InsecureRegistries, &out.InsecureRegistries
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.DataVolumeTTLSeconds != nil {
		in, out := &in.DataVolumeTTLSeconds, &out.DataVolumeTTLSeconds
		*out = new(int32)
		**out = **in
	}
	if in.TLSSecurityProfile != nil {
		in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
		*out = new(TLSSecurityProfile)
		(*in).DeepCopyInto(*out)
	}
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.LogVerbosity != nil {
		in, out := &in.LogVerbosity, &out.LogVerbosity
		*out = new(int32)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigSpec.
func (in *CDIConfigSpec) DeepCopy() *CDIConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CDIConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIConfigStatus) DeepCopyInto(out *CDIConfigStatus) {
	*out = *in
	if in.UploadProxyURL != nil {
		in, out := &in.UploadProxyURL, &out.UploadProxyURL
		*out = new(string)
		**out = **in
	}
	if in.UploadProxyCA != nil {
		in, out := &in.UploadProxyCA, &out.UploadProxyCA
		*out = new(string)
		**out = **in
	}
	if in.ImportProxy != nil {
		in, out := &in.ImportProxy, &out.ImportProxy
		*out = new(ImportProxy)
		(*in).DeepCopyInto(*out)
	}
	if in.DefaultPodResourceRequirements != nil {
		in, out := &in.DefaultPodResourceRequirements, &out.DefaultPodResourceRequirements
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.FilesystemOverhead != nil {
		in, out := &in.FilesystemOverhead, &out.FilesystemOverhead
		*out = new(FilesystemOverhead)
		(*in).DeepCopyInto(*out)
	}
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigStatus.
func (in *CDIConfigStatus) DeepCopy() *CDIConfigStatus {
	if in == nil {
		return nil
	}
	out := new(CDIConfigStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIList) DeepCopyInto(out *CDIList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CDI, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIList.
func (in *CDIList) DeepCopy() *CDIList {
	if in == nil {
		return nil
	}
	out := new(CDIList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDIList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// NOTE(review): deepcopy-gen output (vendored); code kept byte-identical so
// regeneration produces no diff — comments only.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDISpec) DeepCopyInto(out *CDISpec) {
	// shallow copy first; pointer-typed optional fields are deep-copied below
	*out = *in
	if in.UninstallStrategy != nil {
		in, out := &in.UninstallStrategy, &out.UninstallStrategy
		*out = new(CDIUninstallStrategy)
		**out = **in
	}
	in.Infra.DeepCopyInto(&out.Infra)
	in.Workloads.DeepCopyInto(&out.Workloads)
	in.CustomizeComponents.DeepCopyInto(&out.CustomizeComponents)
	if in.CloneStrategyOverride != nil {
		in, out := &in.CloneStrategyOverride, &out.CloneStrategyOverride
		*out = new(CDICloneStrategy)
		**out = **in
	}
	if in.Config != nil {
		in, out := &in.Config, &out.Config
		*out = new(CDIConfigSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.CertConfig != nil {
		in, out := &in.CertConfig, &out.CertConfig
		*out = new(CDICertConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.PriorityClass != nil {
		in, out := &in.PriorityClass, &out.PriorityClass
		*out = new(CDIPriorityClass)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDISpec.
func (in *CDISpec) DeepCopy() *CDISpec {
	if in == nil {
		return nil
	}
	out := new(CDISpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIStatus) DeepCopyInto(out *CDIStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIStatus.
func (in *CDIStatus) DeepCopy() *CDIStatus {
	if in == nil {
		return nil
	}
	out := new(CDIStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertConfig) DeepCopyInto(out *CertConfig) {
	*out = *in
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.RenewBefore != nil {
		in, out := &in.RenewBefore, &out.RenewBefore
		*out = new(metav1.Duration)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertConfig.
func (in *CertConfig) DeepCopy() *CertConfig {
	if in == nil {
		return nil
	}
	out := new(CertConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimPropertySet) DeepCopyInto(out *ClaimPropertySet) {
	*out = *in
	if in.AccessModes != nil {
		in, out := &in.AccessModes, &out.AccessModes
		*out = make([]v1.PersistentVolumeAccessMode, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMode != nil {
		in, out := &in.VolumeMode, &out.VolumeMode
		*out = new(v1.PersistentVolumeMode)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimPropertySet.
func (in *ClaimPropertySet) DeepCopy() *ClaimPropertySet {
	if in == nil {
		return nil
	}
	out := new(ClaimPropertySet)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentConfig) DeepCopyInto(out *ComponentConfig) {
	*out = *in
	in.NodePlacement.DeepCopyInto(&out.NodePlacement)
	if in.DeploymentReplicas != nil {
		in, out := &in.DeploymentReplicas, &out.DeploymentReplicas
		*out = new(int32)
		**out = **in
	}
	if in.APIServerReplicas != nil {
		in, out := &in.APIServerReplicas, &out.APIServerReplicas
		*out = new(int32)
		**out = **in
	}
	if in.UploadProxyReplicas != nil {
		in, out := &in.UploadProxyReplicas, &out.UploadProxyReplicas
		*out = new(int32)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfig.
func (in *ComponentConfig) DeepCopy() *ComponentConfig {
	if in == nil {
		return nil
	}
	out := new(ComponentConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionState) DeepCopyInto(out *ConditionState) {
	*out = *in
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionState.
func (in *ConditionState) DeepCopy() *ConditionState {
	if in == nil {
		return nil
	}
	out := new(ConditionState)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
	*out = *in
	in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
	if in == nil {
		return nil
	}
	out := new(CustomTLSProfile)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) {
	*out = *in
	if in.Patches != nil {
		in, out := &in.Patches, &out.Patches
		*out = make([]CustomizeComponentsPatch, len(*in))
		copy(*out, *in)
	}
	if in.Flags != nil {
		in, out := &in.Flags, &out.Flags
		*out = new(Flags)
		(*in).DeepCopyInto(*out)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponents.
func (in *CustomizeComponents) DeepCopy() *CustomizeComponents {
	if in == nil {
		return nil
	}
	out := new(CustomizeComponents)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomizeComponentsPatch) DeepCopyInto(out *CustomizeComponentsPatch) {
	// struct has no pointer/slice fields; the shallow copy is a full copy
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponentsPatch.
func (in *CustomizeComponentsPatch) DeepCopy() *CustomizeComponentsPatch {
	if in == nil {
		return nil
	}
	out := new(CustomizeComponentsPatch)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output (vendored); code kept byte-identical so
// regeneration produces no diff — comments only.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataImportCron) DeepCopyInto(out *DataImportCron) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCron.
func (in *DataImportCron) DeepCopy() *DataImportCron {
	if in == nil {
		return nil
	}
	out := new(DataImportCron)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataImportCron) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataImportCronCondition) DeepCopyInto(out *DataImportCronCondition) {
	*out = *in
	in.ConditionState.DeepCopyInto(&out.ConditionState)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronCondition.
func (in *DataImportCronCondition) DeepCopy() *DataImportCronCondition {
	if in == nil {
		return nil
	}
	out := new(DataImportCronCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataImportCronList) DeepCopyInto(out *DataImportCronList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DataImportCron, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronList.
func (in *DataImportCronList) DeepCopy() *DataImportCronList {
	if in == nil {
		return nil
	}
	out := new(DataImportCronList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataImportCronList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataImportCronSpec) DeepCopyInto(out *DataImportCronSpec) {
	*out = *in
	in.Template.DeepCopyInto(&out.Template)
	if in.GarbageCollect != nil {
		in, out := &in.GarbageCollect, &out.GarbageCollect
		*out = new(DataImportCronGarbageCollect)
		**out = **in
	}
	if in.ImportsToKeep != nil {
		in, out := &in.ImportsToKeep, &out.ImportsToKeep
		*out = new(int32)
		**out = **in
	}
	if in.RetentionPolicy != nil {
		in, out := &in.RetentionPolicy, &out.RetentionPolicy
		*out = new(DataImportCronRetentionPolicy)
		**out = **in
	}
	if in.CreatedBy != nil {
		in, out := &in.CreatedBy, &out.CreatedBy
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronSpec.
func (in *DataImportCronSpec) DeepCopy() *DataImportCronSpec {
	if in == nil {
		return nil
	}
	out := new(DataImportCronSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataImportCronStatus) DeepCopyInto(out *DataImportCronStatus) {
	*out = *in
	if in.CurrentImports != nil {
		in, out := &in.CurrentImports, &out.CurrentImports
		*out = make([]ImportStatus, len(*in))
		copy(*out, *in)
	}
	if in.LastImportedPVC != nil {
		in, out := &in.LastImportedPVC, &out.LastImportedPVC
		*out = new(DataVolumeSourcePVC)
		**out = **in
	}
	if in.LastExecutionTimestamp != nil {
		// metav1.Time provides its own DeepCopy returning a new pointer
		in, out := &in.LastExecutionTimestamp, &out.LastExecutionTimestamp
		*out = (*in).DeepCopy()
	}
	if in.LastImportTimestamp != nil {
		in, out := &in.LastImportTimestamp, &out.LastImportTimestamp
		*out = (*in).DeepCopy()
	}
	if in.SourceFormat != nil {
		in, out := &in.SourceFormat, &out.SourceFormat
		*out = new(DataImportCronSourceFormat)
		**out = **in
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]DataImportCronCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronStatus.
func (in *DataImportCronStatus) DeepCopy() *DataImportCronStatus {
	if in == nil {
		return nil
	}
	out := new(DataImportCronStatus)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output (vendored); code kept byte-identical so
// regeneration produces no diff — comments only.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSource) DeepCopyInto(out *DataSource) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
func (in *DataSource) DeepCopy() *DataSource {
	if in == nil {
		return nil
	}
	out := new(DataSource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataSource) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceCondition) DeepCopyInto(out *DataSourceCondition) {
	*out = *in
	in.ConditionState.DeepCopyInto(&out.ConditionState)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceCondition.
func (in *DataSourceCondition) DeepCopy() *DataSourceCondition {
	if in == nil {
		return nil
	}
	out := new(DataSourceCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceList) DeepCopyInto(out *DataSourceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DataSource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceList.
func (in *DataSourceList) DeepCopy() *DataSourceList {
	if in == nil {
		return nil
	}
	out := new(DataSourceList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataSourceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceRefSourceDataSource) DeepCopyInto(out *DataSourceRefSourceDataSource) {
	// struct has no pointer/slice fields; the shallow copy is a full copy
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceRefSourceDataSource.
func (in *DataSourceRefSourceDataSource) DeepCopy() *DataSourceRefSourceDataSource {
	if in == nil {
		return nil
	}
	out := new(DataSourceRefSourceDataSource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceSource) DeepCopyInto(out *DataSourceSource) {
	*out = *in
	if in.PVC != nil {
		in, out := &in.PVC, &out.PVC
		*out = new(DataVolumeSourcePVC)
		**out = **in
	}
	if in.Snapshot != nil {
		in, out := &in.Snapshot, &out.Snapshot
		*out = new(DataVolumeSourceSnapshot)
		**out = **in
	}
	if in.DataSource != nil {
		in, out := &in.DataSource, &out.DataSource
		*out = new(DataSourceRefSourceDataSource)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceSource.
func (in *DataSourceSource) DeepCopy() *DataSourceSource {
	if in == nil {
		return nil
	}
	out := new(DataSourceSource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceSpec) DeepCopyInto(out *DataSourceSpec) {
	*out = *in
	in.Source.DeepCopyInto(&out.Source)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceSpec.
func (in *DataSourceSpec) DeepCopy() *DataSourceSpec {
	if in == nil {
		return nil
	}
	out := new(DataSourceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSourceStatus) DeepCopyInto(out *DataSourceStatus) {
	*out = *in
	in.Source.DeepCopyInto(&out.Source)
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]DataSourceCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceStatus.
func (in *DataSourceStatus) DeepCopy() *DataSourceStatus {
	if in == nil {
		return nil
	}
	out := new(DataSourceStatus)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output (vendored); code kept byte-identical so
// regeneration produces no diff — comments only.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolume) DeepCopyInto(out *DataVolume) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume.
func (in *DataVolume) DeepCopy() *DataVolume {
	if in == nil {
		return nil
	}
	out := new(DataVolume)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataVolume) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeBlankImage) DeepCopyInto(out *DataVolumeBlankImage) {
	// empty/value-only struct; the shallow copy is a full copy
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeBlankImage.
func (in *DataVolumeBlankImage) DeepCopy() *DataVolumeBlankImage {
	if in == nil {
		return nil
	}
	out := new(DataVolumeBlankImage)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeCheckpoint) DeepCopyInto(out *DataVolumeCheckpoint) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeCheckpoint.
func (in *DataVolumeCheckpoint) DeepCopy() *DataVolumeCheckpoint {
	if in == nil {
		return nil
	}
	out := new(DataVolumeCheckpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeCondition) DeepCopyInto(out *DataVolumeCondition) {
	*out = *in
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeCondition.
func (in *DataVolumeCondition) DeepCopy() *DataVolumeCondition {
	if in == nil {
		return nil
	}
	out := new(DataVolumeCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeList) DeepCopyInto(out *DataVolumeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DataVolume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeList.
func (in *DataVolumeList) DeepCopy() *DataVolumeList {
	if in == nil {
		return nil
	}
	out := new(DataVolumeList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataVolumeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSource) DeepCopyInto(out *DataVolumeSource) {
	// union-style struct: at most one source pointer is expected to be set,
	// but every non-nil pointer field is deep-copied regardless
	*out = *in
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = new(DataVolumeSourceHTTP)
		(*in).DeepCopyInto(*out)
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = new(DataVolumeSourceS3)
		**out = **in
	}
	if in.GCS != nil {
		in, out := &in.GCS, &out.GCS
		*out = new(DataVolumeSourceGCS)
		**out = **in
	}
	if in.Registry != nil {
		in, out := &in.Registry, &out.Registry
		*out = new(DataVolumeSourceRegistry)
		(*in).DeepCopyInto(*out)
	}
	if in.PVC != nil {
		in, out := &in.PVC, &out.PVC
		*out = new(DataVolumeSourcePVC)
		**out = **in
	}
	if in.Upload != nil {
		in, out := &in.Upload, &out.Upload
		*out = new(DataVolumeSourceUpload)
		**out = **in
	}
	if in.Blank != nil {
		in, out := &in.Blank, &out.Blank
		*out = new(DataVolumeBlankImage)
		**out = **in
	}
	if in.Imageio != nil {
		in, out := &in.Imageio, &out.Imageio
		*out = new(DataVolumeSourceImageIO)
		(*in).DeepCopyInto(*out)
	}
	if in.VDDK != nil {
		in, out := &in.VDDK, &out.VDDK
		*out = new(DataVolumeSourceVDDK)
		**out = **in
	}
	if in.Snapshot != nil {
		in, out := &in.Snapshot, &out.Snapshot
		*out = new(DataVolumeSourceSnapshot)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSource.
func (in *DataVolumeSource) DeepCopy() *DataVolumeSource {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourceGCS) DeepCopyInto(out *DataVolumeSourceGCS) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceGCS.
func (in *DataVolumeSourceGCS) DeepCopy() *DataVolumeSourceGCS {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSourceGCS)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourceHTTP) DeepCopyInto(out *DataVolumeSourceHTTP) {
	*out = *in
	if in.ExtraHeaders != nil {
		in, out := &in.ExtraHeaders, &out.ExtraHeaders
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SecretExtraHeaders != nil {
		in, out := &in.SecretExtraHeaders, &out.SecretExtraHeaders
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceHTTP.
func (in *DataVolumeSourceHTTP) DeepCopy() *DataVolumeSourceHTTP {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSourceHTTP)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourceImageIO) DeepCopyInto(out *DataVolumeSourceImageIO) {
	*out = *in
	if in.InsecureSkipVerify != nil {
		in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify
		*out = new(bool)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceImageIO.
func (in *DataVolumeSourceImageIO) DeepCopy() *DataVolumeSourceImageIO {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSourceImageIO)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourcePVC) DeepCopyInto(out *DataVolumeSourcePVC) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourcePVC.
func (in *DataVolumeSourcePVC) DeepCopy() *DataVolumeSourcePVC {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSourcePVC)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourceRef) DeepCopyInto(out *DataVolumeSourceRef) {
	*out = *in
	if in.Namespace != nil {
		in, out := &in.Namespace, &out.Namespace
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceRef.
func (in *DataVolumeSourceRef) DeepCopy() *DataVolumeSourceRef {
	if in == nil {
		return nil
	}
	out := new(DataVolumeSourceRef)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataVolumeSourceRegistry) DeepCopyInto(out *DataVolumeSourceRegistry) {
*out = *in
if in.URL != nil {
in, out := &in.URL, &out.URL
*out = new(string)
**out = **in
}
if in.ImageStream != nil {
in, out := &in.ImageStream, &out.ImageStream
*out = new(string)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go | // Code generated by swagger-doc. DO NOT EDIT.
package v1beta1
func (DataVolume) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolume is an abstraction on top of PersistentVolumeClaims to allow easy population of those PersistentVolumeClaims with relation to VirtualMachines\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=dv;dvs,categories=all\n+kubebuilder:subresource:status\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\",description=\"The phase the data volume is in\"\n+kubebuilder:printcolumn:name=\"Progress\",type=\"string\",JSONPath=\".status.progress\",description=\"Transfer progress in percentage if known, N/A otherwise\"\n+kubebuilder:printcolumn:name=\"Restarts\",type=\"integer\",JSONPath=\".status.restartCount\",description=\"The number of times the transfer has been restarted.\"\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"",
"status": "+optional",
}
}
func (DataVolumeSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSpec defines the DataVolume type specification",
"source": "Source is the src of the data for the requested DataVolume\n+optional",
"sourceRef": "SourceRef is an indirect reference to the source of data for the requested DataVolume\n+optional",
"pvc": "PVC is the PVC specification",
"storage": "Storage is the requested storage specification",
"priorityClassName": "PriorityClassName for Importer, Cloner and Uploader pod",
"contentType": "DataVolumeContentType options: \"kubevirt\", \"archive\"\n+kubebuilder:validation:Enum=\"kubevirt\";\"archive\"",
"checkpoints": "Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import.",
"finalCheckpoint": "FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint.",
"preallocation": "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
}
}
func (StorageSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "StorageSpec defines the Storage type specification",
"accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional",
"selector": "A label query over volumes to consider for binding.\n+optional",
"resources": "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional",
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional",
"storageClassName": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional",
"volumeMode": "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional",
"dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.\nIf the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.\n+optional",
"dataSourceRef": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner.\nThis field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty.\nThere are two important differences between DataSource and DataSourceRef:\n* While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n+optional",
}
}
// SwaggerDoc returns the swagger documentation for DataVolumeCheckpoint,
// keyed by JSON field name; the empty key documents the type itself.
func (DataVolumeCheckpoint) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 3)
	docs[""] = "DataVolumeCheckpoint defines a stage in a warm migration."
	docs["previous"] = "Previous is the identifier of the snapshot from the previous checkpoint."
	docs["current"] = "Current is the identifier of the snapshot created for this checkpoint."
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSource;
// the empty key holds the doc string for the type itself.
func (DataVolumeSource) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 1)
	docs[""] = "DataVolumeSource represents the source for our Data Volume, this can be HTTP, Imageio, S3, GCS, Registry or an existing PVC"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSourcePVC,
// keyed by JSON field name; the empty key documents the type itself.
func (DataVolumeSourcePVC) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 3)
	docs[""] = "DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC"
	docs["namespace"] = "The namespace of the source PVC"
	docs["name"] = "The name of the source PVC"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSourceSnapshot,
// keyed by JSON field name; the empty key documents the type itself.
func (DataVolumeSourceSnapshot) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 3)
	docs[""] = "DataVolumeSourceSnapshot provides the parameters to create a Data Volume from an existing VolumeSnapshot"
	docs["namespace"] = "The namespace of the source VolumeSnapshot"
	docs["name"] = "The name of the source VolumeSnapshot"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataSourceRefSourceDataSource,
// keyed by JSON field name; the empty key documents the type itself.
func (DataSourceRefSourceDataSource) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 3)
	docs[""] = "DataSourceRefSourceDataSource serves as a reference to another DataSource\nCan be resolved into a DataVolumeSourcePVC or a DataVolumeSourceSnapshot\nThe maximum depth of a reference chain may not exceed 1."
	docs["namespace"] = "The namespace of the source DataSource"
	docs["name"] = "The name of the source DataSource"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeBlankImage;
// the empty key holds the doc string for the type itself.
func (DataVolumeBlankImage) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 1)
	docs[""] = "DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSourceUpload;
// the empty key holds the doc string for the type itself.
func (DataVolumeSourceUpload) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 1)
	docs[""] = "DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSourceS3,
// keyed by JSON field name; the empty key documents the type itself.
func (DataVolumeSourceS3) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 4)
	docs[""] = "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source"
	docs["url"] = "URL is the url of the S3 source"
	docs["secretRef"] = "SecretRef provides the secret reference needed to access the S3 source"
	docs["certConfigMap"] = "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional"
	return docs
}
// SwaggerDoc returns the swagger documentation for DataVolumeSourceGCS,
// keyed by JSON field name; the empty key documents the type itself.
func (DataVolumeSourceGCS) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 3)
	docs[""] = "DataVolumeSourceGCS provides the parameters to create a Data Volume from an GCS source"
	docs["url"] = "URL is the url of the GCS source"
	docs["secretRef"] = "SecretRef provides the secret reference needed to access the GCS source"
	return docs
}
func (DataVolumeSourceRegistry) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSourceRegistry provides the parameters to create a Data Volume from an registry source",
"url": "URL is the url of the registry source (starting with the scheme: docker, oci-archive)\n+optional",
"imageStream": "ImageStream is the name of image stream for import\n+optional",
"pullMethod": "PullMethod can be either \"pod\" (default import), or \"node\" (node docker cache based import)\n+optional",
"secretRef": "SecretRef provides the secret reference needed to access the Registry source\n+optional",
"certConfigMap": "CertConfigMap provides a reference to the Registry certs\n+optional",
"platform": "Platform describes the minimum runtime requirements of the image\n+optional",
}
}
func (PlatformOptions) SwaggerDoc() map[string]string {
return map[string]string{
"architecture": "Architecture specifies the image target CPU architecture\n+optional",
}
}
func (DataVolumeSourceHTTP) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSourceHTTP can be either an http or https endpoint, with an optional basic auth user name and password, and an optional configmap containing additional CAs",
"url": "URL is the URL of the http(s) endpoint",
"secretRef": "SecretRef A Secret reference, the secret should contain accessKeyId (user name) base64 encoded, and secretKey (password) also base64 encoded\n+optional",
"certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional",
"extraHeaders": "ExtraHeaders is a list of strings containing extra headers to include with HTTP transfer requests\n+optional",
"secretExtraHeaders": "SecretExtraHeaders is a list of Secret references, each containing an extra HTTP header that may include sensitive information\n+optional",
}
}
func (DataVolumeSourceImageIO) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSourceImageIO provides the parameters to create a Data Volume from an imageio source",
"url": "URL is the URL of the ovirt-engine",
"diskId": "DiskID provides id of a disk to be imported",
"secretRef": "SecretRef provides the secret reference needed to access the ovirt-engine",
"certConfigMap": "CertConfigMap provides a reference to the CA cert",
"insecureSkipVerify": "InsecureSkipVerify is a flag to skip certificate verification",
}
}
func (DataVolumeSourceVDDK) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSourceVDDK provides the parameters to create a Data Volume from a Vmware source",
"url": "URL is the URL of the vCenter or ESXi host with the VM to migrate",
"uuid": "UUID is the UUID of the virtual machine that the backing file is attached to in vCenter/ESXi",
"backingFile": "BackingFile is the path to the virtual hard disk to migrate from vCenter/ESXi",
"thumbprint": "Thumbprint is the certificate thumbprint of the vCenter or ESXi host",
"secretRef": "SecretRef provides a reference to a secret containing the username and password needed to access the vCenter or ESXi host",
"initImageURL": "InitImageURL is an optional URL to an image containing an extracted VDDK library, overrides v2v-vmware config map",
"extraArgs": "ExtraArgs is a reference to a ConfigMap containing extra arguments to pass directly to the VDDK library",
}
}
func (DataVolumeSourceRef) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeSourceRef defines an indirect reference to the source of data for the DataVolume",
"kind": "The kind of the source reference, currently only \"DataSource\" is supported",
"namespace": "The namespace of the source reference, defaults to the DataVolume namespace\n+optional",
"name": "The name of the source reference",
}
}
func (DataVolumeStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeStatus contains the current status of the DataVolume",
"claimName": "ClaimName is the name of the underlying PVC used by the DataVolume.",
"phase": "Phase is the current phase of the data volume",
"restartCount": "RestartCount is the number of times the pod populating the DataVolume has restarted",
}
}
func (DataVolumeList) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeList provides the needed parameters to do request a list of Data Volumes from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataVolumes",
}
}
func (DataVolumeCondition) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataVolumeCondition represents the state of a data volume condition.",
}
}
func (StorageProfile) SwaggerDoc() map[string]string {
return map[string]string{
"": "StorageProfile provides a CDI specific recommendation for storage parameters\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster\n+kubebuilder:subresource:status",
}
}
func (StorageProfileSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "StorageProfileSpec defines specification for StorageProfile",
"cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone",
"claimPropertySets": "ClaimPropertySets is a provided set of properties applicable to PVC\n+kubebuilder:validation:MaxItems=8",
"dataImportCronSourceFormat": "DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources",
"snapshotClass": "SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. If not set, a VolumeSnapshotClass is chosen according to the provisioner.",
}
}
func (StorageProfileStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "StorageProfileStatus provides the most recently observed status of the StorageProfile",
"storageClass": "The StorageClass name for which capabilities are defined",
"provisioner": "The Storage class provisioner plugin name",
"cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone",
"claimPropertySets": "ClaimPropertySets computed from the spec and detected in the system\n+kubebuilder:validation:MaxItems=8",
"dataImportCronSourceFormat": "DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources",
"snapshotClass": "SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. If not set, a VolumeSnapshotClass is chosen according to the provisioner.",
}
}
func (ClaimPropertySet) SwaggerDoc() map[string]string {
return map[string]string{
"": "ClaimPropertySet is a set of properties applicable to PVC",
"accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+kubebuilder:validation:MaxItems=4\n+kubebuilder:validation:XValidation:rule=\"self.all(am, am in ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany', 'ReadWriteOncePod'])\", message=\"Illegal AccessMode\"",
"volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+kubebuilder:validation:Enum=\"Block\";\"Filesystem\"",
}
}
func (StorageProfileList) SwaggerDoc() map[string]string {
return map[string]string{
"": "StorageProfileList provides the needed parameters to request a list of StorageProfile from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of StorageProfile",
}
}
func (DataSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSource references an import/clone source for a DataVolume\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=das,categories=all",
}
}
func (DataSourceSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSourceSpec defines specification for DataSource",
"source": "Source is the source of the data referenced by the DataSource",
}
}
func (DataSourceSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSourceSource represents the source for our DataSource",
"pvc": "+optional",
"snapshot": "+optional",
"dataSource": "+optional",
}
}
func (DataSourceStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSourceStatus provides the most recently observed status of the DataSource",
"source": "Source is the current source of the data referenced by the DataSource",
}
}
func (DataSourceCondition) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSourceCondition represents the state of a data source condition",
}
}
// SwaggerDoc returns the swagger documentation for ConditionState;
// the empty key holds the doc string for the type itself.
func (ConditionState) SwaggerDoc() map[string]string {
	docs := make(map[string]string, 1)
	docs[""] = "ConditionState represents the state of a condition"
	return docs
}
func (DataSourceList) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataSourceList provides the needed parameters to do request a list of Data Sources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataSources",
}
}
func (DataImportCron) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=dic;dics,categories=all\n+kubebuilder:printcolumn:name=\"Format\",type=\"string\",JSONPath=\".status.sourceFormat\",description=\"The format in which created sources are saved\"",
}
}
func (DataImportCronSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataImportCronSpec defines specification for DataImportCron",
"template": "Template specifies template for the DVs to be created",
"schedule": "Schedule specifies in cron format when and how often to look for new imports",
"garbageCollect": "GarbageCollect specifies whether old PVCs should be cleaned up after a new PVC is imported.\nOptions are currently \"Outdated\" and \"Never\", defaults to \"Outdated\".\n+optional",
"importsToKeep": "Number of import PVCs to keep when garbage collecting. Default is 3.\n+optional",
"managedDataSource": "ManagedDataSource specifies the name of the corresponding DataSource this cron will manage.\nDataSource has to be in the same namespace.",
"retentionPolicy": "RetentionPolicy specifies whether the created DataVolumes and DataSources are retained when their DataImportCron is deleted. Default is RatainAll.\n+optional",
"createdBy": "CreatedBy is the JSON-marshaled UserInfo of the user who created this DataImportCron.\nThis field is set by the mutating webhook and cannot be set by users.\n+optional",
}
}
func (DataImportCronStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataImportCronStatus provides the most recently observed status of the DataImportCron",
"currentImports": "CurrentImports are the imports in progress. Currently only a single import is supported.",
"lastImportedPVC": "LastImportedPVC is the last imported PVC",
"lastExecutionTimestamp": "LastExecutionTimestamp is the time of the last polling",
"lastImportTimestamp": "LastImportTimestamp is the time of the last import",
"sourceFormat": "SourceFormat defines the format of the DataImportCron-created disk image sources",
}
}
func (ImportStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "ImportStatus of a currently in progress import",
"DataVolumeName": "DataVolumeName is the currently in progress import DataVolume",
"Digest": "Digest of the currently imported image",
}
}
func (DataImportCronCondition) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataImportCronCondition represents the state of a data import cron condition",
}
}
func (DataImportCronList) SwaggerDoc() map[string]string {
return map[string]string{
"": "DataImportCronList provides the needed parameters to do request a list of DataImportCrons from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataImportCrons",
}
}
func (VolumeImportSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeImportSource works as a specification to populate PersistentVolumeClaims with data\nimported from an HTTP/S3/Registry/Blank/ImageIO/VDDK source\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion",
"status": "+optional",
}
}
func (VolumeImportSourceSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeImportSourceSpec defines the Spec field for VolumeImportSource",
"source": "Source is the src of the data to be imported in the target PVC",
"preallocation": "Preallocation controls whether storage for the target PVC should be allocated in advance.",
"contentType": "ContentType represents the type of the imported data (Kubevirt or archive)",
"targetClaim": "TargetClaim the name of the specific claim to be populated with a multistage import.",
"checkpoints": "Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import.",
"finalCheckpoint": "FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint.",
}
}
func (ImportSourceType) SwaggerDoc() map[string]string {
return map[string]string{
"": "ImportSourceType contains each one of the source types allowed in a VolumeImportSource",
}
}
func (VolumeImportSourceStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeImportSourceStatus provides the most recently observed status of the VolumeImportSource",
}
}
func (VolumeImportSourceList) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeImportSourceList provides the needed parameters to do request a list of Import Sources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataSources",
}
}
func (VolumeUploadSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeUploadSource is a specification to populate PersistentVolumeClaims with upload data\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion",
"status": "+optional",
}
}
func (VolumeUploadSourceSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeUploadSourceSpec defines specification for VolumeUploadSource",
"contentType": "ContentType represents the type of the upload data (Kubevirt or archive)",
"preallocation": "Preallocation controls whether storage for the target PVC should be allocated in advance.",
}
}
func (VolumeUploadSourceStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeUploadSourceStatus provides the most recently observed status of the VolumeUploadSource",
}
}
func (VolumeUploadSourceList) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeUploadSourceList provides the needed parameters to do request a list of Upload Sources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataSources",
}
}
func (VolumeCloneSource) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeCloneSource refers to a PVC/VolumeSnapshot of any storageclass/volumemode\nto be used as the source of a new PVC\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion",
}
}
func (VolumeCloneSourceSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeCloneSourceSpec defines the Spec field for VolumeCloneSource",
"source": "Source is the src of the data to be cloned to the target PVC",
"preallocation": "Preallocation controls whether storage for the target PVC should be allocated in advance.\n+optional",
"priorityClassName": "PriorityClassName is the priorityclass for the claim\n+optional",
}
}
func (VolumeCloneSourceList) SwaggerDoc() map[string]string {
return map[string]string{
"": "VolumeCloneSourceList provides the needed parameters to do request a list of VolumeCloneSources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of DataSources",
}
}
func (CDI) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDI is the CDI Operator CRD\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=cdi;cdis,scope=Cluster\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\"",
"status": "+optional",
}
}
func (CertConfig) SwaggerDoc() map[string]string {
return map[string]string{
"": "CertConfig contains the tunables for TLS certificates",
"duration": "The requested 'duration' (i.e. lifetime) of the Certificate.",
"renewBefore": "The amount of time before the currently issued certificate's `notAfter`\ntime that we will begin to attempt to renew the certificate.",
}
}
func (CDICertConfig) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDICertConfig has the CertConfigs for CDI",
"ca": "CA configuration\nCA certs are kept in the CA bundle as long as they are valid",
"server": "Server configuration\nCerts are rotated and discarded",
"client": "Client configuration\nCerts are rotated and discarded",
}
}
func (CDISpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDISpec defines our specification for the CDI installation",
"imagePullPolicy": "+kubebuilder:validation:Enum=Always;IfNotPresent;Never\nPullPolicy describes a policy for if/when to pull a container image",
"uninstallStrategy": "+kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist\nCDIUninstallStrategy defines the state to leave CDI on uninstall",
"infra": "Selectors and tolerations that should apply to cdi infrastructure components",
"workload": "Restrict on which nodes CDI workload pods will be scheduled",
"cloneStrategyOverride": "Clone strategy override: should we use a host-assisted copy even if snapshots are available?\n+kubebuilder:validation:Enum=\"copy\";\"snapshot\";\"csi-clone\"",
"config": "CDIConfig at CDI level",
"certConfig": "certificate configuration",
"priorityClass": "PriorityClass of the CDI control plane",
}
}
func (ComponentConfig) SwaggerDoc() map[string]string {
return map[string]string{
"": "ComponentConfig defines the scheduling and replicas configuration for CDI components",
"deploymentReplicas": "DeploymentReplicas set Replicas for cdi-deployment",
"apiServerReplicas": "ApiserverReplicas set Replicas for cdi-apiserver",
"uploadProxyReplicas": "UploadproxyReplicas set Replicas for cdi-uploadproxy",
}
}
func (CustomizeComponents) SwaggerDoc() map[string]string {
return map[string]string{
"": "CustomizeComponents defines patches for components deployed by the CDI operator.",
"patches": "+listType=atomic",
"flags": "Configure the value used for deployment and daemonset resources",
}
}
func (Flags) SwaggerDoc() map[string]string {
return map[string]string{
"": "Flags will create a patch that will replace all flags for the container's\ncommand field. The only flags that will be used are those define. There are no\nguarantees around forward/backward compatibility. If set incorrectly this will\ncause the resource when rolled out to error until flags are updated.",
}
}
func (CustomizeComponentsPatch) SwaggerDoc() map[string]string {
return map[string]string{
"": "CustomizeComponentsPatch defines a patch for some resource.",
"resourceName": "+kubebuilder:validation:MinLength=1",
"resourceType": "+kubebuilder:validation:MinLength=1",
}
}
func (CDIStatus) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDIStatus defines the status of the installation",
}
}
func (CDIList) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDIList provides the needed parameters to do request a list of CDIs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"items": "Items provides a list of CDIs",
}
}
func (CDIConfig) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDIConfig provides a user configuration for CDI\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster",
}
}
func (FilesystemOverhead) SwaggerDoc() map[string]string {
return map[string]string{
"": "FilesystemOverhead defines the reserved size for PVCs with VolumeMode: Filesystem",
"global": "Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)",
"storageClass": "StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value",
}
}
func (CDIConfigSpec) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDIConfigSpec defines specification for user configuration",
"uploadProxyURLOverride": "Override the URL used when uploading to a DataVolume",
"importProxy": "ImportProxy contains importer pod proxy configuration.\n+optional",
"scratchSpaceStorageClass": "Override the storage class to used for scratch space during transfer operations. The scratch space storage class is determined in the following order: 1. value of scratchSpaceStorageClass, if that doesn't exist, use the default storage class, if there is no default storage class, use the storage class of the DataVolume, if no storage class specified, use no storage class for scratch space",
"podResourceRequirements": "ResourceRequirements describes the compute resource requirements.",
"featureGates": "FeatureGates are a list of specific enabled feature gates",
"filesystemOverhead": "FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1, if not defined it is 0.06 (6% overhead)",
"preallocation": "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
"insecureRegistries": "InsecureRegistries is a list of TLS disabled registries",
"dataVolumeTTLSeconds": "DataVolumeTTLSeconds is the time in seconds after DataVolume completion it can be garbage collected. Disabled by default.\nDeprecated: Removed in v1.62.\n+optional",
"tlsSecurityProfile": "TLSSecurityProfile is used by operators to apply cluster-wide TLS security settings to operands.",
"imagePullSecrets": "The imagePullSecrets used to pull the container images",
"logVerbosity": "LogVerbosity overrides the default verbosity level used to initialize loggers\n+optional",
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go | /*
Copyright 2018 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api"
)
// DataVolume is an abstraction on top of PersistentVolumeClaims to allow easy population of those PersistentVolumeClaims with relation to VirtualMachines
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:resource:shortName=dv;dvs,categories=all
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase the data volume is in"
// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.progress",description="Transfer progress in percentage if known, N/A otherwise"
// +kubebuilder:printcolumn:name="Restarts",type="integer",JSONPath=".status.restartCount",description="The number of times the transfer has been restarted."
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type DataVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard Kubernetes object metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired specification for the DataVolume (source and target storage).
	Spec DataVolumeSpec `json:"spec"`
	// Status is the most recently observed status of the DataVolume.
	// +optional
	Status DataVolumeStatus `json:"status,omitempty"`
}
// DataVolumeSpec defines the DataVolume type specification
type DataVolumeSpec struct {
	// Source is the src of the data for the requested DataVolume
	// +optional
	Source *DataVolumeSource `json:"source,omitempty"`
	// SourceRef is an indirect reference to the source of data for the requested DataVolume
	// +optional
	SourceRef *DataVolumeSourceRef `json:"sourceRef,omitempty"`
	// PVC is the PVC specification
	PVC *corev1.PersistentVolumeClaimSpec `json:"pvc,omitempty"`
	// Storage is the requested storage specification
	Storage *StorageSpec `json:"storage,omitempty"`
	// PriorityClassName for Importer, Cloner and Uploader pod
	PriorityClassName string `json:"priorityClassName,omitempty"`
	// DataVolumeContentType options: "kubevirt", "archive"
	// +kubebuilder:validation:Enum="kubevirt";"archive"
	ContentType DataVolumeContentType `json:"contentType,omitempty"`
	// Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import.
	Checkpoints []DataVolumeCheckpoint `json:"checkpoints,omitempty"`
	// FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint.
	FinalCheckpoint bool `json:"finalCheckpoint,omitempty"`
	// Preallocation controls whether storage for DataVolumes should be allocated in advance.
	Preallocation *bool `json:"preallocation,omitempty"`
}
// StorageSpec defines the Storage type specification
// NOTE(review): field names and types appear to mirror the corresponding
// corev1.PersistentVolumeClaimSpec fields — confirm against upstream CDI docs.
type StorageSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
	// A label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources corev1.VolumeResourceRequirements `json:"resources,omitempty"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty"`
	// Name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty"`
	// volumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// +optional
	VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
	// This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.
	// If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.
	// +optional
	DataSource *corev1.TypedLocalObjectReference `json:"dataSource,omitempty"`
	// Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner.
	// This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty.
	// There are two important differences between DataSource and DataSourceRef:
	// * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects.
	// * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified.
	// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
	// +optional
	DataSourceRef *corev1.TypedObjectReference `json:"dataSourceRef,omitempty"`
}
// PersistentVolumeFromStorageProfile means the volume mode will be auto selected by CDI according to a matching StorageProfile
const PersistentVolumeFromStorageProfile corev1.PersistentVolumeMode = "FromStorageProfile"

// DataVolumeCheckpoint defines a stage in a warm migration.
type DataVolumeCheckpoint struct {
	// Previous is the identifier of the snapshot from the previous checkpoint.
	Previous string `json:"previous"`
	// Current is the identifier of the snapshot created for this checkpoint.
	Current string `json:"current"`
}

// DataVolumeContentType represents the types of the imported data
type DataVolumeContentType string

const (
	// DataVolumeKubeVirt is the content-type of the imported file, defaults to kubevirt
	DataVolumeKubeVirt DataVolumeContentType = "kubevirt"
	// DataVolumeArchive is the content-type to specify if there is a need to extract the imported archive
	DataVolumeArchive DataVolumeContentType = "archive"
)

// DataVolumeSource represents the source for our Data Volume, this can be HTTP, Imageio, S3, GCS, Registry or an existing PVC.
// NOTE(review): presumably exactly one of the fields is set at a time — confirm against CDI webhook validation.
type DataVolumeSource struct {
	// HTTP imports from an http(s) endpoint.
	HTTP *DataVolumeSourceHTTP `json:"http,omitempty"`
	// S3 imports from an S3 source.
	S3 *DataVolumeSourceS3 `json:"s3,omitempty"`
	// GCS imports from a Google Cloud Storage source.
	GCS *DataVolumeSourceGCS `json:"gcs,omitempty"`
	// Registry imports a disk image from a container registry.
	Registry *DataVolumeSourceRegistry `json:"registry,omitempty"`
	// PVC clones an existing PersistentVolumeClaim.
	PVC *DataVolumeSourcePVC `json:"pvc,omitempty"`
	// Upload populates the volume by uploading the source.
	Upload *DataVolumeSourceUpload `json:"upload,omitempty"`
	// Blank creates a new raw blank image.
	Blank *DataVolumeBlankImage `json:"blank,omitempty"`
	// Imageio imports a disk from an oVirt imageio source.
	Imageio *DataVolumeSourceImageIO `json:"imageio,omitempty"`
	// VDDK imports a disk from a VMware source via the VDDK library.
	VDDK *DataVolumeSourceVDDK `json:"vddk,omitempty"`
	// Snapshot clones an existing VolumeSnapshot.
	Snapshot *DataVolumeSourceSnapshot `json:"snapshot,omitempty"`
}

// DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC
type DataVolumeSourcePVC struct {
	// The namespace of the source PVC
	Namespace string `json:"namespace"`
	// The name of the source PVC
	Name string `json:"name"`
}

// DataVolumeSourceSnapshot provides the parameters to create a Data Volume from an existing VolumeSnapshot
type DataVolumeSourceSnapshot struct {
	// The namespace of the source VolumeSnapshot
	Namespace string `json:"namespace"`
	// The name of the source VolumeSnapshot
	Name string `json:"name"`
}

// DataSourceRefSourceDataSource serves as a reference to another DataSource.
// Can be resolved into a DataVolumeSourcePVC or a DataVolumeSourceSnapshot.
// The maximum depth of a reference chain may not exceed 1.
type DataSourceRefSourceDataSource struct {
	// The namespace of the source DataSource
	Namespace string `json:"namespace"`
	// The name of the source DataSource
	Name string `json:"name"`
}

// DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC
type DataVolumeBlankImage struct{}

// DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source
type DataVolumeSourceUpload struct {
}

// DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source
type DataVolumeSourceS3 struct {
	// URL is the url of the S3 source
	URL string `json:"url"`
	// SecretRef provides the secret reference needed to access the S3 source
	SecretRef string `json:"secretRef,omitempty"`
	// CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate
	// +optional
	CertConfigMap string `json:"certConfigMap,omitempty"`
}

// DataVolumeSourceGCS provides the parameters to create a Data Volume from a GCS source
type DataVolumeSourceGCS struct {
	// URL is the url of the GCS source
	URL string `json:"url"`
	// SecretRef provides the secret reference needed to access the GCS source
	SecretRef string `json:"secretRef,omitempty"`
}
// DataVolumeSourceRegistry provides the parameters to create a Data Volume from a registry source
type DataVolumeSourceRegistry struct {
	// URL is the url of the registry source (starting with the scheme: docker, oci-archive)
	// +optional
	URL *string `json:"url,omitempty"`
	// ImageStream is the name of image stream for import
	// +optional
	ImageStream *string `json:"imageStream,omitempty"`
	// PullMethod can be either "pod" (default import), or "node" (node docker cache based import)
	// +optional
	PullMethod *RegistryPullMethod `json:"pullMethod,omitempty"`
	// SecretRef provides the secret reference needed to access the Registry source
	// +optional
	SecretRef *string `json:"secretRef,omitempty"`
	// CertConfigMap provides a reference to the Registry certs
	// +optional
	CertConfigMap *string `json:"certConfigMap,omitempty"`
	// Platform describes the minimum runtime requirements of the image
	// +optional
	Platform *PlatformOptions `json:"platform,omitempty"`
}

// PlatformOptions describes the minimum runtime requirements of a registry image.
type PlatformOptions struct {
	// Architecture specifies the image target CPU architecture
	// +optional
	Architecture string `json:"architecture,omitempty"`
}

const (
	// RegistrySchemeDocker is docker scheme prefix
	RegistrySchemeDocker = "docker"
	// RegistrySchemeOci is oci-archive scheme prefix
	RegistrySchemeOci = "oci-archive"
)

// RegistryPullMethod represents the registry import pull method
type RegistryPullMethod string

const (
	// RegistryPullPod is the standard import
	RegistryPullPod RegistryPullMethod = "pod"
	// RegistryPullNode is the node docker cache based import
	RegistryPullNode RegistryPullMethod = "node"
)

// DataVolumeSourceHTTP can be either an http or https endpoint, with an optional basic auth user name and password, and an optional configmap containing additional CAs
type DataVolumeSourceHTTP struct {
	// URL is the URL of the http(s) endpoint
	URL string `json:"url"`
	// SecretRef is a Secret reference; the secret should contain accessKeyId (user name) base64 encoded, and secretKey (password) also base64 encoded
	// +optional
	SecretRef string `json:"secretRef,omitempty"`
	// CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate
	// +optional
	CertConfigMap string `json:"certConfigMap,omitempty"`
	// ExtraHeaders is a list of strings containing extra headers to include with HTTP transfer requests
	// +optional
	ExtraHeaders []string `json:"extraHeaders,omitempty"`
	// SecretExtraHeaders is a list of Secret references, each containing an extra HTTP header that may include sensitive information
	// +optional
	SecretExtraHeaders []string `json:"secretExtraHeaders,omitempty"`
}

// DataVolumeSourceImageIO provides the parameters to create a Data Volume from an imageio source
type DataVolumeSourceImageIO struct {
	// URL is the URL of the ovirt-engine
	URL string `json:"url"`
	// DiskID provides id of a disk to be imported
	DiskID string `json:"diskId"`
	// SecretRef provides the secret reference needed to access the ovirt-engine
	SecretRef string `json:"secretRef,omitempty"`
	// CertConfigMap provides a reference to the CA cert
	CertConfigMap string `json:"certConfigMap,omitempty"`
	// InsecureSkipVerify is a flag to skip certificate verification
	InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"`
}
// DataVolumeSourceVDDK provides the parameters to create a Data Volume from a Vmware source
type DataVolumeSourceVDDK struct {
	// URL is the URL of the vCenter or ESXi host with the VM to migrate
	URL string `json:"url,omitempty"`
	// UUID is the UUID of the virtual machine that the backing file is attached to in vCenter/ESXi
	UUID string `json:"uuid,omitempty"`
	// BackingFile is the path to the virtual hard disk to migrate from vCenter/ESXi
	BackingFile string `json:"backingFile,omitempty"`
	// Thumbprint is the certificate thumbprint of the vCenter or ESXi host
	Thumbprint string `json:"thumbprint,omitempty"`
	// SecretRef provides a reference to a secret containing the username and password needed to access the vCenter or ESXi host
	SecretRef string `json:"secretRef,omitempty"`
	// InitImageURL is an optional URL to an image containing an extracted VDDK library, overrides v2v-vmware config map
	InitImageURL string `json:"initImageURL,omitempty"`
	// ExtraArgs is a reference to a ConfigMap containing extra arguments to pass directly to the VDDK library
	ExtraArgs string `json:"extraArgs,omitempty"`
}

// DataVolumeSourceRef defines an indirect reference to the source of data for the DataVolume
type DataVolumeSourceRef struct {
	// The kind of the source reference, currently only "DataSource" is supported
	Kind string `json:"kind"`
	// The namespace of the source reference, defaults to the DataVolume namespace
	// +optional
	Namespace *string `json:"namespace,omitempty"`
	// The name of the source reference
	Name string `json:"name"`
}

const (
	// DataVolumeDataSource is DataSource source reference for DataVolume
	DataVolumeDataSource = "DataSource"
)

// DataVolumeStatus contains the current status of the DataVolume
type DataVolumeStatus struct {
	// ClaimName is the name of the underlying PVC used by the DataVolume.
	ClaimName string `json:"claimName,omitempty"`
	// Phase is the current phase of the data volume
	Phase DataVolumePhase `json:"phase,omitempty"`
	// Progress is the current progress of the transfer operation ("N/A" when not available).
	Progress DataVolumeProgress `json:"progress,omitempty"`
	// RestartCount is the number of times the pod populating the DataVolume has restarted
	RestartCount int32 `json:"restartCount,omitempty"`
	// Conditions holds the latest observed DataVolume conditions (Ready, Bound, Running).
	Conditions []DataVolumeCondition `json:"conditions,omitempty" optional:"true"`
}

// DataVolumeList provides the needed parameters to request a list of Data Volumes from the system
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DataVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items provides a list of DataVolumes
	Items []DataVolume `json:"items"`
}

// DataVolumeCondition represents the state of a data volume condition.
type DataVolumeCondition struct {
	Type DataVolumeConditionType `json:"type" description:"type of condition ie. Ready|Bound|Running."`
	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"`
	Reason string `json:"reason,omitempty" description:"reason for the condition's last transition"`
	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
}

// DataVolumePhase is the current phase of the DataVolume
type DataVolumePhase string

// DataVolumeProgress is the current progress of the DataVolume transfer operation. Value between 0 and 100 inclusive, N/A if not available
type DataVolumeProgress string

// DataVolumeConditionType is the string representation of known condition types
type DataVolumeConditionType string
const (
	// PhaseUnset represents a data volume with no current phase
	PhaseUnset DataVolumePhase = ""
	// Pending represents a data volume with a current phase of Pending
	Pending DataVolumePhase = "Pending"
	// PVCBound represents a data volume with a current phase of PVCBound
	PVCBound DataVolumePhase = "PVCBound"
	// ImportScheduled represents a data volume with a current phase of ImportScheduled
	ImportScheduled DataVolumePhase = "ImportScheduled"
	// ImportInProgress represents a data volume with a current phase of ImportInProgress
	ImportInProgress DataVolumePhase = "ImportInProgress"
	// CloneScheduled represents a data volume with a current phase of CloneScheduled
	CloneScheduled DataVolumePhase = "CloneScheduled"
	// CloneInProgress represents a data volume with a current phase of CloneInProgress
	CloneInProgress DataVolumePhase = "CloneInProgress"
	// SnapshotForSmartCloneInProgress represents a data volume with a current phase of SnapshotForSmartCloneInProgress
	SnapshotForSmartCloneInProgress DataVolumePhase = "SnapshotForSmartCloneInProgress"
	// CloneFromSnapshotSourceInProgress represents a data volume with a current phase of CloneFromSnapshotSourceInProgress
	CloneFromSnapshotSourceInProgress DataVolumePhase = "CloneFromSnapshotSourceInProgress"
	// SmartClonePVCInProgress represents a data volume with a current phase of SmartClonePVCInProgress
	SmartClonePVCInProgress DataVolumePhase = "SmartClonePVCInProgress"
	// CSICloneInProgress represents a data volume with a current phase of CSICloneInProgress
	CSICloneInProgress DataVolumePhase = "CSICloneInProgress"
	// ExpansionInProgress is the state when a PVC is expanded
	ExpansionInProgress DataVolumePhase = "ExpansionInProgress"
	// NamespaceTransferInProgress is the state when a PVC is transferred
	NamespaceTransferInProgress DataVolumePhase = "NamespaceTransferInProgress"
	// UploadScheduled represents a data volume with a current phase of UploadScheduled
	UploadScheduled DataVolumePhase = "UploadScheduled"
	// UploadReady represents a data volume with a current phase of UploadReady
	UploadReady DataVolumePhase = "UploadReady"
	// WaitForFirstConsumer represents a data volume with a current phase of WaitForFirstConsumer
	WaitForFirstConsumer DataVolumePhase = "WaitForFirstConsumer"
	// PendingPopulation represents a data volume which should be populated by
	// the CDI populators but whose PVC has not been created yet
	PendingPopulation DataVolumePhase = "PendingPopulation"
	// Succeeded represents a DataVolumePhase of Succeeded
	Succeeded DataVolumePhase = "Succeeded"
	// Failed represents a DataVolumePhase of Failed
	Failed DataVolumePhase = "Failed"
	// Unknown represents a DataVolumePhase of Unknown
	Unknown DataVolumePhase = "Unknown"
	// Paused represents a DataVolumePhase of Paused
	Paused DataVolumePhase = "Paused"
	// PrepClaimInProgress represents a data volume with a current phase of PrepClaimInProgress
	PrepClaimInProgress DataVolumePhase = "PrepClaimInProgress"
	// RebindInProgress represents a data volume with a current phase of RebindInProgress
	RebindInProgress DataVolumePhase = "RebindInProgress"
	// DataVolumeReady is the condition that indicates if the data volume is ready to be consumed.
	DataVolumeReady DataVolumeConditionType = "Ready"
	// DataVolumeBound is the condition that indicates if the underlying PVC is bound or not.
	DataVolumeBound DataVolumeConditionType = "Bound"
	// DataVolumeRunning is the condition that indicates if the import/upload/clone container is running.
	DataVolumeRunning DataVolumeConditionType = "Running"
)
// DataVolumeCloneSourceSubresource is the subresource checked for permission to clone
const DataVolumeCloneSourceSubresource = "source"

// this has to be here otherwise informer-gen doesn't recognize it
// see https://github.com/kubernetes/code-generator/issues/59
// +genclient:nonNamespaced
// StorageProfile provides a CDI specific recommendation for storage parameters
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:subresource:status
type StorageProfile struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec StorageProfileSpec `json:"spec"`
	Status StorageProfileStatus `json:"status,omitempty"`
}

// StorageProfileSpec defines specification for StorageProfile
type StorageProfileSpec struct {
	// CloneStrategy defines the preferred method for performing a CDI clone
	CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"`
	// ClaimPropertySets is a provided set of properties applicable to PVC
	// +kubebuilder:validation:MaxItems=8
	ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"`
	// DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources
	DataImportCronSourceFormat *DataImportCronSourceFormat `json:"dataImportCronSourceFormat,omitempty"`
	// SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. If not set, a VolumeSnapshotClass is chosen according to the provisioner.
	SnapshotClass *string `json:"snapshotClass,omitempty"`
}

// StorageProfileStatus provides the most recently observed status of the StorageProfile
type StorageProfileStatus struct {
	// The StorageClass name for which capabilities are defined
	StorageClass *string `json:"storageClass,omitempty"`
	// The Storage class provisioner plugin name
	Provisioner *string `json:"provisioner,omitempty"`
	// CloneStrategy defines the preferred method for performing a CDI clone
	CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"`
	// ClaimPropertySets computed from the spec and detected in the system
	// +kubebuilder:validation:MaxItems=8
	ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"`
	// DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources
	DataImportCronSourceFormat *DataImportCronSourceFormat `json:"dataImportCronSourceFormat,omitempty"`
	// SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. If not set, a VolumeSnapshotClass is chosen according to the provisioner.
	SnapshotClass *string `json:"snapshotClass,omitempty"`
}

// ClaimPropertySet is a set of properties applicable to PVC
type ClaimPropertySet struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +kubebuilder:validation:MaxItems=4
	// +kubebuilder:validation:XValidation:rule="self.all(am, am in ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany', 'ReadWriteOncePod'])", message="Illegal AccessMode"
	AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes"`
	// VolumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// +kubebuilder:validation:Enum="Block";"Filesystem"
	VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode"`
}

// StorageProfileList provides the needed parameters to request a list of StorageProfile from the system
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StorageProfileList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items provides a list of StorageProfile
	Items []StorageProfile `json:"items"`
}
// DataSource references an import/clone source for a DataVolume
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:resource:shortName=das,categories=all
type DataSource struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec DataSourceSpec `json:"spec"`
	Status DataSourceStatus `json:"status,omitempty"`
}

// DataSourceSpec defines specification for DataSource
type DataSourceSpec struct {
	// Source is the source of the data referenced by the DataSource
	Source DataSourceSource `json:"source"`
}

// DataSourceSource represents the source for our DataSource
type DataSourceSource struct {
	// PVC is a reference to a source PersistentVolumeClaim.
	// +optional
	PVC *DataVolumeSourcePVC `json:"pvc,omitempty"`
	// Snapshot is a reference to a source VolumeSnapshot.
	// +optional
	Snapshot *DataVolumeSourceSnapshot `json:"snapshot,omitempty"`
	// DataSource is a reference to another DataSource (chain depth limited to 1).
	// +optional
	DataSource *DataSourceRefSourceDataSource `json:"dataSource,omitempty"`
}

// DataSourceStatus provides the most recently observed status of the DataSource
type DataSourceStatus struct {
	// Source is the current source of the data referenced by the DataSource
	Source DataSourceSource `json:"source,omitempty"`
	// Conditions holds the latest observed DataSource conditions (e.g. Ready).
	Conditions []DataSourceCondition `json:"conditions,omitempty" optional:"true"`
}

// DataSourceCondition represents the state of a data source condition
type DataSourceCondition struct {
	Type DataSourceConditionType `json:"type" description:"type of condition ie. Ready"`
	ConditionState `json:",inline"`
}

// DataSourceConditionType is the string representation of known condition types
type DataSourceConditionType string

const (
	// DataSourceReady is the condition that indicates if the data source is ready to be consumed
	DataSourceReady DataSourceConditionType = "Ready"
)

// ConditionState represents the state of a condition
type ConditionState struct {
	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"`
	Reason string `json:"reason,omitempty" description:"reason for the condition's last transition"`
	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
}

// DataSourceList provides the needed parameters to request a list of Data Sources from the system
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DataSourceList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items provides a list of DataSources
	Items []DataSource `json:"items"`
}
// DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:resource:shortName=dic;dics,categories=all
// +kubebuilder:printcolumn:name="Format",type="string",JSONPath=".status.sourceFormat",description="The format in which created sources are saved"
type DataImportCron struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec DataImportCronSpec `json:"spec"`
	Status DataImportCronStatus `json:"status,omitempty"`
}

// DataImportCronSpec defines specification for DataImportCron
type DataImportCronSpec struct {
	// Template specifies template for the DVs to be created
	Template DataVolume `json:"template"`
	// Schedule specifies in cron format when and how often to look for new imports
	Schedule string `json:"schedule"`
	// GarbageCollect specifies whether old PVCs should be cleaned up after a new PVC is imported.
	// Options are currently "Outdated" and "Never", defaults to "Outdated".
	// +optional
	GarbageCollect *DataImportCronGarbageCollect `json:"garbageCollect,omitempty"`
	// ImportsToKeep is the number of import PVCs to keep when garbage collecting. Default is 3.
	// +optional
	ImportsToKeep *int32 `json:"importsToKeep,omitempty"`
	// ManagedDataSource specifies the name of the corresponding DataSource this cron will manage.
	// DataSource has to be in the same namespace.
	ManagedDataSource string `json:"managedDataSource"`
	// RetentionPolicy specifies whether the created DataVolumes and DataSources are retained when their DataImportCron is deleted. Default is RetainAll.
	// +optional
	RetentionPolicy *DataImportCronRetentionPolicy `json:"retentionPolicy,omitempty"`
	// CreatedBy is the JSON-marshaled UserInfo of the user who created this DataImportCron.
	// This field is set by the mutating webhook and cannot be set by users.
	// +optional
	CreatedBy *string `json:"createdBy,omitempty"`
}

// DataImportCronGarbageCollect represents the DataImportCron garbage collection mode
type DataImportCronGarbageCollect string

const (
	// DataImportCronGarbageCollectNever specifies that garbage collection is disabled
	DataImportCronGarbageCollectNever DataImportCronGarbageCollect = "Never"
	// DataImportCronGarbageCollectOutdated specifies that old PVCs should be cleaned up after a new PVC is imported
	DataImportCronGarbageCollectOutdated DataImportCronGarbageCollect = "Outdated"
)

// DataImportCronRetentionPolicy represents the DataImportCron retention policy
type DataImportCronRetentionPolicy string

const (
	// DataImportCronRetainNone specifies that the created DataVolumes and DataSources are deleted when their DataImportCron is deleted
	DataImportCronRetainNone DataImportCronRetentionPolicy = "None"
	// DataImportCronRetainAll specifies that the created DataVolumes and DataSources are retained when their DataImportCron is deleted
	DataImportCronRetainAll DataImportCronRetentionPolicy = "All"
)

// DataImportCronStatus provides the most recently observed status of the DataImportCron
type DataImportCronStatus struct {
	// CurrentImports are the imports in progress. Currently only a single import is supported.
	CurrentImports []ImportStatus `json:"currentImports,omitempty"`
	// LastImportedPVC is the last imported PVC
	LastImportedPVC *DataVolumeSourcePVC `json:"lastImportedPVC,omitempty"`
	// LastExecutionTimestamp is the time of the last polling
	LastExecutionTimestamp *metav1.Time `json:"lastExecutionTimestamp,omitempty"`
	// LastImportTimestamp is the time of the last import
	LastImportTimestamp *metav1.Time `json:"lastImportTimestamp,omitempty"`
	// SourceFormat defines the format of the DataImportCron-created disk image sources
	SourceFormat *DataImportCronSourceFormat `json:"sourceFormat,omitempty"`
	// Conditions holds the latest observed DataImportCron conditions (Progressing, UpToDate).
	Conditions []DataImportCronCondition `json:"conditions,omitempty" optional:"true"`
}

// ImportStatus of a currently in progress import
type ImportStatus struct {
	// DataVolumeName is the currently in progress import DataVolume
	DataVolumeName string `json:"DataVolumeName"`
	// Digest of the currently imported image
	Digest string `json:"Digest"`
}

// DataImportCronCondition represents the state of a data import cron condition
type DataImportCronCondition struct {
	Type DataImportCronConditionType `json:"type" description:"type of condition ie. Progressing, UpToDate"`
	ConditionState `json:",inline"`
}

// DataImportCronConditionType is the string representation of known condition types
type DataImportCronConditionType string

const (
	// DataImportCronProgressing is the condition that indicates import is progressing
	DataImportCronProgressing DataImportCronConditionType = "Progressing"
	// DataImportCronUpToDate is the condition that indicates latest import is up to date
	DataImportCronUpToDate DataImportCronConditionType = "UpToDate"
)
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go | /*
Copyright 2020 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// IsPopulated reports whether the persistent volume claim has been fully populated.
// The decision follows this logic:
//  1. If the PVC is not controlled by a DataVolume, return true — something else
//     is assumed to have properly populated the image.
//  2. Otherwise look up the owning DataVolume via getDvFunc and return true only
//     when its phase is Succeeded. A lookup error is returned as-is.
func IsPopulated(pvc *corev1.PersistentVolumeClaim, getDvFunc func(name, namespace string) (*DataVolume, error)) (bool, error) {
	owner := metav1.GetControllerOf(pvc)
	if owner == nil || owner.Kind != "DataVolume" {
		// No DataVolume controller: assume the image was populated externally.
		return true, nil
	}
	dv, err := getDvFunc(owner.Name, pvc.Namespace)
	if err != nil {
		return false, err
	}
	return dv.Status.Phase == Succeeded, nil
}
// IsSucceededOrPendingPopulation reports whether the persistent volume claim has been
// fully populated or is waiting for a consumer. The decision follows this logic:
//  1. If the PVC is not controlled by a DataVolume, return true — something else is
//     assumed to have properly populated the image.
//  2. Otherwise look up the owning DataVolume via getDvFunc and return true when its
//     phase is Succeeded or PendingPopulation. A lookup error is returned as-is.
func IsSucceededOrPendingPopulation(pvc *corev1.PersistentVolumeClaim, getDvFunc func(name, namespace string) (*DataVolume, error)) (bool, error) {
	owner := metav1.GetControllerOf(pvc)
	if owner == nil || owner.Kind != "DataVolume" {
		// No DataVolume controller: assume the image was populated externally.
		return true, nil
	}
	dv, err := getDvFunc(owner.Name, pvc.Namespace)
	if err != nil {
		return false, err
	}
	switch dv.Status.Phase {
	case Succeeded, PendingPopulation:
		return true, nil
	default:
		return false, nil
	}
}
// IsWaitForFirstConsumerBeforePopulating reports whether the persistent volume claim
// is in ClaimPending state and waiting for its first consumer. The decision follows
// this logic:
//  1. If the PVC is not controlled by a DataVolume, return false — population cannot
//     be assumed.
//  2. Otherwise look up the owning DataVolume via getDvFunc and return true only when
//     its phase is WaitForFirstConsumer. A lookup error is returned as-is.
func IsWaitForFirstConsumerBeforePopulating(pvc *corev1.PersistentVolumeClaim, getDvFunc func(name, namespace string) (*DataVolume, error)) (bool, error) {
	owner := metav1.GetControllerOf(pvc)
	if owner == nil || owner.Kind != "DataVolume" {
		return false, nil
	}
	dv, err := getDvFunc(owner.Name, pvc.Namespace)
	if err != nil {
		return false, err
	}
	return dv.Status.Phase == WaitForFirstConsumer, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_tlssecurityprofile.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_tlssecurityprofile.go | package v1beta1
// following copied from github.com/openshift/api/config/v1
// TLSSecurityProfile defines the schema for a TLS security profile. This object
// is used by operators to apply TLS security settings to operands.
// +union
type TLSSecurityProfile struct {
// type is one of Old, Intermediate, Modern or Custom. Custom provides
// the ability to specify individual TLS security profile parameters.
// Old, Intermediate and Modern are TLS security profiles based on:
//
// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
//
// The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
// are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
// reduced.
//
// Note that the Modern profile is currently not supported because it is not
// yet well adopted by common software libraries.
//
// +unionDiscriminator
// +optional
Type TLSProfileType `json:"type"`
// old is a TLS security profile based on:
//
// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
//
// and looks like this (yaml):
//
// ciphers:
// - TLS_AES_128_GCM_SHA256
// - TLS_AES_256_GCM_SHA384
// - TLS_CHACHA20_POLY1305_SHA256
// - ECDHE-ECDSA-AES128-GCM-SHA256
// - ECDHE-RSA-AES128-GCM-SHA256
// - ECDHE-ECDSA-AES256-GCM-SHA384
// - ECDHE-RSA-AES256-GCM-SHA384
// - ECDHE-ECDSA-CHACHA20-POLY1305
// - ECDHE-RSA-CHACHA20-POLY1305
// - DHE-RSA-AES128-GCM-SHA256
// - DHE-RSA-AES256-GCM-SHA384
// - DHE-RSA-CHACHA20-POLY1305
// - ECDHE-ECDSA-AES128-SHA256
// - ECDHE-RSA-AES128-SHA256
// - ECDHE-ECDSA-AES128-SHA
// - ECDHE-RSA-AES128-SHA
// - ECDHE-ECDSA-AES256-SHA384
// - ECDHE-RSA-AES256-SHA384
// - ECDHE-ECDSA-AES256-SHA
// - ECDHE-RSA-AES256-SHA
// - DHE-RSA-AES128-SHA256
// - DHE-RSA-AES256-SHA256
// - AES128-GCM-SHA256
// - AES256-GCM-SHA384
// - AES128-SHA256
// - AES256-SHA256
// - AES128-SHA
// - AES256-SHA
// - DES-CBC3-SHA
// minTLSVersion: VersionTLS10
//
// +optional
// +nullable
Old *OldTLSProfile `json:"old,omitempty"`
// intermediate is a TLS security profile based on:
//
// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
//
// and looks like this (yaml):
//
// ciphers:
// - TLS_AES_128_GCM_SHA256
// - TLS_AES_256_GCM_SHA384
// - TLS_CHACHA20_POLY1305_SHA256
// - ECDHE-ECDSA-AES128-GCM-SHA256
// - ECDHE-RSA-AES128-GCM-SHA256
// - ECDHE-ECDSA-AES256-GCM-SHA384
// - ECDHE-RSA-AES256-GCM-SHA384
// - ECDHE-ECDSA-CHACHA20-POLY1305
// - ECDHE-RSA-CHACHA20-POLY1305
// - DHE-RSA-AES128-GCM-SHA256
// - DHE-RSA-AES256-GCM-SHA384
// minTLSVersion: VersionTLS12
//
// +optional
// +nullable
Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
// modern is a TLS security profile based on:
//
// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
//
// and looks like this (yaml):
//
// ciphers:
// - TLS_AES_128_GCM_SHA256
// - TLS_AES_256_GCM_SHA384
// - TLS_CHACHA20_POLY1305_SHA256
// minTLSVersion: VersionTLS13
//
// NOTE: Currently unsupported.
//
// +optional
// +nullable
Modern *ModernTLSProfile `json:"modern,omitempty"`
// custom is a user-defined TLS security profile. Be extremely careful using a custom
// profile as invalid configurations can be catastrophic. An example custom profile
// looks like this:
//
// ciphers:
// - ECDHE-ECDSA-CHACHA20-POLY1305
// - ECDHE-RSA-CHACHA20-POLY1305
// - ECDHE-RSA-AES128-GCM-SHA256
// - ECDHE-ECDSA-AES128-GCM-SHA256
// minTLSVersion: VersionTLS11
//
// +optional
// +nullable
Custom *CustomTLSProfile `json:"custom,omitempty"`
}
// OldTLSProfile is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
type OldTLSProfile struct{}
// IntermediateTLSProfile is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
type IntermediateTLSProfile struct{}
// ModernTLSProfile is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
type ModernTLSProfile struct{}
// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
// using a custom TLS profile as invalid configurations can be catastrophic.
type CustomTLSProfile struct {
TLSProfileSpec `json:",inline"`
}
// TLSProfileType defines a TLS security profile type.
// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom
type TLSProfileType string
const (
// TLSProfileOldType is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
TLSProfileOldType TLSProfileType = "Old"
// TLSProfileIntermediateType is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
TLSProfileIntermediateType TLSProfileType = "Intermediate"
// TLSProfileModernType is a TLS security profile based on:
// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
TLSProfileModernType TLSProfileType = "Modern"
// TLSProfileCustomType is a TLS security profile that allows for user-defined parameters.
TLSProfileCustomType TLSProfileType = "Custom"
)
// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
type TLSProfileSpec struct {
// ciphers is used to specify the cipher algorithms that are negotiated
// during the TLS handshake. Operators may remove entries their operands
// do not support. For example, to use DES-CBC3-SHA (yaml):
//
// ciphers:
// - DES-CBC3-SHA
//
Ciphers []string `json:"ciphers"`
// minTLSVersion is used to specify the minimal version of the TLS protocol
// that is negotiated during the TLS handshake. For example, to use TLS
// versions 1.1, 1.2 and 1.3 (yaml):
//
// minTLSVersion: VersionTLS11
//
// NOTE: currently the highest minTLSVersion allowed is VersionTLS12
//
MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
}
// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
// Protocol versions are based on the following most common TLS configurations:
//
// https://ssl-config.mozilla.org/
//
// Note that SSLv3.0 is not a supported protocol version due to well known
// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13
type TLSProtocolVersion string
const (
// VersionTLS10 is version 1.0 of the TLS security protocol.
VersionTLS10 TLSProtocolVersion = "VersionTLS10"
// VersionTLS11 is version 1.1 of the TLS security protocol.
VersionTLS11 TLSProtocolVersion = "VersionTLS11"
// VersionTLS12 is version 1.2 of the TLS security protocol.
VersionTLS12 TLSProtocolVersion = "VersionTLS12"
// VersionTLS13 is version 1.3 of the TLS security protocol.
VersionTLS13 TLSProtocolVersion = "VersionTLS13"
)
// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec.
//
// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
// just be sure to allowlist only and everything will be ok.
var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
TLSProfileOldType: {
Ciphers: []string{
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305",
"DHE-RSA-AES128-GCM-SHA256",
"DHE-RSA-AES256-GCM-SHA384",
"DHE-RSA-CHACHA20-POLY1305",
"ECDHE-ECDSA-AES128-SHA256",
"ECDHE-RSA-AES128-SHA256",
"ECDHE-ECDSA-AES128-SHA",
"ECDHE-RSA-AES128-SHA",
"ECDHE-ECDSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA384",
"ECDHE-ECDSA-AES256-SHA",
"ECDHE-RSA-AES256-SHA",
"DHE-RSA-AES128-SHA256",
"DHE-RSA-AES256-SHA256",
"AES128-GCM-SHA256",
"AES256-GCM-SHA384",
"AES128-SHA256",
"AES256-SHA256",
"AES128-SHA",
"AES256-SHA",
"DES-CBC3-SHA",
},
MinTLSVersion: VersionTLS10,
},
TLSProfileIntermediateType: {
Ciphers: []string{
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305",
"DHE-RSA-AES128-GCM-SHA256",
"DHE-RSA-AES256-GCM-SHA384",
},
MinTLSVersion: VersionTLS12,
},
TLSProfileModernType: {
Ciphers: []string{
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
},
MinTLSVersion: VersionTLS13,
},
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go | package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"kubevirt.io/containerized-data-importer-api/pkg/apis/core"
)
// SchemeGroupVersion and GroupVersion is group version used to register these objects
var (
SchemeGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: "v1beta1"}
GroupVersion = schema.GroupVersion{Group: core.GroupName, Version: "v1beta1"}
)
// CDIGroupVersionKind group version kind
var CDIGroupVersionKind = schema.GroupVersionKind{Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, Kind: "CDI"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder tbd
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme tbd
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DataVolume{},
&DataVolumeList{},
&CDIConfig{},
&CDIConfigList{},
&CDI{},
&CDIList{},
&StorageProfile{},
&StorageProfileList{},
&DataSource{},
&DataSourceList{},
&DataImportCron{},
&DataImportCronList{},
&ObjectTransfer{},
&ObjectTransferList{},
&VolumeImportSource{},
&VolumeImportSourceList{},
&VolumeUploadSource{},
&VolumeUploadSourceList{},
&VolumeCloneSource{},
&VolumeCloneSourceList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/authorize_utils.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/authorize_utils.go | /*
Copyright 2020 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
authentication "k8s.io/api/authentication/v1"
authorization "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)
func newCloneSourceHandler(dataVolume *DataVolume, dsGet dsGetFunc) (CloneSourceHandler, error) {
var pvcSource *DataVolumeSourcePVC
var snapshotSource *DataVolumeSourceSnapshot
if dataVolume.Spec.Source != nil {
if dataVolume.Spec.Source.PVC != nil {
pvcSource = dataVolume.Spec.Source.PVC
} else if dataVolume.Spec.Source.Snapshot != nil {
snapshotSource = dataVolume.Spec.Source.Snapshot
}
} else if dataVolume.Spec.SourceRef != nil && dataVolume.Spec.SourceRef.Kind == DataVolumeDataSource {
ns := dataVolume.Namespace
if dataVolume.Spec.SourceRef.Namespace != nil && *dataVolume.Spec.SourceRef.Namespace != "" {
ns = *dataVolume.Spec.SourceRef.Namespace
}
dataSource, err := dsGet(ns, dataVolume.Spec.SourceRef.Name)
if err != nil {
return CloneSourceHandler{}, err
}
pvcSource = dataSource.Spec.Source.PVC
snapshotSource = dataSource.Spec.Source.Snapshot
if dataSource.Spec.Source.DataSource != nil {
pvcSource = dataSource.Status.Source.PVC
snapshotSource = dataSource.Status.Source.Snapshot
}
}
switch {
case pvcSource != nil:
return CloneSourceHandler{
CloneType: pvcClone,
TokenResource: tokenResourcePvc,
UserCloneAuthFunc: CanUserClonePVC,
SACloneAuthFunc: CanServiceAccountClonePVC,
SourceName: pvcSource.Name,
SourceNamespace: pvcSource.Namespace,
}, nil
case snapshotSource != nil:
return CloneSourceHandler{
CloneType: snapshotClone,
TokenResource: tokenResourceSnapshot,
UserCloneAuthFunc: CanUserCloneSnapshot,
SACloneAuthFunc: CanServiceAccountCloneSnapshot,
SourceName: snapshotSource.Name,
SourceNamespace: snapshotSource.Namespace,
}, nil
default:
return CloneSourceHandler{
CloneType: noClone,
}, nil
}
}
var (
tokenResourcePvc = metav1.GroupVersionResource{
Group: "",
Version: "v1",
Resource: "persistentvolumeclaims",
}
tokenResourceSnapshot = metav1.GroupVersionResource{
Group: "snapshot.storage.k8s.io",
Version: "v1",
Resource: "volumesnapshots",
}
)
type cloneType int
const (
noClone cloneType = iota
pvcClone
snapshotClone
)
// CloneSourceHandler is a helper around determining the
// correct way of authorizing a particular DataVolume
// +k8s:deepcopy-gen=false
// +k8s:openapi-gen=false
type CloneSourceHandler struct {
CloneType cloneType
TokenResource metav1.GroupVersionResource
UserCloneAuthFunc UserCloneAuthFunc
SACloneAuthFunc ServiceAccountCloneAuthFunc
SourceName string
SourceNamespace string
}
// CloneAuthResponse contains various response details
// regarding authorizing a datavolume
// +k8s:deepcopy-gen=false
// +k8s:openapi-gen=false
type CloneAuthResponse struct {
Handler CloneSourceHandler
Allowed bool
Reason string
}
type createSarFunc func(*authorization.SubjectAccessReview) (*authorization.SubjectAccessReview, error)
type dsGetFunc func(string, string) (*DataSource, error)
// AuthorizationHelperProxy proxies calls to APIs used for DV authorization
type AuthorizationHelperProxy interface {
CreateSar(*authorization.SubjectAccessReview) (*authorization.SubjectAccessReview, error)
GetNamespace(string) (*corev1.Namespace, error)
GetDataSource(string, string) (*DataSource, error)
}
// UserCloneAuthFunc represents a user clone auth func
type UserCloneAuthFunc func(createSar createSarFunc, sourceNamespace, pvcName, targetNamespace string, userInfo authentication.UserInfo) (bool, string, error)
// ServiceAccountCloneAuthFunc represents a serviceaccount clone auth func
type ServiceAccountCloneAuthFunc func(createSar createSarFunc, pvcNamespace, pvcName, saNamespace, saName string) (bool, string, error)
// CanUserClonePVC checks if a user has "appropriate" permission to clone from the given PVC
func CanUserClonePVC(createSar createSarFunc, sourceNamespace, pvcName, targetNamespace string,
userInfo authentication.UserInfo) (bool, string, error) {
if sourceNamespace == targetNamespace {
return true, "", nil
}
var newExtra map[string]authorization.ExtraValue
if len(userInfo.Extra) > 0 {
newExtra = make(map[string]authorization.ExtraValue)
for k, v := range userInfo.Extra {
newExtra[k] = authorization.ExtraValue(v)
}
}
sarSpec := authorization.SubjectAccessReviewSpec{
User: userInfo.Username,
Groups: userInfo.Groups,
Extra: newExtra,
}
return sendSubjectAccessReviewsPvc(createSar, sourceNamespace, pvcName, sarSpec)
}
// CanServiceAccountClonePVC checks if a ServiceAccount has "appropriate" permission to clone from the given PVC
func CanServiceAccountClonePVC(createSar createSarFunc, pvcNamespace, pvcName, saNamespace, saName string) (bool, string, error) {
if pvcNamespace == saNamespace {
return true, "", nil
}
user := fmt.Sprintf("system:serviceaccount:%s:%s", saNamespace, saName)
sarSpec := authorization.SubjectAccessReviewSpec{
User: user,
Groups: []string{
"system:serviceaccounts",
"system:serviceaccounts:" + saNamespace,
"system:authenticated",
},
}
return sendSubjectAccessReviewsPvc(createSar, pvcNamespace, pvcName, sarSpec)
}
// CanUserCloneSnapshot checks if a user has "appropriate" permission to clone from the given snapshot
func CanUserCloneSnapshot(createSar createSarFunc, sourceNamespace, pvcName, targetNamespace string,
userInfo authentication.UserInfo) (bool, string, error) {
if sourceNamespace == targetNamespace {
return true, "", nil
}
var newExtra map[string]authorization.ExtraValue
if len(userInfo.Extra) > 0 {
newExtra = make(map[string]authorization.ExtraValue)
for k, v := range userInfo.Extra {
newExtra[k] = authorization.ExtraValue(v)
}
}
sarSpec := authorization.SubjectAccessReviewSpec{
User: userInfo.Username,
Groups: userInfo.Groups,
Extra: newExtra,
}
return sendSubjectAccessReviewsSnapshot(createSar, sourceNamespace, pvcName, sarSpec)
}
// CanServiceAccountCloneSnapshot checks if a ServiceAccount has "appropriate" permission to clone from the given snapshot
func CanServiceAccountCloneSnapshot(createSar createSarFunc, pvcNamespace, pvcName, saNamespace, saName string) (bool, string, error) {
if pvcNamespace == saNamespace {
return true, "", nil
}
user := fmt.Sprintf("system:serviceaccount:%s:%s", saNamespace, saName)
sarSpec := authorization.SubjectAccessReviewSpec{
User: user,
Groups: []string{
"system:serviceaccounts",
"system:serviceaccounts:" + saNamespace,
"system:authenticated",
},
}
return sendSubjectAccessReviewsSnapshot(createSar, pvcNamespace, pvcName, sarSpec)
}
func sendSubjectAccessReviewsPvc(createSar createSarFunc, namespace, name string, sarSpec authorization.SubjectAccessReviewSpec) (bool, string, error) {
allowed := false
for _, ra := range getResourceAttributesPvc(namespace, name) {
sar := &authorization.SubjectAccessReview{
Spec: sarSpec,
}
sar.Spec.ResourceAttributes = &ra
klog.V(3).Infof("Sending SubjectAccessReview %+v", sar)
response, err := createSar(sar)
if err != nil {
return false, "", err
}
klog.V(3).Infof("SubjectAccessReview response %+v", response)
if response.Status.Allowed {
allowed = true
break
}
}
if !allowed {
return false, fmt.Sprintf("User %s has insufficient permissions in clone source namespace %s", sarSpec.User, namespace), nil
}
return true, "", nil
}
func sendSubjectAccessReviewsSnapshot(createSar createSarFunc, namespace, name string, sarSpec authorization.SubjectAccessReviewSpec) (bool, string, error) {
// Either explicitly allowed
sar := &authorization.SubjectAccessReview{
Spec: sarSpec,
}
explicitResourceAttr := getExplicitResourceAttributeSnapshot(namespace, name)
sar.Spec.ResourceAttributes = &explicitResourceAttr
klog.V(3).Infof("Sending SubjectAccessReview %+v", sar)
response, err := createSar(sar)
if err != nil {
return false, "", err
}
klog.V(3).Infof("SubjectAccessReview response %+v", response)
if response.Status.Allowed {
return true, "", nil
}
// Or both implicit conditions hold
for _, ra := range getImplicitResourceAttributesSnapshot(namespace, name) {
sar = &authorization.SubjectAccessReview{
Spec: sarSpec,
}
sar.Spec.ResourceAttributes = &ra
klog.V(3).Infof("Sending SubjectAccessReview %+v", sar)
response, err = createSar(sar)
if err != nil {
return false, "", err
}
klog.V(3).Infof("SubjectAccessReview response %+v", response)
if !response.Status.Allowed {
return false, fmt.Sprintf("User %s has insufficient permissions in clone source namespace %s", sarSpec.User, namespace), nil
}
}
return true, "", nil
}
func getResourceAttributesPvc(namespace, name string) []authorization.ResourceAttributes {
return []authorization.ResourceAttributes{
{
Namespace: namespace,
Verb: "create",
Group: SchemeGroupVersion.Group,
Resource: "datavolumes",
Subresource: DataVolumeCloneSourceSubresource,
Name: name,
},
{
Namespace: namespace,
Verb: "create",
Resource: "pods",
Name: name,
},
}
}
func getExplicitResourceAttributeSnapshot(namespace, name string) authorization.ResourceAttributes {
return authorization.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: SchemeGroupVersion.Group,
Resource: "datavolumes",
Subresource: DataVolumeCloneSourceSubresource,
Name: name,
}
}
func getImplicitResourceAttributesSnapshot(namespace, name string) []authorization.ResourceAttributes {
return []authorization.ResourceAttributes{
{
Namespace: namespace,
Verb: "create",
Resource: "pods",
Name: name,
},
{
Namespace: namespace,
Verb: "create",
Resource: "pvcs",
Name: name,
},
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go | // +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// Package v1beta1 is the v1beta1 version of the API.
// +groupName=cdi.kubevirt.io
package v1beta1
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/authorize.go | cmd/vsphere-xcopy-volume-populator/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/authorize.go | /*
Copyright 2020 The CDI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"errors"
authentication "k8s.io/api/authentication/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog/v2"
"kubevirt.io/containerized-data-importer-api/pkg/apis/core"
)
const (
// AnnPrePopulated is a PVC annotation telling the datavolume controller that the PVC is already populated
AnnPrePopulated = core.GroupName + "/storage.prePopulated"
// AnnCheckStaticVolume checks if a statically allocated PV exists before creating the target PVC.
// If so, PVC is still created but population is skipped
AnnCheckStaticVolume = core.GroupName + "/storage.checkStaticVolume"
)
// ErrNoTokenOkay indicates proceeding without token is allowed
// This error should only be of interest to entities that give out DataVolume tokens
var ErrNoTokenOkay = errors.New("proceeding without token is okay under the circumstances")
// AuthorizeUser indicates if the creating user is authorized to create the data volume
// For sources other than clone (import/upload/etc), this is a no-op
func (dv *DataVolume) AuthorizeUser(requestNamespace, requestName string, proxy AuthorizationHelperProxy, userInfo authentication.UserInfo) (CloneAuthResponse, error) {
_, prePopulated := dv.Annotations[AnnPrePopulated]
_, checkStaticVolume := dv.Annotations[AnnCheckStaticVolume]
noTokenOkay := prePopulated || checkStaticVolume
targetNamespace, targetName := dv.Namespace, dv.Name
if targetNamespace == "" {
targetNamespace = requestNamespace
}
if targetName == "" {
targetName = requestName
}
cloneSourceHandler, err := newCloneSourceHandler(dv, proxy.GetDataSource)
if err != nil {
if k8serrors.IsNotFound(err) && noTokenOkay {
// no token needed, likely since no datasource
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, no datasource", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
return CloneAuthResponse{Allowed: false, Reason: "", Handler: cloneSourceHandler}, err
}
if cloneSourceHandler.CloneType == noClone {
klog.V(3).Infof("DataVolume %s/%s not cloning", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
sourceName, sourceNamespace := cloneSourceHandler.SourceName, cloneSourceHandler.SourceNamespace
if sourceNamespace == "" {
sourceNamespace = targetNamespace
}
_, err = proxy.GetNamespace(sourceNamespace)
if err != nil {
if k8serrors.IsNotFound(err) && noTokenOkay {
// no token needed, likely since no source namespace
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, no source namespace", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
return CloneAuthResponse{Allowed: false, Reason: "", Handler: cloneSourceHandler}, err
}
ok, reason, err := cloneSourceHandler.UserCloneAuthFunc(proxy.CreateSar, sourceNamespace, sourceName, targetNamespace, userInfo)
if err != nil {
return CloneAuthResponse{Allowed: false, Reason: reason, Handler: cloneSourceHandler}, err
}
if !ok {
if noTokenOkay {
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, auth failed", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
}
return CloneAuthResponse{Allowed: ok, Reason: reason, Handler: cloneSourceHandler}, err
}
// AuthorizeSA indicates if the creating ServiceAccount is authorized to create the data volume
// For sources other than clone (import/upload/etc), this is a no-op
func (dv *DataVolume) AuthorizeSA(requestNamespace, requestName string, proxy AuthorizationHelperProxy, saNamespace, saName string) (CloneAuthResponse, error) {
_, prePopulated := dv.Annotations[AnnPrePopulated]
_, checkStaticVolume := dv.Annotations[AnnCheckStaticVolume]
noTokenOkay := prePopulated || checkStaticVolume
targetNamespace, targetName := dv.Namespace, dv.Name
if targetNamespace == "" {
targetNamespace = requestNamespace
}
if saNamespace == "" {
saNamespace = targetNamespace
}
if targetName == "" {
targetName = requestName
}
cloneSourceHandler, err := newCloneSourceHandler(dv, proxy.GetDataSource)
if err != nil {
if k8serrors.IsNotFound(err) && noTokenOkay {
// no token needed, likely since no datasource
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, no datasource", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
return CloneAuthResponse{Allowed: false, Reason: "", Handler: cloneSourceHandler}, err
}
if cloneSourceHandler.CloneType == noClone {
klog.V(3).Infof("DataVolume %s/%s not cloning", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
sourceName, sourceNamespace := cloneSourceHandler.SourceName, cloneSourceHandler.SourceNamespace
if sourceNamespace == "" {
sourceNamespace = targetNamespace
}
_, err = proxy.GetNamespace(sourceNamespace)
if err != nil {
if k8serrors.IsNotFound(err) && noTokenOkay {
// no token needed, likely since no source namespace
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, no source namespace", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
return CloneAuthResponse{Allowed: false, Reason: "", Handler: cloneSourceHandler}, err
}
ok, reason, err := cloneSourceHandler.SACloneAuthFunc(proxy.CreateSar, sourceNamespace, sourceName, saNamespace, saName)
if err != nil {
return CloneAuthResponse{Allowed: false, Reason: reason, Handler: cloneSourceHandler}, err
}
if !ok {
if noTokenOkay {
klog.V(3).Infof("DataVolume %s/%s is pre/static populated, not adding token, auth failed", targetNamespace, targetName)
return CloneAuthResponse{Allowed: true, Reason: "", Handler: cloneSourceHandler}, ErrNoTokenOkay
}
}
return CloneAuthResponse{Allowed: ok, Reason: reason, Handler: cloneSourceHandler}, err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error.go | // Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package multierr allows combining one or more errors together.
//
// # Overview
//
// Errors can be combined with the use of the Combine function.
//
// multierr.Combine(
// reader.Close(),
// writer.Close(),
// conn.Close(),
// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
// errors := multierr.Errors(err)
// if len(errors) > 0 {
// fmt.Println("The following errors occurred:", errors)
// }
//
// # Appending from a loop
//
// You sometimes need to append into an error from a loop.
//
// var err error
// for _, item := range items {
// err = multierr.Append(err, process(item))
// }
//
// Cases like this may require knowledge of whether an individual instance
// failed. This usually requires introduction of a new variable.
//
// var err error
// for _, item := range items {
// if perr := process(item); perr != nil {
// log.Warn("skipping item", item)
// err = multierr.Append(err, perr)
// }
// }
//
// multierr includes AppendInto to simplify cases like this.
//
// var err error
// for _, item := range items {
// if multierr.AppendInto(&err, process(item)) {
// log.Warn("skipping item", item)
// }
// }
//
// This will append the error into the err variable, and return true if that
// individual error was non-nil.
//
// See [AppendInto] for more information.
//
// # Deferred Functions
//
// Go makes it possible to modify the return value of a function in a defer
// block if the function was using named returns. This makes it possible to
// record resource cleanup failures from deferred blocks.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
// if err != nil {
// return err
// }
// defer func() {
// err = multierr.Append(err, conn.Close())
// }()
// // ...
// }
//
// multierr provides the Invoker type and AppendInvoke function to make cases
// like the above simpler and obviate the need for a closure. The following is
// roughly equivalent to the example above.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
// if err != nil {
// return err
// }
// defer multierr.AppendInvoke(&err, multierr.Close(conn))
// // ...
// }
//
// See [AppendInvoke] and [Invoker] for more information.
//
// NOTE: If you're modifying an error from inside a defer, you MUST use a named
// return value for that function.
//
// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
// type errorGroup interface {
// // Returns a slice containing the underlying list of errors.
// //
// // This slice MUST NOT be modified by the caller.
// Errors() []error
// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
// read-only access to the underlying errors slice, you can attempt to cast
// the error to this interface. You MUST handle the failure case gracefully
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
// var errors []error
// group, ok := err.(errorGroup)
// if ok {
// errors = group.Errors()
// } else {
// errors = []error{err}
// }
package multierr // import "go.uber.org/multierr"
import (
"bytes"
"errors"
"fmt"
"io"
"strings"
"sync"
"sync/atomic"
)
var (
// Separator for single-line error messages.
_singlelineSeparator = []byte("; ")
// Prefix for multi-line messages
_multilinePrefix = []byte("the following errors occurred:")
// Prefix for the first and following lines of an item in a list of
// multi-line error messages.
//
// For example, if a single item is:
//
// foo
// bar
//
// It will become,
//
// - foo
// bar
_multilineSeparator = []byte("\n - ")
_multilineIndent = []byte(" ")
)
// _bufferPool is a pool of bytes.Buffers.
var _bufferPool = sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
},
}
type errorGroup interface {
Errors() []error
}
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
// err := multierr.Append(r.Close(), w.Close())
// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
return extractErrors(err)
}
// multiError is an error that holds one or more errors.
//
// An instance of this is guaranteed to be non-empty and flattened. That is,
// none of the errors inside multiError are other multiErrors.
//
// multiError formats to a semi-colon delimited list of error messages with
// %v and with a more readable multi-line format with %+v.
type multiError struct {
copyNeeded atomic.Bool
errors []error
}
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
func (merr *multiError) Errors() []error {
if merr == nil {
return nil
}
return merr.errors
}
func (merr *multiError) Error() string {
if merr == nil {
return ""
}
buff := _bufferPool.Get().(*bytes.Buffer)
buff.Reset()
merr.writeSingleline(buff)
result := buff.String()
_bufferPool.Put(buff)
return result
}
// Every compares every error in the given err against the given target error
// using [errors.Is], and returns true only if every comparison returned true.
func Every(err error, target error) bool {
for _, e := range extractErrors(err) {
if !errors.Is(e, target) {
return false
}
}
return true
}
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
} else {
merr.writeSingleline(f)
}
}
func (merr *multiError) writeSingleline(w io.Writer) {
first := true
for _, item := range merr.errors {
if first {
first = false
} else {
w.Write(_singlelineSeparator)
}
io.WriteString(w, item.Error())
}
}
func (merr *multiError) writeMultiline(w io.Writer) {
w.Write(_multilinePrefix)
for _, item := range merr.errors {
w.Write(_multilineSeparator)
writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
}
}
// Writes s to the writer with the given prefix added before each line after
// the first.
func writePrefixLine(w io.Writer, prefix []byte, s string) {
first := true
for len(s) > 0 {
if first {
first = false
} else {
w.Write(prefix)
}
idx := strings.IndexByte(s, '\n')
if idx < 0 {
idx = len(s) - 1
}
io.WriteString(w, s[:idx+1])
s = s[idx+1:]
}
}
type inspectResult struct {
// Number of top-level non-nil errors
Count int
// Total number of errors including multiErrors
Capacity int
// Index of the first non-nil error in the list. Value is meaningless if
// Count is zero.
FirstErrorIdx int
// Whether the list contains at least one multiError
ContainsMultiError bool
}
// Inspects the given slice of errors so that we can efficiently allocate
// space for it.
func inspect(errors []error) (res inspectResult) {
first := true
for i, err := range errors {
if err == nil {
continue
}
res.Count++
if first {
first = false
res.FirstErrorIdx = i
}
if merr, ok := err.(*multiError); ok {
res.Capacity += len(merr.errors)
res.ContainsMultiError = true
} else {
res.Capacity++
}
}
return
}
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
// Don't pay to inspect small slices.
switch len(errors) {
case 0:
return nil
case 1:
return errors[0]
}
res := inspect(errors)
switch res.Count {
case 0:
return nil
case 1:
// only one non-nil entry
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
// Error list is flat. Make a copy of it
// Otherwise "errors" escapes to the heap
// unconditionally for all other cases.
// This lets us optimize for the "no errors" case.
out := append(([]error)(nil), errors...)
return &multiError{errors: out}
}
}
nonNilErrs := make([]error, 0, res.Capacity)
for _, err := range errors[res.FirstErrorIdx:] {
if err == nil {
continue
}
if nested, ok := err.(*multiError); ok {
nonNilErrs = append(nonNilErrs, nested.errors...)
} else {
nonNilErrs = append(nonNilErrs, err)
}
}
return &multiError{errors: nonNilErrs}
}
// Combine combines the passed errors into a single error.
//
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
// multierr.Combine(
// reader.Close(),
// writer.Close(),
// pipe.Close(),
// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
// multierr.Combine(multierr.Combine(err1, err2), err3)
// // is the same as
// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
// Append appends the given errors together. Either value may be nil.
//
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
// func doSomething(..) (err error) {
// f := acquireResource()
// defer func() {
// err = multierr.Append(err, f.Close())
// }()
//
// Note that the variable MUST be a named return to append an error to it from
// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
return right
case right == nil:
return left
}
if _, ok := right.(*multiError); !ok {
if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
// Common case where the error on the left is constantly being
// appended to.
errs := append(l.errors, right)
return &multiError{errors: errs}
} else if !ok {
// Both errors are single errors.
return &multiError{errors: []error{left, right}}
}
}
// Either right or both, left and right, are multiErrors. Rely on usual
// expensive logic.
errors := [2]error{left, right}
return fromSlice(errors[0:])
}
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
// var err error
// multierr.AppendInto(&err, r.Close())
// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
// var err error
// for line := range lines {
// var item Item
// if multierr.AppendInto(&err, parse(line, &item)) {
// continue
// }
// items = append(items, item)
// }
//
// Compare this with a version that relies solely on Append:
//
// var err error
// for line := range lines {
// var item Item
// if parseErr := parse(line, &item); parseErr != nil {
// err = multierr.Append(err, parseErr)
// continue
// }
// items = append(items, item)
// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
// because suggesting that the pointer must be non-nil may
// confuse users into thinking that the error that it points
// to must be non-nil.
panic("misuse of multierr.AppendInto: into pointer must not be nil")
}
if err == nil {
return false
}
*into = Append(*into, err)
return true
}
// Invoker is an operation that may fail with an error. Use it with
// AppendInvoke to append the result of calling the function into an error.
// This allows you to conveniently defer capture of failing operations.
//
// See also, [Close] and [Invoke].
type Invoker interface {
Invoke() error
}
// Invoke wraps a function which may fail with an error to match the Invoker
// interface. Use it to supply functions matching this signature to
// AppendInvoke.
//
// For example,
//
// func processReader(r io.Reader) (err error) {
// scanner := bufio.NewScanner(r)
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
// for scanner.Scan() {
// // ...
// }
// // ...
// }
//
// In this example, the following line will construct the Invoker right away,
// but defer the invocation of scanner.Err() until the function returns.
//
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
// Note that the error you're appending to from the defer statement MUST be a
// named return.
type Invoke func() error
// Invoke calls the supplied function and returns its result.
func (i Invoke) Invoke() error { return i() }
// Close builds an Invoker that closes the provided io.Closer. Use it with
// AppendInvoke to close io.Closers and append their results into an error.
//
// For example,
//
// func processFile(path string) (err error) {
// f, err := os.Open(path)
// if err != nil {
// return err
// }
// defer multierr.AppendInvoke(&err, multierr.Close(f))
// return processReader(f)
// }
//
// In this example, multierr.Close will construct the Invoker right away, but
// defer the invocation of f.Close until the function returns.
//
// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
// Note that the error you're appending to from the defer statement MUST be a
// named return.
func Close(closer io.Closer) Invoker {
return Invoke(closer.Close)
}
// AppendInvoke appends the result of calling the given Invoker into the
// provided error pointer. Use it with named returns to safely defer
// invocation of fallible operations until a function returns, and capture the
// resulting errors.
//
// func doSomething(...) (err error) {
// // ...
// f, err := openFile(..)
// if err != nil {
// return err
// }
//
// // multierr will call f.Close() when this function returns and
// // if the operation fails, its append its error into the
// // returned error.
// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
// scanner := bufio.NewScanner(f)
// // Similarly, this scheduled scanner.Err to be called and
// // inspected when the function returns and append its error
// // into the returned error.
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
// // ...
// }
//
// NOTE: If used with a defer, the error variable MUST be a named return.
//
// Without defer, AppendInvoke behaves exactly like AppendInto.
//
// err := // ...
// multierr.AppendInvoke(&err, mutltierr.Invoke(foo))
//
// // ...is roughly equivalent to...
//
// err := // ...
// multierr.AppendInto(&err, foo())
//
// The advantage of the indirection introduced by Invoker is to make it easy
// to defer the invocation of a function. Without this indirection, the
// invoked function will be evaluated at the time of the defer block rather
// than when the function returns.
//
// // BAD: This is likely not what the caller intended. This will evaluate
// // foo() right away and append its result into the error when the
// // function returns.
// defer multierr.AppendInto(&err, foo())
//
// // GOOD: This will defer invocation of foo unutil the function returns.
// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
// multierr provides a few Invoker implementations out of the box for
// convenience. See [Invoker] for more information.
func AppendInvoke(into *error, invoker Invoker) {
AppendInto(into, invoker.Invoke())
}
// AppendFunc is a shorthand for [AppendInvoke].
// It allows using function or method value directly
// without having to wrap it into an [Invoker] interface.
//
// func doSomething(...) (err error) {
// w, err := startWorker(...)
// if err != nil {
// return err
// }
//
// // multierr will call w.Stop() when this function returns and
// // if the operation fails, it appends its error into the
// // returned error.
// defer multierr.AppendFunc(&err, w.Stop)
// }
func AppendFunc(into *error, fn func() error) {
AppendInvoke(into, Invoke(fn))
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error_post_go120.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error_post_go120.go | // Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build go1.20
// +build go1.20
package multierr
// Unwrap returns a list of errors wrapped by this multierr.
func (merr *multiError) Unwrap() []error {
return merr.Errors()
}
type multipleErrors interface {
Unwrap() []error
}
func extractErrors(err error) []error {
if err == nil {
return nil
}
// check if the given err is an Unwrapable error that
// implements multipleErrors interface.
eg, ok := err.(multipleErrors)
if !ok {
return []error{err}
}
return append(([]error)(nil), eg.Unwrap()...)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error_pre_go120.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/multierr/error_pre_go120.go | // Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build !go1.20
// +build !go1.20
package multierr
import "errors"
// Versions of Go before 1.20 did not support the Unwrap() []error method.
// This provides a similar behavior by implementing the Is(..) and As(..)
// methods.
// See the errors.Join proposal for details:
// https://github.com/golang/go/issues/53435
// As attempts to find the first error in the error list that matches the type
// of the value that target points to.
//
// This function allows errors.As to traverse the values stored on the
// multierr error.
func (merr *multiError) As(target interface{}) bool {
for _, err := range merr.Errors() {
if errors.As(err, target) {
return true
}
}
return false
}
// Is attempts to match the provided error against errors in the error list.
//
// This function allows errors.Is to traverse the values stored on the
// multierr error.
func (merr *multiError) Is(target error) bool {
for _, err := range merr.Errors() {
if errors.Is(err, target) {
return true
}
}
return false
}
func extractErrors(err error) []error {
if err == nil {
return nil
}
// Note that we're casting to multiError, not errorGroup. Our contract is
// that returned errors MAY implement errorGroup. Errors, however, only
// has special behavior for multierr-specific error objects.
//
// This behavior can be expanded in the future but I think it's prudent to
// start with as little as possible in terms of contract and possibility
// of misuse.
eg, ok := err.(*multiError)
if !ok {
return []error{err}
}
return append(([]error)(nil), eg.Errors()...)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/matchers.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/matchers.go | // Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"fmt"
"reflect"
"regexp"
"strings"
)
// A Matcher is a representation of a class of values.
// It is used to represent the valid or expected arguments to a mocked method.
type Matcher interface {
// Matches returns whether x is a match.
Matches(x any) bool
// String describes what the matcher matches.
String() string
}
// WantFormatter modifies the given Matcher's String() method to the given
// Stringer. This allows for control on how the "Want" is formatted when
// printing .
func WantFormatter(s fmt.Stringer, m Matcher) Matcher {
type matcher interface {
Matches(x any) bool
}
return struct {
matcher
fmt.Stringer
}{
matcher: m,
Stringer: s,
}
}
// StringerFunc type is an adapter to allow the use of ordinary functions as
// a Stringer. If f is a function with the appropriate signature,
// StringerFunc(f) is a Stringer that calls f.
type StringerFunc func() string
// String implements fmt.Stringer.
func (f StringerFunc) String() string {
return f()
}
// GotFormatter is used to better print failure messages. If a matcher
// implements GotFormatter, it will use the result from Got when printing
// the failure message.
type GotFormatter interface {
// Got is invoked with the received value. The result is used when
// printing the failure message.
Got(got any) string
}
// GotFormatterFunc type is an adapter to allow the use of ordinary
// functions as a GotFormatter. If f is a function with the appropriate
// signature, GotFormatterFunc(f) is a GotFormatter that calls f.
type GotFormatterFunc func(got any) string
// Got implements GotFormatter.
func (f GotFormatterFunc) Got(got any) string {
return f(got)
}
// GotFormatterAdapter attaches a GotFormatter to a Matcher.
func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher {
return struct {
GotFormatter
Matcher
}{
GotFormatter: s,
Matcher: m,
}
}
type anyMatcher struct{}
func (anyMatcher) Matches(any) bool {
return true
}
func (anyMatcher) String() string {
return "is anything"
}
type condMatcher[T any] struct {
fn func(x T) bool
}
func (c condMatcher[T]) Matches(x any) bool {
typed, ok := x.(T)
if !ok {
return false
}
return c.fn(typed)
}
func (c condMatcher[T]) String() string {
return "adheres to a custom condition"
}
type eqMatcher struct {
x any
}
func (e eqMatcher) Matches(x any) bool {
// In case, some value is nil
if e.x == nil || x == nil {
return reflect.DeepEqual(e.x, x)
}
// Check if types assignable and convert them to common type
x1Val := reflect.ValueOf(e.x)
x2Val := reflect.ValueOf(x)
if x1Val.Type().AssignableTo(x2Val.Type()) {
x1ValConverted := x1Val.Convert(x2Val.Type())
return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface())
}
return false
}
func (e eqMatcher) String() string {
return fmt.Sprintf("is equal to %s (%T)", getString(e.x), e.x)
}
type nilMatcher struct{}
func (nilMatcher) Matches(x any) bool {
if x == nil {
return true
}
v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}
func (nilMatcher) String() string {
return "is nil"
}
type notMatcher struct {
m Matcher
}
func (n notMatcher) Matches(x any) bool {
return !n.m.Matches(x)
}
func (n notMatcher) String() string {
return "not(" + n.m.String() + ")"
}
type regexMatcher struct {
regex *regexp.Regexp
}
func (m regexMatcher) Matches(x any) bool {
switch t := x.(type) {
case string:
return m.regex.MatchString(t)
case []byte:
return m.regex.Match(t)
default:
return false
}
}
func (m regexMatcher) String() string {
return "matches regex " + m.regex.String()
}
type assignableToTypeOfMatcher struct {
targetType reflect.Type
}
func (m assignableToTypeOfMatcher) Matches(x any) bool {
return reflect.TypeOf(x).AssignableTo(m.targetType)
}
func (m assignableToTypeOfMatcher) String() string {
return "is assignable to " + m.targetType.Name()
}
type anyOfMatcher struct {
matchers []Matcher
}
func (am anyOfMatcher) Matches(x any) bool {
for _, m := range am.matchers {
if m.Matches(x) {
return true
}
}
return false
}
func (am anyOfMatcher) String() string {
ss := make([]string, 0, len(am.matchers))
for _, matcher := range am.matchers {
ss = append(ss, matcher.String())
}
return strings.Join(ss, " | ")
}
type allMatcher struct {
matchers []Matcher
}
func (am allMatcher) Matches(x any) bool {
for _, m := range am.matchers {
if !m.Matches(x) {
return false
}
}
return true
}
func (am allMatcher) String() string {
ss := make([]string, 0, len(am.matchers))
for _, matcher := range am.matchers {
ss = append(ss, matcher.String())
}
return strings.Join(ss, "; ")
}
type lenMatcher struct {
i int
}
func (m lenMatcher) Matches(x any) bool {
v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == m.i
default:
return false
}
}
func (m lenMatcher) String() string {
return fmt.Sprintf("has length %d", m.i)
}
type inAnyOrderMatcher struct {
x any
}
func (m inAnyOrderMatcher) Matches(x any) bool {
given, ok := m.prepareValue(x)
if !ok {
return false
}
wanted, ok := m.prepareValue(m.x)
if !ok {
return false
}
if given.Len() != wanted.Len() {
return false
}
usedFromGiven := make([]bool, given.Len())
foundFromWanted := make([]bool, wanted.Len())
for i := 0; i < wanted.Len(); i++ {
wantedMatcher := Eq(wanted.Index(i).Interface())
for j := 0; j < given.Len(); j++ {
if usedFromGiven[j] {
continue
}
if wantedMatcher.Matches(given.Index(j).Interface()) {
foundFromWanted[i] = true
usedFromGiven[j] = true
break
}
}
}
missingFromWanted := 0
for _, found := range foundFromWanted {
if !found {
missingFromWanted++
}
}
extraInGiven := 0
for _, used := range usedFromGiven {
if !used {
extraInGiven++
}
}
return extraInGiven == 0 && missingFromWanted == 0
}
func (m inAnyOrderMatcher) prepareValue(x any) (reflect.Value, bool) {
xValue := reflect.ValueOf(x)
switch xValue.Kind() {
case reflect.Slice, reflect.Array:
return xValue, true
default:
return reflect.Value{}, false
}
}
func (m inAnyOrderMatcher) String() string {
return fmt.Sprintf("has the same elements as %v", m.x)
}
// Constructors
// All returns a composite Matcher that returns true if and only all of the
// matchers return true.
func All(ms ...Matcher) Matcher { return allMatcher{ms} }
// Any returns a matcher that always matches.
func Any() Matcher { return anyMatcher{} }
// Cond returns a matcher that matches when the given function returns true
// after passing it the parameter to the mock function.
// This is particularly useful in case you want to match over a field of a custom struct, or dynamic logic.
//
// Example usage:
//
// Cond(func(x int){return x == 1}).Matches(1) // returns true
// Cond(func(x int){return x == 2}).Matches(1) // returns false
func Cond[T any](fn func(x T) bool) Matcher { return condMatcher[T]{fn} }
// AnyOf returns a composite Matcher that returns true if at least one of the
// matchers returns true.
//
// Example usage:
//
// AnyOf(1, 2, 3).Matches(2) // returns true
// AnyOf(1, 2, 3).Matches(10) // returns false
// AnyOf(Nil(), Len(2)).Matches(nil) // returns true
// AnyOf(Nil(), Len(2)).Matches("hi") // returns true
// AnyOf(Nil(), Len(2)).Matches("hello") // returns false
func AnyOf(xs ...any) Matcher {
ms := make([]Matcher, 0, len(xs))
for _, x := range xs {
if m, ok := x.(Matcher); ok {
ms = append(ms, m)
} else {
ms = append(ms, Eq(x))
}
}
return anyOfMatcher{ms}
}
// Eq returns a matcher that matches on equality.
//
// Example usage:
//
// Eq(5).Matches(5) // returns true
// Eq(5).Matches(4) // returns false
func Eq(x any) Matcher { return eqMatcher{x} }
// Len returns a matcher that matches on length. This matcher returns false if
// is compared to a type that is not an array, chan, map, slice, or string.
func Len(i int) Matcher {
return lenMatcher{i}
}
// Nil returns a matcher that matches if the received value is nil.
//
// Example usage:
//
// var x *bytes.Buffer
// Nil().Matches(x) // returns true
// x = &bytes.Buffer{}
// Nil().Matches(x) // returns false
func Nil() Matcher { return nilMatcher{} }
// Not reverses the results of its given child matcher.
//
// Example usage:
//
// Not(Eq(5)).Matches(4) // returns true
// Not(Eq(5)).Matches(5) // returns false
func Not(x any) Matcher {
if m, ok := x.(Matcher); ok {
return notMatcher{m}
}
return notMatcher{Eq(x)}
}
// Regex checks whether parameter matches the associated regex.
//
// Example usage:
//
// Regex("[0-9]{2}:[0-9]{2}").Matches("23:02") // returns true
// Regex("[0-9]{2}:[0-9]{2}").Matches([]byte{'2', '3', ':', '0', '2'}) // returns true
// Regex("[0-9]{2}:[0-9]{2}").Matches("hello world") // returns false
// Regex("[0-9]{2}").Matches(21) // returns false as it's not a valid type
func Regex(regexStr string) Matcher {
return regexMatcher{regex: regexp.MustCompile(regexStr)}
}
// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
// function is assignable to the type of the parameter to this function.
//
// Example usage:
//
// var s fmt.Stringer = &bytes.Buffer{}
// AssignableToTypeOf(s).Matches(time.Second) // returns true
// AssignableToTypeOf(s).Matches(99) // returns false
//
// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem()
// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true
func AssignableToTypeOf(x any) Matcher {
if xt, ok := x.(reflect.Type); ok {
return assignableToTypeOfMatcher{xt}
}
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}
// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order.
//
// Example usage:
//
// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true
// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false
func InAnyOrder(x any) Matcher {
return inAnyOrderMatcher{x}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/callset.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/callset.go | // Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"bytes"
"errors"
"fmt"
"sync"
)
// callSet represents a set of expected calls, indexed by receiver and method
// name.
type callSet struct {
// Calls that are still expected.
expected map[callSetKey][]*Call
expectedMu *sync.Mutex
// Calls that have been exhausted.
exhausted map[callSetKey][]*Call
// when set to true, existing call expectations are overridden when new call expectations are made
allowOverride bool
}
// callSetKey is the key in the maps in callSet
type callSetKey struct {
receiver any
fname string
}
func newCallSet() *callSet {
return &callSet{
expected: make(map[callSetKey][]*Call),
expectedMu: &sync.Mutex{},
exhausted: make(map[callSetKey][]*Call),
}
}
func newOverridableCallSet() *callSet {
return &callSet{
expected: make(map[callSetKey][]*Call),
expectedMu: &sync.Mutex{},
exhausted: make(map[callSetKey][]*Call),
allowOverride: true,
}
}
// Add adds a new expected call.
func (cs callSet) Add(call *Call) {
key := callSetKey{call.receiver, call.method}
cs.expectedMu.Lock()
defer cs.expectedMu.Unlock()
m := cs.expected
if call.exhausted() {
m = cs.exhausted
}
if cs.allowOverride {
m[key] = make([]*Call, 0)
}
m[key] = append(m[key], call)
}
// Remove removes an expected call.
func (cs callSet) Remove(call *Call) {
key := callSetKey{call.receiver, call.method}
cs.expectedMu.Lock()
defer cs.expectedMu.Unlock()
calls := cs.expected[key]
for i, c := range calls {
if c == call {
// maintain order for remaining calls
cs.expected[key] = append(calls[:i], calls[i+1:]...)
cs.exhausted[key] = append(cs.exhausted[key], call)
break
}
}
}
// FindMatch searches for a matching call. Returns error with explanation message if no call matched.
//
// Expected calls registered for the (receiver, method) key are tried in
// order; the first whose matchers accept args wins. On failure, the
// exhausted calls are consulted solely to build a more helpful error, and a
// dedicated message is produced when no expectation exists at all.
func (cs callSet) FindMatch(receiver any, method string, args []any) (*Call, error) {
	key := callSetKey{receiver, method}
	cs.expectedMu.Lock()
	defer cs.expectedMu.Unlock()
	// Search through the expected calls.
	expected := cs.expected[key]
	var callsErrors bytes.Buffer
	for _, call := range expected {
		err := call.matches(args)
		if err != nil {
			_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
		} else {
			return call, nil
		}
	}
	// If we haven't found a match then search through the exhausted calls so we
	// get useful error messages.
	exhausted := cs.exhausted[key]
	for _, call := range exhausted {
		if err := call.matches(args); err != nil {
			_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
			continue
		}
		_, _ = fmt.Fprintf(
			&callsErrors, "all expected calls for method %q have been exhausted", method,
		)
	}
	if len(expected)+len(exhausted) == 0 {
		_, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
	}
	return nil, errors.New(callsErrors.String())
}
// Failures returns the calls that are not satisfied.
//
// Only calls still in the expected map are inspected; a call fails when it
// has been invoked fewer times than its configured minimum.
func (cs callSet) Failures() []*Call {
	cs.expectedMu.Lock()
	defer cs.expectedMu.Unlock()
	failures := make([]*Call, 0, len(cs.expected))
	for _, calls := range cs.expected {
		for _, call := range calls {
			if !call.satisfied() {
				failures = append(failures, call)
			}
		}
	}
	return failures
}
// Satisfied returns true in case all expected calls in this callSet have
// reached their minimum invocation counts.
func (cs callSet) Satisfied() bool {
	cs.expectedMu.Lock()
	defer cs.expectedMu.Unlock()
	for _, pending := range cs.expected {
		for _, c := range pending {
			if !c.satisfied() {
				return false
			}
		}
	}
	return true
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/call.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/call.go | // Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// Call represents an expected call to a mock.
//
// A Call's counters and prerequisite list are mutated while the owning
// Controller holds its mutex (see Controller.Call), so Call itself carries
// no locking.
type Call struct {
	t TestHelper // for triggering test failures on invalid call setup

	receiver   any          // the receiver of the method call
	method     string       // the name of the method
	methodType reflect.Type // the type of the method
	args       []Matcher    // the args
	origin     string       // file and line number of call setup

	preReqs []*Call // prerequisite calls

	// Expectations
	minCalls, maxCalls int
	numCalls           int // actual number made

	// actions are called when this Call is called. Each action gets the args and
	// can set the return values by returning a non-nil slice. Actions run in the
	// order they are created.
	actions []func([]any) []any
}
// newCall creates a *Call. It requires the method type in order to support
// unexported methods.
//
// Each raw argument is normalized to a Matcher: existing Matchers pass
// through, nil becomes Nil() (so an untyped nil matches typed nils), and
// anything else becomes Eq(arg). The new Call defaults to exactly one
// expected invocation and a single default action that returns the zero
// value for each of the method's results.
func newCall(t TestHelper, receiver any, method string, methodType reflect.Type, args ...any) *Call {
	t.Helper()

	// TODO: check arity, types.
	mArgs := make([]Matcher, len(args))
	for i, arg := range args {
		if m, ok := arg.(Matcher); ok {
			mArgs[i] = m
		} else if arg == nil {
			// Handle nil specially so that passing a nil interface value
			// will match the typed nils of concrete args.
			mArgs[i] = Nil()
		} else {
			mArgs[i] = Eq(arg)
		}
	}

	// callerInfo's skip should be updated if the number of calls between the user's test
	// and this line changes, i.e. this code is wrapped in another anonymous function.
	// 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test.
	origin := callerInfo(3)
	actions := []func([]any) []any{func([]any) []any {
		// Synthesize the zero value for each of the return args' types.
		rets := make([]any, methodType.NumOut())
		for i := 0; i < methodType.NumOut(); i++ {
			rets[i] = reflect.Zero(methodType.Out(i)).Interface()
		}
		return rets
	}}
	return &Call{
		t: t, receiver: receiver, method: method, methodType: methodType,
		args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions,
	}
}
// AnyTimes allows the expectation to be called 0 or more times.
func (c *Call) AnyTimes() *Call {
	c.minCalls = 0
	c.maxCalls = 1e8 // close enough to infinity
	return c
}

// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes
// was previously called with 1, MinTimes also sets the maximum number of calls to infinity.
func (c *Call) MinTimes(n int) *Call {
	c.minCalls = n
	if c.maxCalls != 1 {
		return c
	}
	c.maxCalls = 1e8
	return c
}

// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was
// previously called with 1, MaxTimes also sets the minimum number of calls to 0.
func (c *Call) MaxTimes(n int) *Call {
	c.maxCalls = n
	if c.minCalls != 1 {
		return c
	}
	c.minCalls = 0
	return c
}
// DoAndReturn declares the action to run when the call is matched.
// The return values from this function are returned by the mocked function.
// It takes an any argument to support n-arity functions.
// The anonymous function must match the function signature mocked method.
//
// Arity is checked lazily, at invocation time; a variadic f is rejected
// outright because its signature cannot match the mocked method exactly.
// Nil arguments are substituted with the zero value of f's corresponding
// parameter type so reflect.Call does not panic on untyped nils.
func (c *Call) DoAndReturn(f any) *Call {
	// TODO: Check arity and types here, rather than dying badly elsewhere.
	v := reflect.ValueOf(f)

	c.addAction(func(args []any) []any {
		c.t.Helper()
		ft := v.Type()
		if c.methodType.NumIn() != ft.NumIn() {
			if ft.IsVariadic() {
				c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.",
					c.receiver, c.method)
			} else {
				c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]",
					c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
			}
			return nil
		}
		vArgs := make([]reflect.Value, len(args))
		for i := 0; i < len(args); i++ {
			if args[i] != nil {
				vArgs[i] = reflect.ValueOf(args[i])
			} else {
				// Use the zero value for the arg.
				vArgs[i] = reflect.Zero(ft.In(i))
			}
		}
		vRets := v.Call(vArgs)
		rets := make([]any, len(vRets))
		for i, ret := range vRets {
			rets[i] = ret.Interface()
		}
		return rets
	})
	return c
}
// Do declares the action to run when the call is matched. The function's
// return values are ignored to retain backward compatibility. To use the
// return values call DoAndReturn.
// It takes an any argument to support n-arity functions.
// The anonymous function must match the function signature mocked method.
//
// Identical to DoAndReturn except that f's results are discarded (the action
// returns nil, so it never supplies the mocked call's return values).
func (c *Call) Do(f any) *Call {
	// TODO: Check arity and types here, rather than dying badly elsewhere.
	v := reflect.ValueOf(f)

	c.addAction(func(args []any) []any {
		c.t.Helper()
		ft := v.Type()
		if c.methodType.NumIn() != ft.NumIn() {
			if ft.IsVariadic() {
				c.t.Fatalf("wrong number of arguments in Do func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.",
					c.receiver, c.method)
			} else {
				c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]",
					c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
			}
			return nil
		}
		vArgs := make([]reflect.Value, len(args))
		for i := 0; i < len(args); i++ {
			if args[i] != nil {
				vArgs[i] = reflect.ValueOf(args[i])
			} else {
				// Use the zero value for the arg.
				vArgs[i] = reflect.Zero(ft.In(i))
			}
		}
		v.Call(vArgs)
		return nil
	})
	return c
}
// Return declares the values to be returned by the mocked function call.
//
// Each value is validated against the method's result types at setup time:
// identical types pass through, nil is accepted only for nillable kinds, and
// assignable-but-different types are converted eagerly so the generated code
// can type-assert the results. Any mismatch fails the test immediately.
func (c *Call) Return(rets ...any) *Call {
	c.t.Helper()

	mt := c.methodType
	if len(rets) != mt.NumOut() {
		c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
			c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
	}
	for i, ret := range rets {
		if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
			// Identical types; nothing to do.
		} else if got == nil {
			// Nil needs special handling.
			switch want.Kind() {
			case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
				// ok
			default:
				c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
					i, c.receiver, c.method, want, c.origin)
			}
		} else if got.AssignableTo(want) {
			// Assignable type relation. Make the assignment now so that the generated code
			// can return the values with a type assertion.
			v := reflect.New(want).Elem()
			v.Set(reflect.ValueOf(ret))
			rets[i] = v.Interface()
		} else {
			c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
				i, c.receiver, c.method, got, want, c.origin)
		}
	}

	c.addAction(func([]any) []any {
		return rets
	})

	return c
}
// Times declares the exact number of times a function call is expected to be executed.
func (c *Call) Times(n int) *Call {
	c.minCalls = n
	c.maxCalls = n
	return c
}
// SetArg declares an action that will set the nth argument's value,
// indirected through a pointer. Or, in the case of a slice and map, SetArg
// will copy value's elements/key-value pairs into the nth argument.
//
// Setup-time validation: n must be in range, and for a pointer argument the
// value must be assignable to the pointee type. Interface, slice, and map
// arguments are accepted without a type check here; anything else is
// rejected. The actual write happens at invocation time, dispatched on the
// runtime kind of the received argument.
func (c *Call) SetArg(n int, value any) *Call {
	c.t.Helper()

	mt := c.methodType
	// TODO: This will break on variadic methods.
	// We will need to check those at invocation time.
	if n < 0 || n >= mt.NumIn() {
		c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
			n, mt.NumIn(), c.origin)
	}
	// Permit setting argument through an interface.
	// In the interface case, we don't (nay, can't) check the type here.
	at := mt.In(n)
	switch at.Kind() {
	case reflect.Ptr:
		dt := at.Elem()
		if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
			c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
				n, vt, dt, c.origin)
		}
	case reflect.Interface, reflect.Slice, reflect.Map:
		// nothing to do
	default:
		c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice non-map type %v [%s]",
			n, at, c.origin)
	}

	c.addAction(func(args []any) []any {
		v := reflect.ValueOf(value)
		switch reflect.TypeOf(args[n]).Kind() {
		case reflect.Slice:
			setSlice(args[n], v)
		case reflect.Map:
			setMap(args[n], v)
		default:
			reflect.ValueOf(args[n]).Elem().Set(v)
		}
		return nil
	})
	return c
}
// isPreReq reports whether other appears anywhere in c's prerequisite
// chain, directly or transitively.
func (c *Call) isPreReq(other *Call) bool {
	for _, p := range c.preReqs {
		if p == other {
			return true
		}
		if p.isPreReq(other) {
			return true
		}
	}
	return false
}

// After declares that the call may only match after preReq has been exhausted.
func (c *Call) After(preReq *Call) *Call {
	c.t.Helper()

	if preReq == c {
		c.t.Fatalf("A call isn't allowed to be its own prerequisite")
	}
	if preReq.isPreReq(c) {
		c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
	}

	c.preReqs = append(c.preReqs, preReq)
	return c
}
// satisfied reports whether the call has reached its minimum invocation count.
func (c *Call) satisfied() bool {
	return c.minCalls <= c.numCalls
}

// exhausted reports whether the call has reached its maximum invocation count.
func (c *Call) exhausted() bool {
	return c.maxCalls <= c.numCalls
}
// String renders the expectation as Type.Method(matcher, ...) followed by
// the file:line where it was set up.
func (c *Call) String() string {
	formatted := make([]string, 0, len(c.args))
	for _, m := range c.args {
		formatted = append(formatted, m.String())
	}
	return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, strings.Join(formatted, ", "), c.origin)
}
// Tests if the given call matches the expected call.
// If yes, returns nil. If no, returns error with message explaining why it does not match.
//
// For non-variadic methods this is a simple positional matcher check. For
// variadic methods, the final matcher may match either each trailing
// argument individually or all trailing arguments gathered into one slice;
// both layouts are attempted below. Prerequisite satisfaction and
// exhaustion are checked last.
func (c *Call) matches(args []any) error {
	if !c.methodType.IsVariadic() {
		if len(args) != len(c.args) {
			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
				c.origin, len(args), len(c.args))
		}

		for i, m := range c.args {
			if !m.Matches(args[i]) {
				return fmt.Errorf(
					"expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v",
					c.origin, i, formatGottenArg(m, args[i]), m,
				)
			}
		}
	} else {
		if len(c.args) < c.methodType.NumIn()-1 {
			return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d",
				c.origin, len(c.args), c.methodType.NumIn()-1)
		}
		if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
				c.origin, len(args), len(c.args))
		}
		if len(args) < len(c.args)-1 {
			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
				c.origin, len(args), len(c.args)-1)
		}

		for i, m := range c.args {
			if i < c.methodType.NumIn()-1 {
				// Non-variadic args
				if !m.Matches(args[i]) {
					return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
						c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m)
				}
				continue
			}
			// The last arg has a possibility of a variadic argument, so let it branch

			// sample: Foo(a int, b int, c ...int)
			if i < len(c.args) && i < len(args) {
				if m.Matches(args[i]) {
					// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
					// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
					// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
					// Got Foo(a, b) want Foo(matcherA, matcherB)
					// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
					continue
				}
			}

			// The number of actual args don't match the number of matchers,
			// or the last matcher is a slice and the last arg is not.
			// If this function still matches it is because the last matcher
			// matches all the remaining arguments or the lack of any.
			// Convert the remaining arguments, if any, into a slice of the
			// expected type.
			vArgsType := c.methodType.In(c.methodType.NumIn() - 1)
			vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i)
			for _, arg := range args[i:] {
				vArgs = reflect.Append(vArgs, reflect.ValueOf(arg))
			}
			if m.Matches(vArgs.Interface()) {
				// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
				// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
				// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
				// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
				break
			}
			// Wrong number of matchers or not match. Fail.
			// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
			// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
			// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
			// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
			// Got Foo(a, b, c) want Foo(matcherA, matcherB)
			return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
				c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i])
		}
	}

	// Check that all prerequisite calls have been satisfied.
	for _, preReqCall := range c.preReqs {
		if !preReqCall.satisfied() {
			return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
				c.origin, preReqCall, c)
		}
	}

	// Check that the call is not exhausted.
	if c.exhausted() {
		return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin)
	}

	return nil
}
// dropPrereqs tells the expected Call to not re-check prerequisite calls any
// longer, and to return its current set.
func (c *Call) dropPrereqs() []*Call {
	dropped := c.preReqs
	c.preReqs = nil
	return dropped
}

// call records one invocation and hands back the actions to execute for it.
func (c *Call) call() []func([]any) []any {
	c.numCalls++
	return c.actions
}
// InOrder declares that the given calls should occur in order.
// It panics if the type of any of the arguments isn't *Call or a generated
// mock with an embedded *Call.
//
// All arguments are validated before any ordering is applied, so an invalid
// argument leaves every call unchanged.
func InOrder(args ...any) {
	chain := make([]*Call, 0, len(args))
	for i, a := range args {
		c := getCall(a)
		if c == nil {
			panic(fmt.Sprintf(
				"invalid argument at position %d of type %T, InOrder expects *gomock.Call or generated mock types with an embedded *gomock.Call",
				i,
				a,
			))
		}
		chain = append(chain, c)
	}
	for i := 1; i < len(chain); i++ {
		chain[i].After(chain[i-1])
	}
}
// getCall checks if the parameter is a *Call or a generated struct
// that wraps a *Call and returns the *Call pointer - if neither, it returns nil.
//
// For the wrapped case it dereferences a pointer or interface and scans the
// struct's fields for the first one holding a *Call. Unexported fields are
// skipped because Interface() cannot read them.
func getCall(arg any) *Call {
	if call, ok := arg.(*Call); ok {
		return call
	}
	t := reflect.ValueOf(arg)
	if t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface {
		return nil
	}
	t = t.Elem()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if !f.CanInterface() {
			continue
		}
		if call, ok := f.Interface().(*Call); ok {
			return call
		}
	}
	return nil
}
func setSlice(arg any, v reflect.Value) {
va := reflect.ValueOf(arg)
for i := 0; i < v.Len(); i++ {
va.Index(i).Set(v.Index(i))
}
}
func setMap(arg any, v reflect.Value) {
va := reflect.ValueOf(arg)
for _, e := range va.MapKeys() {
va.SetMapIndex(e, reflect.Value{})
}
for _, e := range v.MapKeys() {
va.SetMapIndex(e, v.MapIndex(e))
}
}
// addAction appends an action to run when this Call is invoked; actions run
// in registration order (see Call.actions).
func (c *Call) addAction(action func([]any) []any) {
	c.actions = append(c.actions, action)
}

// formatGottenArg renders a received argument for a mismatch message,
// defaulting to "value (type)" but deferring to the matcher's GotFormatter
// when it implements one.
func formatGottenArg(m Matcher, arg any) string {
	got := fmt.Sprintf("%v (%T)", arg, arg)
	if gs, ok := m.(GotFormatter); ok {
		got = gs.Got(arg)
	}
	return got
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/string.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/string.go | package gomock
import (
"fmt"
"reflect"
)
// getString is a safe way to convert a value to a string for printing results.
// If the value is a mock, getString avoids calling the mocked String() method,
// which avoids potential deadlocks.
func getString(x any) string {
	if isGeneratedMock(x) {
		return fmt.Sprintf("%T", x)
	}
	if s, ok := x.(fmt.Stringer); ok {
		return s.String()
	}
	return fmt.Sprintf("%v", x)
}
// isGeneratedMock checks if the given type has a "isgomock" field,
// indicating it is a generated mock.
func isGeneratedMock(x any) bool {
	t := reflect.TypeOf(x)
	if t == nil {
		return false
	}
	// Look through a single pointer level, mirroring how generated mocks
	// are passed around.
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return false
	}
	_, found := t.FieldByName("isgomock")
	return found
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/controller.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/controller.go | // Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"context"
"fmt"
"reflect"
"runtime"
"sync"
)
// A TestReporter is something that can be used to report test failures. It
// is satisfied by the standard library's *testing.T.
type TestReporter interface {
	Errorf(format string, args ...any)
	Fatalf(format string, args ...any)
}

// TestHelper is a TestReporter that has the Helper method. It is satisfied
// by the standard library's *testing.T.
type TestHelper interface {
	TestReporter
	Helper()
}

// cleanuper is used to check if TestHelper also has the `Cleanup` method. A
// common pattern is to pass in a `*testing.T` to
// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup
// method. This can be utilized to call `Finish()` so the caller of this library
// does not have to.
type cleanuper interface {
	Cleanup(func())
}
// A Controller represents the top-level control of a mock ecosystem. It
// defines the scope and lifetime of mock objects, as well as their
// expectations. It is safe to call Controller's methods from multiple
// goroutines. Each test should create a new Controller.
//
//	func TestFoo(t *testing.T) {
//		ctrl := gomock.NewController(t)
//		// ..
//	}
//
//	func TestBar(t *testing.T) {
//		t.Run("Sub-Test-1", func(st *testing.T) {
//			ctrl := gomock.NewController(st)
//			// ..
//		})
//		t.Run("Sub-Test-2", func(st *testing.T) {
//			ctrl := gomock.NewController(st)
//			// ..
//		})
//	}
type Controller struct {
	// T should only be called within a generated mock. It is not intended to
	// be used in user code and may be changed in future versions. T is the
	// TestReporter passed in when creating the Controller via NewController.
	// If the TestReporter does not implement a TestHelper it will be wrapped
	// with a nopTestHelper.
	T TestHelper
	// mu guards expectedCalls and finished.
	mu            sync.Mutex
	expectedCalls *callSet
	// finished ensures the terminal checks in finish run only once.
	finished bool
}
// NewController returns a new Controller. It is the preferred way to create a Controller.
//
// Passing [*testing.T] registers cleanup function to automatically call [Controller.Finish]
// when the test and all its subtests complete.
//
// A TestReporter lacking a Helper method is wrapped in nopTestHelper.
// Options are applied after construction, so e.g. WithOverridableExpectations
// can swap the call set. If the (possibly wrapped) reporter exposes Cleanup,
// finish is registered to run automatically.
func NewController(t TestReporter, opts ...ControllerOption) *Controller {
	h, ok := t.(TestHelper)
	if !ok {
		h = &nopTestHelper{t}
	}
	ctrl := &Controller{
		T:             h,
		expectedCalls: newCallSet(),
	}
	for _, opt := range opts {
		opt.apply(ctrl)
	}
	if c, ok := isCleanuper(ctrl.T); ok {
		c.Cleanup(func() {
			ctrl.T.Helper()
			ctrl.finish(true, nil)
		})
	}
	return ctrl
}
// ControllerOption configures how a Controller should behave.
type ControllerOption interface {
	// apply mutates the Controller; unexported so only this package can
	// implement ControllerOption.
	apply(*Controller)
}

// overridableExpectationsOption is the ControllerOption returned by
// WithOverridableExpectations.
type overridableExpectationsOption struct{}

// WithOverridableExpectations allows for overridable call expectations
// i.e., subsequent call expectations override existing call expectations
func WithOverridableExpectations() overridableExpectationsOption {
	return overridableExpectationsOption{}
}

// apply swaps the Controller's call set for one that replaces duplicate
// expectations instead of appending to them.
func (o overridableExpectationsOption) apply(ctrl *Controller) {
	ctrl.expectedCalls = newOverridableCallSet()
}
// cancelReporter wraps a TestHelper and cancels an associated context on any
// fatal failure. Used by WithContext.
type cancelReporter struct {
	t      TestHelper
	cancel func()
}

// Errorf forwards to the wrapped reporter without cancelling.
func (r *cancelReporter) Errorf(format string, args ...any) {
	r.t.Errorf(format, args...)
}

// Fatalf cancels the context and forwards to the wrapped reporter. The
// deferred cancel runs even if the underlying Fatalf never returns normally
// (e.g. testing.T's runtime.Goexit).
func (r *cancelReporter) Fatalf(format string, args ...any) {
	defer r.cancel()
	r.t.Fatalf(format, args...)
}

// Helper forwards to the wrapped reporter.
func (r *cancelReporter) Helper() {
	r.t.Helper()
}
// WithContext returns a new Controller and a Context, which is cancelled on any
// fatal failure.
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
	helper, ok := t.(TestHelper)
	if !ok {
		helper = &nopTestHelper{t: t}
	}

	childCtx, cancel := context.WithCancel(ctx)
	reporter := &cancelReporter{t: helper, cancel: cancel}
	return NewController(reporter), childCtx
}
// nopTestHelper adapts a plain TestReporter into a TestHelper by adding a
// no-op Helper method. It is always used by pointer (&nopTestHelper{...}),
// so all methods use pointer receivers for a consistent method set.
type nopTestHelper struct {
	t TestReporter
}

// Errorf forwards to the wrapped TestReporter.
func (h *nopTestHelper) Errorf(format string, args ...any) {
	h.t.Errorf(format, args...)
}

// Fatalf forwards to the wrapped TestReporter.
func (h *nopTestHelper) Fatalf(format string, args ...any) {
	h.t.Fatalf(format, args...)
}

// Helper is a no-op; the wrapped reporter has no Helper method to forward to.
// Pointer receiver for consistency with Errorf and Fatalf (the original mixed
// value and pointer receivers).
func (h *nopTestHelper) Helper() {}
// RecordCall is called by a mock. It should not be called by user code.
//
// It resolves the method by name on the receiver's type via reflection and
// delegates to RecordCallWithMethodType; an unknown method name is a fatal
// test failure. The trailing panic is unreachable when Fatalf terminates the
// test, but keeps the compiler satisfied for reporters whose Fatalf returns.
func (ctrl *Controller) RecordCall(receiver any, method string, args ...any) *Call {
	ctrl.T.Helper()

	recv := reflect.ValueOf(receiver)
	for i := 0; i < recv.Type().NumMethod(); i++ {
		if recv.Type().Method(i).Name == method {
			return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
		}
	}
	ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
	panic("unreachable")
}
// RecordCallWithMethodType is called by a mock. It should not be called by user code.
//
// It builds the expected Call (with default single-invocation expectations)
// and registers it under the Controller's lock.
func (ctrl *Controller) RecordCallWithMethodType(receiver any, method string, methodType reflect.Type, args ...any) *Call {
	ctrl.T.Helper()

	call := newCall(ctrl.T, receiver, method, methodType, args...)

	ctrl.mu.Lock()
	defer ctrl.mu.Unlock()
	ctrl.expectedCalls.Add(call)

	return call
}
// Call is called by a mock. It should not be called by user code.
//
// Under the Controller's lock it finds the matching expectation (failing the
// test if none matches), retires its prerequisites, records the invocation,
// and removes the expectation once exhausted. The matched call's actions are
// then run OUTSIDE the lock, so user Do/DoAndReturn callbacks may safely use
// the mock again. The last action returning a non-nil slice supplies the
// return values.
func (ctrl *Controller) Call(receiver any, method string, args ...any) []any {
	ctrl.T.Helper()

	// Nest this code so we can use defer to make sure the lock is released.
	actions := func() []func([]any) []any {
		ctrl.T.Helper()
		ctrl.mu.Lock()
		defer ctrl.mu.Unlock()

		expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
		if err != nil {
			// callerInfo's skip should be updated if the number of calls between the user's test
			// and this line changes, i.e. this code is wrapped in another anonymous function.
			// 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test.
			origin := callerInfo(3)
			stringArgs := make([]string, len(args))
			for i, arg := range args {
				stringArgs[i] = getString(arg)
			}
			ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, stringArgs, origin, err)
		}

		// Two things happen here:
		// * the matching call no longer needs to check prerequisite calls,
		// * and the prerequisite calls are no longer expected, so remove them.
		preReqCalls := expected.dropPrereqs()
		for _, preReqCall := range preReqCalls {
			ctrl.expectedCalls.Remove(preReqCall)
		}

		actions := expected.call()
		if expected.exhausted() {
			ctrl.expectedCalls.Remove(expected)
		}
		return actions
	}()

	var rets []any
	for _, action := range actions {
		if r := action(args); r != nil {
			rets = r
		}
	}

	return rets
}
// Finish checks to see if all the methods that were expected to be called were called.
// It is not idempotent and therefore can only be invoked once.
//
// Note: If you pass a *testing.T into [NewController], you no longer
// need to call ctrl.Finish() in your test methods.
func (ctrl *Controller) Finish() {
	// If we're currently panicking, probably because this is a deferred call.
	// This must be recovered in the deferred function.
	err := recover()
	ctrl.finish(false, err)
}
// Satisfied returns whether all expected calls bound to this Controller have been satisfied.
// Calling Finish is then guaranteed to not fail due to missing calls.
func (ctrl *Controller) Satisfied() bool {
	ctrl.mu.Lock()
	defer ctrl.mu.Unlock()
	ok := ctrl.expectedCalls.Satisfied()
	return ok
}
// finish implements Finish and the automatic Cleanup hook.
//
// cleanup indicates the Cleanup-registered path; in that mode a double call
// is tolerated silently (Cleanup may run after an explicit Finish), and
// missing calls use Errorf rather than Fatalf since the test is already
// ending. A recovered panic (panicErr) is re-raised before any expectation
// checks so the original failure is not masked.
func (ctrl *Controller) finish(cleanup bool, panicErr any) {
	ctrl.T.Helper()

	ctrl.mu.Lock()
	defer ctrl.mu.Unlock()

	if ctrl.finished {
		if _, ok := isCleanuper(ctrl.T); !ok {
			ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
		}
		return
	}
	ctrl.finished = true

	// Short-circuit, pass through the panic.
	if panicErr != nil {
		panic(panicErr)
	}

	// Check that all remaining expected calls are satisfied.
	failures := ctrl.expectedCalls.Failures()
	for _, call := range failures {
		ctrl.T.Errorf("missing call(s) to %v", call)
	}
	if len(failures) != 0 {
		if !cleanup {
			ctrl.T.Fatalf("aborting test due to missing call(s)")
			return
		}
		ctrl.T.Errorf("aborting test due to missing call(s)")
	}
}
// callerInfo returns the file:line of the call site. skip is the number
// of stack frames to skip when reporting. 0 is callerInfo's call site.
func callerInfo(skip int) string {
	_, file, line, ok := runtime.Caller(skip + 1)
	if !ok {
		return "unknown file"
	}
	return fmt.Sprintf("%s:%d", file, line)
}
// isCleanuper checks if t's base TestReporter has a Cleanup method.
func isCleanuper(t TestReporter) (cleanuper, bool) {
	c, ok := unwrapTestReporter(t).(cleanuper)
	return c, ok
}
// unwrapTestReporter unwraps TestReporter to the base implementation,
// peeling off at most a cancelReporter and then a nopTestHelper layer.
func unwrapTestReporter(t TestReporter) TestReporter {
	switch wrapped := t.(type) {
	case *cancelReporter:
		inner := wrapped.t
		if nop, ok := inner.(*nopTestHelper); ok {
			return nop.t
		}
		return inner
	case *nopTestHelper:
		return wrapped.t
	}
	// not wrapped
	return t
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/mock/gomock/doc.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gomock is a mock framework for Go.
//
// Standard usage:
//
// (1) Define an interface that you wish to mock.
// type MyInterface interface {
// SomeMethod(x int64, y string)
// }
// (2) Use mockgen to generate a mock from the interface.
// (3) Use the mock in a test:
// func TestMyThing(t *testing.T) {
// mockCtrl := gomock.NewController(t)
// mockObj := something.NewMockMyInterface(mockCtrl)
// mockObj.EXPECT().SomeMethod(4, "blah")
// // pass mockObj to a real object and play with it.
// }
//
// By default, expected calls are not enforced to run in any particular order.
// Call order dependency can be enforced by use of InOrder and/or Call.After.
// Call.After can create more varied call order dependencies, but InOrder is
// often more convenient.
//
// The following examples create equivalent call order dependencies.
//
// Example of using Call.After to chain expected call order:
//
// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
//
// Example of using InOrder to declare expected call order:
//
// gomock.InOrder(
// mockObj.EXPECT().SomeMethod(1, "first"),
// mockObj.EXPECT().SomeMethod(2, "second"),
// mockObj.EXPECT().SomeMethod(3, "third"),
// )
//
// The standard TestReporter most users will pass to `NewController` is a
// `*testing.T` from the context of the test. Note that this will use the
// standard `t.Error` and `t.Fatal` methods to report what happened in the test.
// In some cases this can leave your testing package in a weird state if global
// state is used since `t.Fatal` is like calling panic in the middle of a
// function. In these cases it is recommended that you pass in your own
// `TestReporter`.
package gomock
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/automaxprocs.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/automaxprocs.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package automaxprocs automatically sets GOMAXPROCS to match the Linux
// container CPU quota, if any.
package automaxprocs // import "go.uber.org/automaxprocs"
import (
"log"
"go.uber.org/automaxprocs/maxprocs"
)
// init applies the CPU-quota-derived GOMAXPROCS value as a side effect of
// importing this package, logging through the standard library's log.Printf.
// Any result from Set is discarded here; import go.uber.org/automaxprocs/maxprocs
// directly to configure or handle it.
func init() {
	maxprocs.Set(maxprocs.Logger(log.Printf))
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to
// match the configured Linux CPU quota. Unlike the top-level automaxprocs
// package, it lets the caller configure logging and handle errors.
package maxprocs // import "go.uber.org/automaxprocs/maxprocs"
import (
"os"
"runtime"
iruntime "go.uber.org/automaxprocs/internal/runtime"
)
const _maxProcsKey = "GOMAXPROCS"
func currentMaxProcs() int {
return runtime.GOMAXPROCS(0)
}
type config struct {
printf func(string, ...interface{})
procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error)
minGOMAXPROCS int
roundQuotaFunc func(v float64) int
}
func (c *config) log(fmt string, args ...interface{}) {
if c.printf != nil {
c.printf(fmt, args...)
}
}
// An Option alters the behavior of Set.
type Option interface {
apply(*config)
}
// Logger uses the supplied printf implementation for log output. By default,
// Set doesn't log anything.
func Logger(printf func(string, ...interface{})) Option {
return optionFunc(func(cfg *config) {
cfg.printf = printf
})
}
// Min sets the minimum GOMAXPROCS value that will be used.
// Any value below 1 is ignored.
func Min(n int) Option {
return optionFunc(func(cfg *config) {
if n >= 1 {
cfg.minGOMAXPROCS = n
}
})
}
// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int.
func RoundQuotaFunc(rf func(v float64) int) Option {
return optionFunc(func(cfg *config) {
cfg.roundQuotaFunc = rf
})
}
type optionFunc func(*config)
func (of optionFunc) apply(cfg *config) { of(cfg) }
// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
// any error encountered and an undo function.
//
// Set is a no-op on non-Linux systems and in Linux environments without a
// configured CPU quota.
func Set(opts ...Option) (func(), error) {
cfg := &config{
procs: iruntime.CPUQuotaToGOMAXPROCS,
roundQuotaFunc: iruntime.DefaultRoundFunc,
minGOMAXPROCS: 1,
}
for _, o := range opts {
o.apply(cfg)
}
undoNoop := func() {
cfg.log("maxprocs: No GOMAXPROCS change to reset")
}
// Honor the GOMAXPROCS environment variable if present. Otherwise, amend
// `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
// Linux, and guarantee a minimum value of 1. The minimum guaranteed value
// can be overridden using `maxprocs.Min()`.
if max, exists := os.LookupEnv(_maxProcsKey); exists {
cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max)
return undoNoop, nil
}
maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
if err != nil {
return undoNoop, err
}
if status == iruntime.CPUQuotaUndefined {
cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs())
return undoNoop, nil
}
prev := currentMaxProcs()
undo := func() {
cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev)
runtime.GOMAXPROCS(prev)
}
switch status {
case iruntime.CPUQuotaMinUsed:
cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs)
case iruntime.CPUQuotaUsed:
cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs)
}
runtime.GOMAXPROCS(maxProcs)
return undo, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/maxprocs/version.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/maxprocs/version.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package maxprocs
// Version is the current package version.
const Version = "1.6.0"
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build !linux
// +build !linux
package runtime
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
// current OS.
func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package runtime
import (
"errors"
cg "go.uber.org/automaxprocs/internal/cgroups"
)
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
// If round == nil, DefaultRoundFunc is used.
func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
if round == nil {
round = DefaultRoundFunc
}
cgroups, err := _newQueryer()
if err != nil {
return -1, CPUQuotaUndefined, err
}
quota, defined, err := cgroups.CPUQuota()
if !defined || err != nil {
return -1, CPUQuotaUndefined, err
}
maxProcs := round(quota)
if minValue > 0 && maxProcs < minValue {
return minValue, CPUQuotaMinUsed, nil
}
return maxProcs, CPUQuotaUsed, nil
}
type queryer interface {
CPUQuota() (float64, bool, error)
}
var (
_newCgroups2 = cg.NewCGroups2ForCurrentProcess
_newCgroups = cg.NewCGroupsForCurrentProcess
_newQueryer = newQueryer
)
func newQueryer() (queryer, error) {
cgroups, err := _newCgroups2()
if err == nil {
return cgroups, nil
}
if errors.Is(err, cg.ErrNotV2) {
return _newCgroups()
}
return nil, err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package runtime
import "math"
// CPUQuotaStatus presents the status of how CPU quota is used
type CPUQuotaStatus int
const (
// CPUQuotaUndefined is returned when CPU quota is undefined
CPUQuotaUndefined CPUQuotaStatus = iota
// CPUQuotaUsed is returned when a valid CPU quota can be used
CPUQuotaUsed
// CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
CPUQuotaMinUsed
)
// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
func DefaultRoundFunc(v float64) int {
return int(math.Floor(v))
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
import (
"bufio"
"io"
"os"
"path/filepath"
"strconv"
)
// CGroup represents the data structure for a Linux control group.
type CGroup struct {
path string
}
// NewCGroup returns a new *CGroup from a given path.
func NewCGroup(path string) *CGroup {
return &CGroup{path: path}
}
// Path returns the path of the CGroup*.
func (cg *CGroup) Path() string {
return cg.path
}
// ParamPath returns the path of the given cgroup param under itself.
func (cg *CGroup) ParamPath(param string) string {
return filepath.Join(cg.path, param)
}
// readFirstLine reads the first line from a cgroup param file.
func (cg *CGroup) readFirstLine(param string) (string, error) {
paramFile, err := os.Open(cg.ParamPath(param))
if err != nil {
return "", err
}
defer paramFile.Close()
scanner := bufio.NewScanner(paramFile)
if scanner.Scan() {
return scanner.Text(), nil
}
if err := scanner.Err(); err != nil {
return "", err
}
return "", io.ErrUnexpectedEOF
}
// readInt parses the first line from a cgroup param file as int.
func (cg *CGroup) readInt(param string) (int, error) {
text, err := cg.readFirstLine(param)
if err != nil {
return 0, err
}
return strconv.Atoi(text)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
)
const (
_mountInfoSep = " "
_mountInfoOptsSep = ","
_mountInfoOptionalFieldsSep = "-"
)
const (
_miFieldIDMountID = iota
_miFieldIDParentID
_miFieldIDDeviceID
_miFieldIDRoot
_miFieldIDMountPoint
_miFieldIDOptions
_miFieldIDOptionalFields
_miFieldCountFirstHalf
)
const (
_miFieldOffsetFSType = iota
_miFieldOffsetMountSource
_miFieldOffsetSuperOptions
_miFieldCountSecondHalf
)
const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
// MountPoint is the data structure for the mount points in
// `/proc/$PID/mountinfo`. See also proc(5) for more information.
type MountPoint struct {
MountID int
ParentID int
DeviceID string
Root string
MountPoint string
Options []string
OptionalFields []string
FSType string
MountSource string
SuperOptions []string
}
// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
// returns a new *MountPoint.
func NewMountPointFromLine(line string) (*MountPoint, error) {
fields := strings.Split(line, _mountInfoSep)
if len(fields) < _miFieldCountMin {
return nil, mountPointFormatInvalidError{line}
}
mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
if err != nil {
return nil, err
}
parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
if err != nil {
return nil, err
}
for i, field := range fields[_miFieldIDOptionalFields:] {
if field == _mountInfoOptionalFieldsSep {
// End of optional fields.
fsTypeStart := _miFieldIDOptionalFields + i + 1
// Now we know where the optional fields end, split the line again with a
// limit to avoid issues with spaces in super options as present on WSL.
fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf)
if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
return nil, mountPointFormatInvalidError{line}
}
miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
return &MountPoint{
MountID: mountID,
ParentID: parentID,
DeviceID: fields[_miFieldIDDeviceID],
Root: fields[_miFieldIDRoot],
MountPoint: fields[_miFieldIDMountPoint],
Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
FSType: fields[miFieldIDFSType],
MountSource: fields[miFieldIDMountSource],
SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
}, nil
}
}
return nil, mountPointFormatInvalidError{line}
}
// Translate converts an absolute path inside the *MountPoint's file system to
// the host file system path in the mount namespace the *MountPoint belongs to.
func (mp *MountPoint) Translate(absPath string) (string, error) {
relPath, err := filepath.Rel(mp.Root, absPath)
if err != nil {
return "", err
}
if relPath == ".." || strings.HasPrefix(relPath, "../") {
return "", pathNotExposedFromMountPointError{
mountPoint: mp.MountPoint,
root: mp.Root,
path: absPath,
}
}
return filepath.Join(mp.MountPoint, relPath), nil
}
// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
// and yields parsed *MountPoint into newMountPoint.
func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
mountInfoFile, err := os.Open(procPathMountInfo)
if err != nil {
return err
}
defer mountInfoFile.Close()
scanner := bufio.NewScanner(mountInfoFile)
for scanner.Scan() {
mountPoint, err := NewMountPointFromLine(scanner.Text())
if err != nil {
return err
}
if err := newMountPoint(mountPoint); err != nil {
return err
}
}
return scanner.Err()
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
import "fmt"
type cgroupSubsysFormatInvalidError struct {
line string
}
type mountPointFormatInvalidError struct {
line string
}
type pathNotExposedFromMountPointError struct {
mountPoint string
root string
path string
}
func (err cgroupSubsysFormatInvalidError) Error() string {
return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
}
func (err mountPointFormatInvalidError) Error() string {
return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
}
func (err pathNotExposedFromMountPointError) Error() string {
return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
const (
// _cgroupFSType is the Linux CGroup file system type used in
// `/proc/$PID/mountinfo`.
_cgroupFSType = "cgroup"
// _cgroupSubsysCPU is the CPU CGroup subsystem.
_cgroupSubsysCPU = "cpu"
// _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
_cgroupSubsysCPUAcct = "cpuacct"
// _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
_cgroupSubsysCPUSet = "cpuset"
// _cgroupSubsysMemory is the Memory CGroup subsystem.
_cgroupSubsysMemory = "memory"
// _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
// parameter.
_cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
// _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
// parameter.
_cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
)
const (
_procPathCGroup = "/proc/self/cgroup"
_procPathMountInfo = "/proc/self/mountinfo"
)
// CGroups is a map that associates each CGroup with its subsystem name.
type CGroups map[string]*CGroup
// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files
// under for some process under `/proc` file system (see also proc(5) for more
// information).
func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
if err != nil {
return nil, err
}
cgroups := make(CGroups)
newMountPoint := func(mp *MountPoint) error {
if mp.FSType != _cgroupFSType {
return nil
}
for _, opt := range mp.SuperOptions {
subsys, exists := cgroupSubsystems[opt]
if !exists {
continue
}
cgroupPath, err := mp.Translate(subsys.Name)
if err != nil {
return err
}
cgroups[opt] = NewCGroup(cgroupPath)
}
return nil
}
if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
return nil, err
}
return cgroups, nil
}
// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
// process.
func NewCGroupsForCurrentProcess() (CGroups, error) {
return NewCGroups(_procPathMountInfo, _procPathCGroup)
}
// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`.
func (cg CGroups) CPUQuota() (float64, bool, error) {
cpuCGroup, exists := cg[_cgroupSubsysCPU]
if !exists {
return -1, false, nil
}
cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
if defined := cfsQuotaUs > 0; err != nil || !defined {
return -1, defined, err
}
cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
if defined := cfsPeriodUs > 0; err != nil || !defined {
return -1, defined, err
}
return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
import (
"bufio"
"os"
"strconv"
"strings"
)
const (
_cgroupSep = ":"
_cgroupSubsysSep = ","
)
const (
_csFieldIDID = iota
_csFieldIDSubsystems
_csFieldIDName
_csFieldCount
)
// CGroupSubsys represents the data structure for entities in
// `/proc/$PID/cgroup`. See also proc(5) for more information.
type CGroupSubsys struct {
ID int
Subsystems []string
Name string
}
// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
// the format of `/proc/$PID/cgroup`
func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
fields := strings.SplitN(line, _cgroupSep, _csFieldCount)
if len(fields) != _csFieldCount {
return nil, cgroupSubsysFormatInvalidError{line}
}
id, err := strconv.Atoi(fields[_csFieldIDID])
if err != nil {
return nil, err
}
cgroup := &CGroupSubsys{
ID: id,
Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
Name: fields[_csFieldIDName],
}
return cgroup, nil
}
// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
// and returns a new map[string]*CGroupSubsys.
func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
cgroupFile, err := os.Open(procPathCGroup)
if err != nil {
return nil, err
}
defer cgroupFile.Close()
scanner := bufio.NewScanner(cgroupFile)
subsystems := make(map[string]*CGroupSubsys)
for scanner.Scan() {
cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
if err != nil {
return nil, err
}
for _, subsys := range cgroup.Subsystems {
subsystems[subsys] = cgroup
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
return subsystems, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go | // Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build linux
// +build linux
package cgroups
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"path"
"strconv"
"strings"
)
const (
// _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period
// parameter.
_cgroupv2CPUMax = "cpu.max"
// _cgroupFSType is the Linux CGroup-V2 file system type used in
// `/proc/$PID/mountinfo`.
_cgroupv2FSType = "cgroup2"
_cgroupv2MountPoint = "/sys/fs/cgroup"
_cgroupV2CPUMaxDefaultPeriod = 100000
_cgroupV2CPUMaxQuotaMax = "max"
)
const (
_cgroupv2CPUMaxQuotaIndex = iota
_cgroupv2CPUMaxPeriodIndex
)
// ErrNotV2 indicates that the system is not using cgroups2.
var ErrNotV2 = errors.New("not using cgroups2")
// CGroups2 provides access to cgroups data for systems using cgroups2.
type CGroups2 struct {
	mountPoint string // mount point of the unified hierarchy (set to _cgroupv2MountPoint)
	groupPath  string // this process's cgroup path within the hierarchy (from /proc/$PID/cgroup)
	cpuMaxFile string // name of the CPU quota/period file (set to _cgroupv2CPUMax, i.e. "cpu.max")
}
// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process.
//
// This returns ErrNotV2 if the system is not using cgroups2.
func NewCGroups2ForCurrentProcess() (*CGroups2, error) {
	// Inspect the current process's own /proc mountinfo and cgroup files.
	return newCGroups2From(_procPathMountInfo, _procPathCGroup)
}
// newCGroups2From builds a CGroups2 using the supplied mountinfo and cgroup
// file paths. It returns ErrNotV2 unless the host mounts cgroup2 at the
// standard mount point and the process belongs to the v2 hierarchy.
func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) {
	v2, err := isCGroupV2(mountInfoPath)
	if err != nil {
		return nil, err
	}
	if !v2 {
		return nil, ErrNotV2
	}

	subsystems, err := parseCGroupSubsystems(procPathCGroup)
	if err != nil {
		return nil, err
	}

	// The unified (v2) hierarchy is always reported with ID 0 in
	// /proc/$PID/cgroup; any other entries belong to v1 controllers.
	for _, subsys := range subsystems {
		if subsys.ID != 0 {
			continue
		}
		return &CGroups2{
			mountPoint: _cgroupv2MountPoint,
			groupPath:  subsys.Name,
			cpuMaxFile: _cgroupv2CPUMax,
		}, nil
	}
	return nil, ErrNotV2
}
// isCGroupV2 reports whether the file at procPathMountInfo (typically
// /proc/$PID/mountinfo) lists a cgroup2 filesystem mounted at the standard
// cgroup mount point.
func isCGroupV2(procPathMountInfo string) (bool, error) {
	found := false
	visit := func(mp *MountPoint) error {
		if mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint {
			found = true
		}
		return nil
	}
	if err := parseMountInfo(procPathMountInfo, visit); err != nil {
		return false, err
	}
	return found, nil
}
// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller.
// It is a result of reading cpu quota and period from cpu.max file.
// It will return `cpu.max / cpu.period`. If cpu.max is set to max, it returns
// (-1, false, nil)
func (cg *CGroups2) CPUQuota() (float64, bool, error) {
	cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile))
	if err != nil {
		if os.IsNotExist(err) {
			// A missing cpu.max file means no CPU limit is defined.
			return -1, false, nil
		}
		return -1, false, err
	}
	defer cpuMaxParams.Close()
	scanner := bufio.NewScanner(cpuMaxParams)
	if scanner.Scan() {
		// cpu.max holds a single line: "$MAX $PERIOD". The period may be
		// omitted, in which case the default period is assumed.
		fields := strings.Fields(scanner.Text())
		if len(fields) == 0 || len(fields) > 2 {
			return -1, false, fmt.Errorf("invalid format")
		}
		if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
			// A literal "max" quota means unlimited CPU.
			return -1, false, nil
		}
		max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
		if err != nil {
			return -1, false, err
		}
		var period int
		if len(fields) == 1 {
			period = _cgroupV2CPUMaxDefaultPeriod
		} else {
			period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
			if err != nil {
				return -1, false, err
			}
			if period == 0 {
				// Guard the division below.
				return -1, false, errors.New("zero value for period is not allowed")
			}
		}
		return float64(max) / float64(period), true, nil
	}
	if err := scanner.Err(); err != nil {
		return -1, false, err
	}
	// Scan returned false without a read error: the file was empty.
	return 0, false, io.ErrUnexpectedEOF
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package cgroups provides utilities to access Linux control group (CGroups)
// parameters (CPU quota, for example) for a given process.
package cgroups
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/flag.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/flag.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"flag"
"go.uber.org/zap/zapcore"
)
// LevelFlag uses the standard library's flag.Var to declare a global flag
// with the specified name, default, and usage guidance. The returned value is
// a pointer to the value of the flag.
//
// If you don't want to use the flag package's global state, you can use any
// non-nil *Level as a flag.Value with your own *flag.FlagSet.
func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
	// Copy the default so the registered flag owns its own mutable Level.
	level := defaultLevel
	flag.Var(&level, name, usage)
	return &level
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/time.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/time.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import "time"
func timeToMillis(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/level.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/level.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"sync/atomic"
"go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
const (
// DebugLevel logs are typically voluminous, and are usually disabled in
// production.
DebugLevel = zapcore.DebugLevel
// InfoLevel is the default logging priority.
InfoLevel = zapcore.InfoLevel
// WarnLevel logs are more important than Info, but don't need individual
// human review.
WarnLevel = zapcore.WarnLevel
// ErrorLevel logs are high-priority. If an application is running smoothly,
// it shouldn't generate any error-level logs.
ErrorLevel = zapcore.ErrorLevel
// DPanicLevel logs are particularly important errors. In development the
// logger panics after writing the message.
DPanicLevel = zapcore.DPanicLevel
// PanicLevel logs a message, then panics.
PanicLevel = zapcore.PanicLevel
// FatalLevel logs a message, then calls os.Exit(1).
FatalLevel = zapcore.FatalLevel
)
// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
// an anonymous function.
//
// It's particularly useful when splitting log output between different
// outputs (e.g., standard error and standard out). For sample code, see the
// package-level AdvancedConfiguration example.
type LevelEnablerFunc func(zapcore.Level) bool
// Enabled calls the wrapped function.
func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
// you safely change the log level of a tree of loggers (the root logger and
// any children created by adding context) at runtime.
//
// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
// alter its level.
//
// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
// their internal atomic pointer.
type AtomicLevel struct {
l *atomic.Int32
}
var _ internal.LeveledEnabler = AtomicLevel{}
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
	// Allocate the backing atomic and start at the default InfoLevel.
	var level atomic.Int32
	level.Store(int32(InfoLevel))
	return AtomicLevel{l: &level}
}
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
// and then calls SetLevel with the given level.
func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
	lvl := NewAtomicLevel()
	lvl.SetLevel(l)
	return lvl
}
// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseAtomicLevel(text string) (AtomicLevel, error) {
	lvl, err := zapcore.ParseLevel(text)
	if err != nil {
		// Return a usable (Info-level) AtomicLevel alongside the error, as
		// callers of the original API receive.
		return NewAtomicLevel(), err
	}
	return NewAtomicLevelAt(lvl), nil
}
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
	return lvl.Level().Enabled(l)
}

// Level returns the minimum enabled log level.
// The read is an atomic load, so it is safe to call concurrently with SetLevel.
func (lvl AtomicLevel) Level() zapcore.Level {
	return zapcore.Level(int8(lvl.l.Load()))
}

// SetLevel alters the logging level.
// The write is an atomic store, so it is safe to call concurrently with Level.
func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
	lvl.l.Store(int32(l))
}

// String returns the string representation of the underlying Level.
func (lvl AtomicLevel) String() string {
	return lvl.Level().String()
}
// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
// representations as the static zapcore.Levels ("debug", "info", "warn",
// "error", "dpanic", "panic", and "fatal").
func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
	// Lazily allocate the backing atomic so a zero-valued AtomicLevel can
	// still be unmarshaled into.
	if lvl.l == nil {
		lvl.l = new(atomic.Int32)
	}

	var parsed zapcore.Level
	if err := parsed.UnmarshalText(text); err != nil {
		return err
	}

	lvl.SetLevel(parsed)
	return nil
}
// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
// text representation as the static zapcore.Levels ("debug", "info", "warn",
// "error", "dpanic", "panic", and "fatal").
func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
	// Snapshot the current level atomically, then reuse its text encoding.
	return lvl.Level().MarshalText()
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/sink.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/sink.go | // Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"errors"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"go.uber.org/zap/zapcore"
)
const schemeFile = "file"
var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
zapcore.WriteSyncer
io.Closer
}
// errSinkNotFound is returned when no sink factory is registered for a
// URL's scheme.
type errSinkNotFound struct {
	scheme string
}

// Error implements the error interface.
func (e *errSinkNotFound) Error() string {
	return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}

// nopCloserSink adapts a WriteSyncer (e.g. os.Stdout) into a Sink whose
// Close is a no-op; closing the standard streams would be undesirable.
type nopCloserSink struct{ zapcore.WriteSyncer }

func (nopCloserSink) Close() error { return nil }
type sinkRegistry struct {
mu sync.Mutex
factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
}
// newSinkRegistry constructs a registry pre-populated with the built-in
// "file" scheme factory.
func newSinkRegistry() *sinkRegistry {
	registry := &sinkRegistry{
		factories: map[string]func(*url.URL) (Sink, error){},
		openFile:  os.OpenFile,
	}
	// Infallible operation: the registry is empty, so we can't have a conflict.
	_ = registry.RegisterSink(schemeFile, registry.newFileSinkFromURL)
	return registry
}
// RegisterSink registers the given factory for the specified scheme. The
// scheme is normalized to lowercase; registration fails on an empty or
// invalid scheme, or one that already has a factory.
func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
	sr.mu.Lock()
	defer sr.mu.Unlock()

	if scheme == "" {
		return errors.New("can't register a sink factory for empty string")
	}
	normalized, err := normalizeScheme(scheme)
	if err != nil {
		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
	}
	if _, exists := sr.factories[normalized]; exists {
		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
	}

	sr.factories[normalized] = factory
	return nil
}
// newSink resolves rawURL to a Sink: absolute filesystem paths open a file
// sink directly; everything else is parsed as a URL and dispatched to the
// factory registered for its scheme (defaulting to "file").
func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
	// url.Parse mishandles Windows paths such as `c:\log.txt`: the drive
	// becomes the scheme and the path is lost unless `c:/log.txt` is used.
	// Checking IsAbs first avoids Windows-specific URL handling; since
	// filepath.IsAbs is OS-specific, `c:/log.txt` is only absolute on Windows.
	if filepath.IsAbs(rawURL) {
		return sr.newFileSinkFromPath(rawURL)
	}

	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
	}
	if u.Scheme == "" {
		// A bare relative path defaults to the file scheme.
		u.Scheme = schemeFile
	}

	sr.mu.Lock()
	factory, registered := sr.factories[u.Scheme]
	sr.mu.Unlock()
	if !registered {
		return nil, &errSinkNotFound{u.Scheme}
	}
	return factory(u)
}
// RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
	return _sinkRegistry.RegisterSink(scheme, factory)
}
// newFileSinkFromURL opens a file sink from a file:// URL, rejecting URL
// components (user info, fragment, query, port, non-localhost host) that
// have no meaning for local files.
func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
	switch {
	case u.User != nil:
		return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
	case u.Fragment != "":
		return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
	case u.RawQuery != "":
		return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
	case u.Port() != "":
		// Error messages are better if we check hostname and port separately.
		return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
	}
	if hn := u.Hostname(); hn != "" && hn != "localhost" {
		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
	}
	return sr.newFileSinkFromPath(u.Path)
}
// newFileSinkFromPath opens path for appending, creating it if needed. The
// special paths "stdout" and "stderr" map to the standard streams, wrapped
// so that Close is a no-op.
func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
	switch path {
	case "stdout":
		return nopCloserSink{os.Stdout}, nil
	case "stderr":
		return nopCloserSink{os.Stderr}, nil
	}
	// sr.openFile is os.OpenFile in production; it is a field to allow
	// stubbing in tests.
	return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
// normalizeScheme lowercases s and validates it against the scheme grammar
// in section 3.1 of RFC 3986: a letter followed by any number of letters,
// digits, '+', '-', or '.'.
//
// It returns the normalized scheme, or an error describing the first
// violation found.
func normalizeScheme(s string) (string, error) {
	// https://tools.ietf.org/html/rfc3986#section-3.1
	s = strings.ToLower(s)
	if s == "" {
		// Guard the s[0] access below: an empty string previously caused an
		// index-out-of-range panic. Callers generally pre-check, but this
		// function should be safe on its own.
		return "", errors.New("must start with a letter")
	}
	if first := s[0]; 'a' > first || 'z' < first {
		return "", errors.New("must start with a letter")
	}
	for i := 1; i < len(s); i++ { // iterate over bytes, not runes
		c := s[i]
		switch {
		case 'a' <= c && c <= 'z':
		case '0' <= c && c <= '9':
		case c == '.' || c == '+' || c == '-':
		default:
			return "", fmt.Errorf("may not contain %q", c)
		}
	}
	return s, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/error.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/error.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore"
)
var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
})
// Error is shorthand for the common idiom NamedError("error", err).
// Like NamedError, it is a no-op when err is nil.
func Error(err error) Field {
	return NamedError("error", err)
}

// NamedError constructs a field that lazily stores err.Error() under the
// provided key. Errors which also implement fmt.Formatter (like those produced
// by github.com/pkg/errors) will also have their verbose representation stored
// under key+"Verbose". If passed a nil error, the field is a no-op.
//
// For the common case in which the key is simply "error", the Error function
// is shorter and less repetitive.
func NamedError(key string, err error) Field {
	if err == nil {
		// A nil error yields a no-op field rather than a "nil" entry.
		return Skip()
	}
	return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
}
// errArray wraps a []error so it can be encoded as a zapcore array.
type errArray []error

// MarshalLogArray appends each non-nil error in the slice to arr as an
// object with an "error" attribute (and possibly "errorVerbose").
func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range errs {
		if errs[i] == nil {
			continue
		}
		// To represent each error as an object with an "error" attribute and
		// potentially an "errorVerbose" attribute, we need to wrap it in a
		// type that implements LogObjectMarshaler. To prevent this from
		// allocating, pool the wrapper type.
		elem := _errArrayElemPool.Get()
		elem.error = errs[i]
		err := arr.AppendObject(elem)
		// Clear the reference before returning the wrapper to the pool so
		// the pooled object does not keep the error alive.
		elem.error = nil
		_errArrayElemPool.Put(elem)
		if err != nil {
			return err
		}
	}
	return nil
}
// errArrayElem is a pooled wrapper that encodes a single error as an
// object, so it can appear as an element of a logged error array.
type errArrayElem struct {
	error
}

// MarshalLogObject implements zapcore.ObjectMarshaler for the wrapped error.
func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	// Re-use the error field's logic, which supports non-standard error types.
	Error(e.error).AddTo(enc)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/config.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/config.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"errors"
"sort"
"time"
"go.uber.org/zap/zapcore"
)
// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
// global CPU and I/O load that logging puts on your process while attempting
// to preserve a representative subset of your logs.
//
// If specified, the Sampler will invoke the Hook after each decision.
//
// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
// details.
type SamplingConfig struct {
Initial int `json:"initial" yaml:"initial"`
Thereafter int `json:"thereafter" yaml:"thereafter"`
Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
}
// Config offers a declarative way to construct a logger. It doesn't do
// anything that can't be done with New, Options, and the various
// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
// toggle common options.
//
// Note that Config intentionally supports only the most common options. More
// unusual logging setups (logging to network connections or message queues,
// splitting output between multiple files, etc.) are possible, but require
// direct use of the zapcore package. For sample code, see the package-level
// BasicConfiguration and AdvancedConfiguration examples.
//
// For an example showing runtime log level changes, see the documentation for
// AtomicLevel.
type Config struct {
// Level is the minimum enabled logging level. Note that this is a dynamic
// level, so calling Config.Level.SetLevel will atomically change the log
// level of all loggers descended from this config.
Level AtomicLevel `json:"level" yaml:"level"`
// Development puts the logger in development mode, which changes the
// behavior of DPanicLevel and takes stacktraces more liberally.
Development bool `json:"development" yaml:"development"`
// DisableCaller stops annotating logs with the calling function's file
// name and line number. By default, all logs are annotated.
DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
// DisableStacktrace completely disables automatic stacktrace capturing. By
// default, stacktraces are captured for WarnLevel and above logs in
// development and ErrorLevel and above in production.
DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
// Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
// Encoding sets the logger's encoding. Valid values are "json" and
// "console", as well as any third-party encodings registered via
// RegisterEncoder.
Encoding string `json:"encoding" yaml:"encoding"`
// EncoderConfig sets options for the chosen encoder. See
// zapcore.EncoderConfig for details.
EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
// OutputPaths is a list of URLs or file paths to write logging output to.
// See Open for details.
OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
// ErrorOutputPaths is a list of URLs to write internal logger errors to.
// The default is standard error.
//
// Note that this setting only affects internal errors; for sample code that
// sends error-level logs to a different location from info- and debug-level
// logs, see the package-level AdvancedConfiguration example.
ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
// InitialFields is a collection of fields to add to the root logger.
InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
}
// NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments.
//
// Messages encoded with this configuration will be JSON-formatted
// and will have the following keys by default:
//
// - "level": The logging level (e.g. "info", "error").
// - "ts": The current time in number of seconds since the Unix epoch.
// - "msg": The message passed to the log statement.
// - "caller": If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - "stacktrace": If available, a stack trace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted as floating-point number of seconds since the Unix
// epoch.
// - Duration is formatted as floating-point number of seconds.
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewProductionEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig {
	return zapcore.EncoderConfig{
		TimeKey:        "ts",
		LevelKey:       "level",
		NameKey:        "logger",
		CallerKey:      "caller",
		FunctionKey:    zapcore.OmitKey, // omit the function name by default
		MessageKey:     "msg",
		StacktraceKey:  "stacktrace",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.LowercaseLevelEncoder, // e.g. "info"
		EncodeTime:     zapcore.EpochTimeEncoder,      // float seconds since the Unix epoch
		EncodeDuration: zapcore.SecondsDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}
}
// NewProductionConfig builds a reasonable default production logging
// configuration.
// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
// Logs are written to standard error.
// Stacktraces are included on logs of ErrorLevel and above.
// DPanicLevel logs will not panic, but will write a stacktrace.
//
// Sampling is enabled at 100:100 by default,
// meaning that after the first 100 log entries
// with the same level and message in the same second,
// it will log every 100th entry
// with the same level and message in the same second.
// You may disable this behavior by setting Sampling to nil.
//
// See [NewProductionEncoderConfig] for information
// on the default encoder configuration.
func NewProductionConfig() Config {
	return Config{
		Level:       NewAtomicLevelAt(InfoLevel),
		Development: false,
		Sampling: &SamplingConfig{
			// After the first 100 identical entries in a second, keep
			// every 100th.
			Initial:    100,
			Thereafter: 100,
		},
		Encoding:         "json",
		EncoderConfig:    NewProductionEncoderConfig(),
		OutputPaths:      []string{"stderr"},
		ErrorOutputPaths: []string{"stderr"},
	}
}
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments.
//
// Messages encoded with this configuration will use Zap's console encoder
// intended to print human-readable output.
// It will print log messages with the following information:
//
// - The log level (e.g. "INFO", "ERROR").
// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - The message passed to the log statement.
// - If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - If available, a stacktrace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - Duration is formatted as a string (e.g. "1.234s").
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewDevelopmentEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
	return zapcore.EncoderConfig{
		// Keys can be anything except the empty string.
		TimeKey:        "T",
		LevelKey:       "L",
		NameKey:        "N",
		CallerKey:      "C",
		FunctionKey:    zapcore.OmitKey,
		MessageKey:     "M",
		StacktraceKey:  "S",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.CapitalLevelEncoder, // e.g. "INFO"
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder, // e.g. "1.234s"
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}
}
// NewDevelopmentConfig builds a reasonable default development logging
// configuration: DebugLevel and above, console encoding, logs written to
// standard error, stacktraces on WarnLevel and above, and panics on
// DPanicLevel logs.
//
// See [NewDevelopmentEncoderConfig] for information on the default
// encoder configuration.
func NewDevelopmentConfig() Config {
	cfg := Config{
		Level:       NewAtomicLevelAt(DebugLevel),
		Development: true,
		Encoding:    "console",
	}
	cfg.EncoderConfig = NewDevelopmentEncoderConfig()
	cfg.OutputPaths = []string{"stderr"}
	cfg.ErrorOutputPaths = []string{"stderr"}
	return cfg
}
// Build constructs a logger from the Config and Options.
//
// The Level is validated before any encoder or sink is created: Open may
// create files on disk, and once Build returns an error there is no way
// for the caller to close them, so failing fast avoids leaking open sinks.
func (cfg Config) Build(opts ...Option) (*Logger, error) {
	if cfg.Level == (AtomicLevel{}) {
		return nil, errors.New("missing Level")
	}

	enc, err := cfg.buildEncoder()
	if err != nil {
		return nil, err
	}

	sink, errSink, err := cfg.openSinks()
	if err != nil {
		return nil, err
	}

	log := New(
		zapcore.NewCore(enc, sink, cfg.Level),
		cfg.buildOptions(errSink)...,
	)
	// Apply caller-supplied options last so they can override the
	// config-derived ones.
	if len(opts) > 0 {
		log = log.WithOptions(opts...)
	}
	return log, nil
}
// buildOptions translates the Config's knobs into the logger Options that
// Build passes to New. errSink receives the logger's internal errors.
func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
	opts := []Option{ErrorOutput(errSink)}

	if cfg.Development {
		opts = append(opts, Development())
	}

	if !cfg.DisableCaller {
		opts = append(opts, AddCaller())
	}

	// Stacktraces are captured at ErrorLevel in production and WarnLevel
	// in development, unless disabled entirely.
	stackLevel := ErrorLevel
	if cfg.Development {
		stackLevel = WarnLevel
	}
	if !cfg.DisableStacktrace {
		opts = append(opts, AddStacktrace(stackLevel))
	}

	if scfg := cfg.Sampling; scfg != nil {
		opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
			var samplerOpts []zapcore.SamplerOption
			if scfg.Hook != nil {
				samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
			}
			// Consistently use scfg (== cfg.Sampling) throughout the closure.
			return zapcore.NewSamplerWithOptions(
				core,
				time.Second,
				scfg.Initial,
				scfg.Thereafter,
				samplerOpts...,
			)
		}))
	}

	if len(cfg.InitialFields) > 0 {
		fs := make([]Field, 0, len(cfg.InitialFields))
		keys := make([]string, 0, len(cfg.InitialFields))
		for k := range cfg.InitialFields {
			keys = append(keys, k)
		}
		// Sort the keys so the field order (and thus output) is deterministic.
		sort.Strings(keys)
		for _, k := range keys {
			fs = append(fs, Any(k, cfg.InitialFields[k]))
		}
		opts = append(opts, Fields(fs...))
	}
	return opts
}
// openSinks opens the configured output and error-output paths, closing
// the primary sinks again if the error sinks fail to open.
func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
	outSink, closeOut, err := Open(cfg.OutputPaths...)
	if err != nil {
		return nil, nil, err
	}
	errSink, _, err := Open(cfg.ErrorOutputPaths...)
	if err == nil {
		return outSink, errSink, nil
	}
	// Don't leak the already-opened output sinks on failure.
	closeOut()
	return nil, nil, err
}
// buildEncoder instantiates the encoder registered under cfg.Encoding
// (e.g. "json" or "console") with cfg.EncoderConfig.
func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
	return newEncoder(cfg.Encoding, cfg.EncoderConfig)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/writer.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/writer.go | // Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"io"
"go.uber.org/zap/zapcore"
"go.uber.org/multierr"
)
// Open is a high-level wrapper that takes a variadic number of URLs, opens or
// creates each of the specified resources, and combines them into a locked
// WriteSyncer. It also returns any error encountered and a function to close
// any opened files.
//
// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
// scheme and URLs with the "file" scheme. Third-party code may register
// factories for other schemes using RegisterSink.
//
// URLs with the "file" scheme must use absolute paths on the local
// filesystem. No user, password, port, fragments, or query parameters are
// allowed, and the hostname must be empty or "localhost".
//
// Since it's common to write logs to the local filesystem, URLs without a
// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
// a scheme, the special paths "stdout" and "stderr" are interpreted as
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
	writers, closeAll, err := open(paths)
	if err != nil {
		return nil, nil, err
	}
	// Merge the individual sinks into a single mutex-guarded WriteSyncer.
	writer := CombineWriteSyncers(writers...)
	return writer, closeAll, nil
}
// open resolves each path through the sink registry, returning the opened
// WriteSyncers plus a function that closes all of them. If any path fails
// to open, every sink opened so far is closed and the accumulated error
// is returned.
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
	syncers := make([]zapcore.WriteSyncer, 0, len(paths))
	closers := make([]io.Closer, 0, len(paths))
	closeAll := func() {
		for _, c := range closers {
			_ = c.Close()
		}
	}

	var errs error
	for _, p := range paths {
		sink, err := _sinkRegistry.newSink(p)
		if err != nil {
			// Keep going: report every bad path, not just the first.
			errs = multierr.Append(errs, fmt.Errorf("open sink %q: %w", p, err))
			continue
		}
		syncers = append(syncers, sink)
		closers = append(closers, sink)
	}
	if errs != nil {
		closeAll()
		return nil, nil, errs
	}

	return syncers, closeAll, nil
}
// CombineWriteSyncers combines multiple WriteSyncers into a single, locked
// WriteSyncer. With no inputs it returns a no-op WriteSyncer.
//
// It's provided purely as a convenience; the result is no different from
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
	switch len(writers) {
	case 0:
		return zapcore.AddSync(io.Discard)
	default:
		return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/array.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/array.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"time"
"go.uber.org/zap/zapcore"
)
// Array constructs a field with the given key and ArrayMarshaler. It provides
// a flexible, but still type-safe and efficient, way to add array-like types
// to the logging context. The struct's MarshalLogArray method is called lazily.
func Array(key string, val zapcore.ArrayMarshaler) Field {
	return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
}
// Bools constructs a field that carries a slice of bools.
func Bools(key string, bs []bool) Field {
	return Array(key, bools(bs))
}
// ByteStrings constructs a field that carries a slice of []byte, each of which
// must be UTF-8 encoded text.
func ByteStrings(key string, bss [][]byte) Field {
	return Array(key, byteStringsArray(bss))
}
// Complex128s constructs a field that carries a slice of complex numbers.
func Complex128s(key string, nums []complex128) Field {
	return Array(key, complex128s(nums))
}
// Complex64s constructs a field that carries a slice of complex numbers.
func Complex64s(key string, nums []complex64) Field {
	return Array(key, complex64s(nums))
}
// Durations constructs a field that carries a slice of time.Durations.
func Durations(key string, ds []time.Duration) Field {
	return Array(key, durations(ds))
}
// Float64s constructs a field that carries a slice of floats.
func Float64s(key string, nums []float64) Field {
	return Array(key, float64s(nums))
}
// Float32s constructs a field that carries a slice of floats.
func Float32s(key string, nums []float32) Field {
	return Array(key, float32s(nums))
}
// Ints constructs a field that carries a slice of integers.
func Ints(key string, nums []int) Field {
	return Array(key, ints(nums))
}
// Int64s constructs a field that carries a slice of integers.
func Int64s(key string, nums []int64) Field {
	return Array(key, int64s(nums))
}
// Int32s constructs a field that carries a slice of integers.
func Int32s(key string, nums []int32) Field {
	return Array(key, int32s(nums))
}
// Int16s constructs a field that carries a slice of integers.
func Int16s(key string, nums []int16) Field {
	return Array(key, int16s(nums))
}
// Int8s constructs a field that carries a slice of integers.
func Int8s(key string, nums []int8) Field {
	return Array(key, int8s(nums))
}
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
//	type Author struct{ ... }
//	func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
//	var authors []Author = ...
//	logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
//	type Request struct{ ... }
//	func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
//	var requests []*Request = ...
//	logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
//	var requests []Request = ...
//	logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
	return Array(key, objects[T](values))
}
// objects adapts a slice of value-receiver ObjectMarshalers to
// zapcore.ArrayMarshaler.
type objects[T zapcore.ObjectMarshaler] []T
// MarshalLogArray appends each object to the array, stopping at the first
// error.
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for _, o := range os {
		if err := arr.AppendObject(o); err != nil {
			return err
		}
	}
	return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
	*T
	zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
//	type Request struct{ ... }
//	func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
//	var requests []Request = ...
//	logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
//	var requests []*Request = ...
//	logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
	return Array(key, objectValues[T, P](values))
}
// objectValues adapts a slice whose *element pointer* type implements
// zapcore.ObjectMarshaler.
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range os {
		// It is necessary for us to explicitly reference the "P" type.
		// We cannot simply pass "&os[i]" to AppendObject because its type
		// is "*T", which the type system does not consider as
		// implementing ObjectMarshaler.
		// Only the type "P" satisfies ObjectMarshaler, which we have
		// to convert "*T" to explicitly.
		var p P = &os[i]
		if err := arr.AppendObject(p); err != nil {
			return err
		}
	}
	return nil
}
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
	return Array(key, stringArray(ss))
}
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Objects like so:
//
//	type Request struct{ ... }
//	func (a Request) String() string
//
//	var requests []Request = ...
//	logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
	return Array(key, stringers[T](values))
}
// stringers adapts a slice of fmt.Stringers to zapcore.ArrayMarshaler;
// String is called lazily, at encoding time.
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for _, o := range os {
		arr.AppendString(o.String())
	}
	return nil
}
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
	return Array(key, times(ts))
}
// Uints constructs a field that carries a slice of unsigned integers.
func Uints(key string, nums []uint) Field {
	return Array(key, uints(nums))
}
// Uint64s constructs a field that carries a slice of unsigned integers.
func Uint64s(key string, nums []uint64) Field {
	return Array(key, uint64s(nums))
}
// Uint32s constructs a field that carries a slice of unsigned integers.
func Uint32s(key string, nums []uint32) Field {
	return Array(key, uint32s(nums))
}
// Uint16s constructs a field that carries a slice of unsigned integers.
func Uint16s(key string, nums []uint16) Field {
	return Array(key, uint16s(nums))
}
// Uint8s constructs a field that carries a slice of unsigned integers.
func Uint8s(key string, nums []uint8) Field {
	return Array(key, uint8s(nums))
}
// Uintptrs constructs a field that carries a slice of pointer addresses.
func Uintptrs(key string, us []uintptr) Field {
	return Array(key, uintptrs(us))
}
// Errors constructs a field that carries a slice of errors.
// (errArray is declared elsewhere in package zap.)
func Errors(key string, errs []error) Field {
	return Array(key, errArray(errs))
}
// Each of the following unexported slice types adapts a primitive slice to
// zapcore.ArrayMarshaler so that encoding happens lazily, element by element,
// through the encoder's typed Append* methods.
type bools []bool
func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range bs {
		arr.AppendBool(bs[i])
	}
	return nil
}
type byteStringsArray [][]byte
func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range bss {
		arr.AppendByteString(bss[i])
	}
	return nil
}
type complex128s []complex128
func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendComplex128(nums[i])
	}
	return nil
}
type complex64s []complex64
func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendComplex64(nums[i])
	}
	return nil
}
type durations []time.Duration
func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range ds {
		arr.AppendDuration(ds[i])
	}
	return nil
}
type float64s []float64
func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendFloat64(nums[i])
	}
	return nil
}
type float32s []float32
func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendFloat32(nums[i])
	}
	return nil
}
type ints []int
func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendInt(nums[i])
	}
	return nil
}
type int64s []int64
func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendInt64(nums[i])
	}
	return nil
}
type int32s []int32
func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendInt32(nums[i])
	}
	return nil
}
type int16s []int16
func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendInt16(nums[i])
	}
	return nil
}
type int8s []int8
func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendInt8(nums[i])
	}
	return nil
}
type stringArray []string
func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range ss {
		arr.AppendString(ss[i])
	}
	return nil
}
type times []time.Time
func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range ts {
		arr.AppendTime(ts[i])
	}
	return nil
}
type uints []uint
func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUint(nums[i])
	}
	return nil
}
type uint64s []uint64
func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUint64(nums[i])
	}
	return nil
}
type uint32s []uint32
func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUint32(nums[i])
	}
	return nil
}
type uint16s []uint16
func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUint16(nums[i])
	}
	return nil
}
type uint8s []uint8
func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUint8(nums[i])
	}
	return nil
}
type uintptrs []uintptr
func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
	for i := range nums {
		arr.AppendUintptr(nums[i])
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/field.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/field.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"math"
"time"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
// Field is an alias for zapcore.Field. Aliasing this type dramatically
// improves the navigability of this package's API documentation.
type Field = zapcore.Field
// _minTimeInt64 and _maxTimeInt64 bound the window of time.Time values
// whose UnixNano representation fits in an int64; Time falls back to
// TimeFullType outside this window.
var (
	_minTimeInt64 = time.Unix(0, math.MinInt64)
	_maxTimeInt64 = time.Unix(0, math.MaxInt64)
)
// Skip constructs a no-op field, which is often useful when handling invalid
// inputs in other Field constructors.
func Skip() Field {
	return Field{Type: zapcore.SkipType}
}
// nilField returns a field which will marshal explicitly as nil. See motivation
// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
// implementation here should be changed to reflect that.
func nilField(key string) Field { return Reflect(key, nil) }
// Binary constructs a field that carries an opaque binary blob.
//
// Binary data is serialized in an encoding-appropriate format. For example,
// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
// use ByteString.
func Binary(key string, val []byte) Field {
	return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
}
// Bool constructs a field that carries a bool.
func Bool(key string, val bool) Field {
	// The bool is packed into the Field's numeric slot: 1 for true, 0 for
	// false. This avoids boxing the value into an interface.
	var ival int64
	if val {
		ival = 1
	}
	return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
}
// Boolp constructs a field that carries a *bool. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Boolp(key string, val *bool) Field {
	if val == nil {
		return nilField(key)
	}
	return Bool(key, *val)
}
// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
// Binary.
func ByteString(key string, val []byte) Field {
	return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
}
// Complex128 constructs a field that carries a complex number. Unlike most
// numeric fields, this costs an allocation (to convert the complex128 to
// interface{}).
func Complex128(key string, val complex128) Field {
	return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
}
// Complex128p constructs a field that carries a *complex128. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Complex128p(key string, val *complex128) Field {
	if val == nil {
		return nilField(key)
	}
	return Complex128(key, *val)
}
// Complex64 constructs a field that carries a complex number. Unlike most
// numeric fields, this costs an allocation (to convert the complex64 to
// interface{}).
func Complex64(key string, val complex64) Field {
	return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
}
// Complex64p constructs a field that carries a *complex64. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Complex64p(key string, val *complex64) Field {
	if val == nil {
		return nilField(key)
	}
	return Complex64(key, *val)
}
// Float64 constructs a field that carries a float64. The way the
// floating-point value is represented is encoder-dependent, so marshaling is
// necessarily lazy.
func Float64(key string, val float64) Field {
	// The float's IEEE-754 bit pattern is stored in the Integer slot;
	// encoders reconstruct the float via math.Float64frombits.
	return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
}
// Float64p constructs a field that carries a *float64. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Float64p(key string, val *float64) Field {
	if val == nil {
		return nilField(key)
	}
	return Float64(key, *val)
}
// Float32 constructs a field that carries a float32. The way the
// floating-point value is represented is encoder-dependent, so marshaling is
// necessarily lazy.
func Float32(key string, val float32) Field {
	return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
}
// Float32p constructs a field that carries a *float32. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Float32p(key string, val *float32) Field {
	if val == nil {
		return nilField(key)
	}
	return Float32(key, *val)
}
// Int constructs a field with the given key and value.
func Int(key string, val int) Field {
	return Int64(key, int64(val))
}
// Intp constructs a field that carries a *int. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Intp(key string, val *int) Field {
	if val == nil {
		return nilField(key)
	}
	return Int(key, *val)
}
// Int64 constructs a field with the given key and value.
func Int64(key string, val int64) Field {
	return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
}
// Int64p constructs a field that carries a *int64. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Int64p(key string, val *int64) Field {
	if val == nil {
		return nilField(key)
	}
	return Int64(key, *val)
}
// Int32 constructs a field with the given key and value.
func Int32(key string, val int32) Field {
	return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
}
// Int32p constructs a field that carries a *int32. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Int32p(key string, val *int32) Field {
	if val == nil {
		return nilField(key)
	}
	return Int32(key, *val)
}
// Int16 constructs a field with the given key and value.
func Int16(key string, val int16) Field {
	return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
}
// Int16p constructs a field that carries a *int16. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Int16p(key string, val *int16) Field {
	if val == nil {
		return nilField(key)
	}
	return Int16(key, *val)
}
// Int8 constructs a field with the given key and value.
func Int8(key string, val int8) Field {
	return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
}
// Int8p constructs a field that carries a *int8. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Int8p(key string, val *int8) Field {
	if val == nil {
		return nilField(key)
	}
	return Int8(key, *val)
}
// String constructs a field with the given key and value.
func String(key string, val string) Field {
	return Field{Key: key, Type: zapcore.StringType, String: val}
}
// Stringp constructs a field that carries a *string. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Stringp(key string, val *string) Field {
	if val == nil {
		return nilField(key)
	}
	return String(key, *val)
}
// Uint constructs a field with the given key and value.
func Uint(key string, val uint) Field {
	return Uint64(key, uint64(val))
}
// Uintp constructs a field that carries a *uint. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uintp(key string, val *uint) Field {
	if val == nil {
		return nilField(key)
	}
	return Uint(key, *val)
}
// Uint64 constructs a field with the given key and value.
func Uint64(key string, val uint64) Field {
	// Stored as a bit-identical int64; encoders reinterpret the Integer
	// slot as unsigned based on the Uint64Type tag.
	return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
}
// Uint64p constructs a field that carries a *uint64. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uint64p(key string, val *uint64) Field {
	if val == nil {
		return nilField(key)
	}
	return Uint64(key, *val)
}
// Uint32 constructs a field with the given key and value.
func Uint32(key string, val uint32) Field {
	return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
}
// Uint32p constructs a field that carries a *uint32. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uint32p(key string, val *uint32) Field {
	if val == nil {
		return nilField(key)
	}
	return Uint32(key, *val)
}
// Uint16 constructs a field with the given key and value.
func Uint16(key string, val uint16) Field {
	return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
}
// Uint16p constructs a field that carries a *uint16. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uint16p(key string, val *uint16) Field {
	if val == nil {
		return nilField(key)
	}
	return Uint16(key, *val)
}
// Uint8 constructs a field with the given key and value.
func Uint8(key string, val uint8) Field {
	return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
}
// Uint8p constructs a field that carries a *uint8. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uint8p(key string, val *uint8) Field {
	if val == nil {
		return nilField(key)
	}
	return Uint8(key, *val)
}
// Uintptr constructs a field with the given key and value.
func Uintptr(key string, val uintptr) Field {
	return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
}
// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Uintptrp(key string, val *uintptr) Field {
	if val == nil {
		return nilField(key)
	}
	return Uintptr(key, *val)
}
// Reflect constructs a field with the given key and an arbitrary object. It uses
// an encoding-appropriate, reflection-based function to lazily serialize nearly
// any object into the logging context, but it's relatively slow and
// allocation-heavy. Outside tests, Any is always a better choice.
//
// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
// includes the error message in the final log output.
func Reflect(key string, val interface{}) Field {
	return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
}
// Namespace creates a named, isolated scope within the logger's context. All
// subsequent fields will be added to the new namespace.
//
// This helps prevent key collisions when injecting loggers into sub-components
// or third-party libraries.
func Namespace(key string) Field {
	return Field{Key: key, Type: zapcore.NamespaceType}
}
// Stringer constructs a field with the given key and the output of the value's
// String method. The Stringer's String method is called lazily.
func Stringer(key string, val fmt.Stringer) Field {
	return Field{Key: key, Type: zapcore.StringerType, Interface: val}
}
// Time constructs a Field with the given key and value. The encoder
// controls how the time is serialized.
func Time(key string, val time.Time) Field {
	// Times whose UnixNano would overflow an int64 are carried as a full
	// time.Time value instead of the compact nanosecond representation.
	if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
		return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
	}
	return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
}
// Timep constructs a field that carries a *time.Time. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Timep(key string, val *time.Time) Field {
	if val == nil {
		return nilField(key)
	}
	return Time(key, *val)
}
// Stack constructs a field that stores a stacktrace of the current goroutine
// under provided key. Keep in mind that taking a stacktrace is eager and
// expensive (relatively speaking); this function both makes an allocation and
// takes about two microseconds.
func Stack(key string) Field {
	return StackSkip(key, 1) // skip Stack
}
// StackSkip constructs a field similarly to Stack, but also skips the given
// number of frames from the top of the stacktrace.
func StackSkip(key string, skip int) Field {
	// Returning the stacktrace as a string costs an allocation, but saves us
	// from expanding the zapcore.Field union struct to include a byte slice. Since
	// taking a stacktrace is already so expensive (~10us), the extra allocation
	// is okay.
	return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
// controls how the duration is serialized.
func Duration(key string, val time.Duration) Field {
	// A Duration fits in the Field's Integer slot, so no allocation is needed.
	return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
}
// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
// and explicitly represent `nil` when appropriate.
func Durationp(key string, val *time.Duration) Field {
	if val == nil {
		return nilField(key)
	}
	return Duration(key, *val)
}
// Object constructs a field with the given key and ObjectMarshaler. It
// provides a flexible, but still type-safe and efficient, way to add map- or
// struct-like user-defined types to the logging context. The struct's
// MarshalLogObject method is called lazily.
func Object(key string, val zapcore.ObjectMarshaler) Field {
	return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
}
// Inline constructs a Field that is similar to Object, but it
// will add the elements of the provided ObjectMarshaler to the
// current namespace.
func Inline(val zapcore.ObjectMarshaler) Field {
	// Deliberately no Key: inline fields merge into the enclosing namespace.
	return zapcore.Field{
		Type: zapcore.InlineMarshalerType,
		Interface: val,
	}
}
// Dict constructs a field containing the provided key-value pairs.
// It acts similar to [Object], but with the fields specified as arguments.
func Dict(key string, val ...Field) Field {
	return dictField(key, val)
}
// We need a function with the signature (string, T) for zap.Any.
func dictField(key string, val []Field) Field {
	return Object(key, dictObject(val))
}
// dictObject adapts a slice of Fields to the zapcore.ObjectMarshaler
// interface so a Dict can be encoded lazily through Object.
type dictObject []Field
// MarshalLogObject encodes each contained Field into enc, in order.
func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	for _, f := range d {
		f.AddTo(enc)
	}
	return nil
}
// We discovered an issue where zap.Any can cause a performance degradation
// when used in new goroutines.
//
// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
// switch statement) of stack space for zap.Any when it takes the form:
//
//	switch v := v.(type) {
//	case string:
//		return String(key, v)
//	case int:
//		return Int(key, v)
//	// ...
//	default:
//		return Reflect(key, v)
//	}
//
// To avoid this, we use the type switch to assign a value to a single local variable
// and then call a function on it.
// The local variable is just a function reference so it doesn't allocate
// when converted to an interface{}.
//
// A fair bit of experimentation went into this.
// See also:
//
// - https://github.com/uber-go/zap/pull/1301
// - https://github.com/uber-go/zap/pull/1303
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
//
// See https://github.com/golang/go/issues/62077 for upstream issue.
type anyFieldC[T any] func(string, T) Field
// Any invokes the wrapped constructor after converting val back to its
// concrete type T. A nil val yields T's zero value, which every constructor
// already handles.
func (f anyFieldC[T]) Any(key string, val any) Field {
	v, _ := val.(T)
	// val is guaranteed to be a T, except when it's nil.
	return f(key, v)
}
// Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if
// necessary.
//
// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
// them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field {
	// A single constructor variable keeps the compiler from reserving stack
	// space for one zap.Field per switch arm (see the comment on anyFieldC).
	var c interface{ Any(string, any) Field }
	switch value.(type) {
	// The marshaler interfaces come first so that user-defined types always
	// use their custom encoding, even if they also match a later case.
	case zapcore.ObjectMarshaler:
		c = anyFieldC[zapcore.ObjectMarshaler](Object)
	case zapcore.ArrayMarshaler:
		c = anyFieldC[zapcore.ArrayMarshaler](Array)
	case []Field:
		c = anyFieldC[[]Field](dictField)
	case bool:
		c = anyFieldC[bool](Bool)
	case *bool:
		c = anyFieldC[*bool](Boolp)
	case []bool:
		c = anyFieldC[[]bool](Bools)
	case complex128:
		c = anyFieldC[complex128](Complex128)
	case *complex128:
		c = anyFieldC[*complex128](Complex128p)
	case []complex128:
		c = anyFieldC[[]complex128](Complex128s)
	case complex64:
		c = anyFieldC[complex64](Complex64)
	case *complex64:
		c = anyFieldC[*complex64](Complex64p)
	case []complex64:
		c = anyFieldC[[]complex64](Complex64s)
	case float64:
		c = anyFieldC[float64](Float64)
	case *float64:
		c = anyFieldC[*float64](Float64p)
	case []float64:
		c = anyFieldC[[]float64](Float64s)
	case float32:
		c = anyFieldC[float32](Float32)
	case *float32:
		c = anyFieldC[*float32](Float32p)
	case []float32:
		c = anyFieldC[[]float32](Float32s)
	case int:
		c = anyFieldC[int](Int)
	case *int:
		c = anyFieldC[*int](Intp)
	case []int:
		c = anyFieldC[[]int](Ints)
	case int64:
		c = anyFieldC[int64](Int64)
	case *int64:
		c = anyFieldC[*int64](Int64p)
	case []int64:
		c = anyFieldC[[]int64](Int64s)
	case int32:
		c = anyFieldC[int32](Int32)
	case *int32:
		c = anyFieldC[*int32](Int32p)
	case []int32:
		c = anyFieldC[[]int32](Int32s)
	case int16:
		c = anyFieldC[int16](Int16)
	case *int16:
		c = anyFieldC[*int16](Int16p)
	case []int16:
		c = anyFieldC[[]int16](Int16s)
	case int8:
		c = anyFieldC[int8](Int8)
	case *int8:
		c = anyFieldC[*int8](Int8p)
	case []int8:
		c = anyFieldC[[]int8](Int8s)
	case string:
		c = anyFieldC[string](String)
	case *string:
		c = anyFieldC[*string](Stringp)
	case []string:
		c = anyFieldC[[]string](Strings)
	case uint:
		c = anyFieldC[uint](Uint)
	case *uint:
		c = anyFieldC[*uint](Uintp)
	case []uint:
		c = anyFieldC[[]uint](Uints)
	case uint64:
		c = anyFieldC[uint64](Uint64)
	case *uint64:
		c = anyFieldC[*uint64](Uint64p)
	case []uint64:
		c = anyFieldC[[]uint64](Uint64s)
	case uint32:
		c = anyFieldC[uint32](Uint32)
	case *uint32:
		c = anyFieldC[*uint32](Uint32p)
	case []uint32:
		c = anyFieldC[[]uint32](Uint32s)
	case uint16:
		c = anyFieldC[uint16](Uint16)
	case *uint16:
		c = anyFieldC[*uint16](Uint16p)
	case []uint16:
		c = anyFieldC[[]uint16](Uint16s)
	case uint8:
		c = anyFieldC[uint8](Uint8)
	case *uint8:
		c = anyFieldC[*uint8](Uint8p)
	case []byte:
		// []byte (== []uint8) is deliberately treated as a binary blob.
		c = anyFieldC[[]byte](Binary)
	case uintptr:
		c = anyFieldC[uintptr](Uintptr)
	case *uintptr:
		c = anyFieldC[*uintptr](Uintptrp)
	case []uintptr:
		c = anyFieldC[[]uintptr](Uintptrs)
	case time.Time:
		c = anyFieldC[time.Time](Time)
	case *time.Time:
		c = anyFieldC[*time.Time](Timep)
	case []time.Time:
		c = anyFieldC[[]time.Time](Times)
	case time.Duration:
		c = anyFieldC[time.Duration](Duration)
	case *time.Duration:
		c = anyFieldC[*time.Duration](Durationp)
	case []time.Duration:
		c = anyFieldC[[]time.Duration](Durations)
	// error and fmt.Stringer are matched after the concrete types above so
	// that, e.g., a time.Time (which is also a fmt.Stringer) is still
	// encoded as a time.
	case error:
		c = anyFieldC[error](NamedError)
	case []error:
		c = anyFieldC[[]error](Errors)
	case fmt.Stringer:
		c = anyFieldC[fmt.Stringer](Stringer)
	default:
		// Last resort: reflection-based encoding.
		c = anyFieldC[any](Reflect)
	}
	return c.Any(key, value)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/options.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/options.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"go.uber.org/zap/zapcore"
)
// An Option configures a Logger.
type Option interface {
	apply(*Logger)
}
// optionFunc wraps a func so it satisfies the Option interface.
type optionFunc func(*Logger)
// apply runs the wrapped function against the logger being configured.
func (f optionFunc) apply(log *Logger) {
	f(log)
}
// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
	return optionFunc(func(log *Logger) {
		log.core = f(log.core)
	})
}
// Hooks registers functions which will be called each time the Logger writes
// out an Entry. Repeated use of Hooks is additive.
//
// Hooks are useful for simple side effects, like capturing metrics for the
// number of emitted logs. More complex side effects, including anything that
// requires access to the Entry's structured fields, should be implemented as
// a zapcore.Core instead. See zapcore.RegisterHooks for details.
func Hooks(hooks ...func(zapcore.Entry) error) Option {
	return optionFunc(func(log *Logger) {
		log.core = zapcore.RegisterHooks(log.core, hooks...)
	})
}
// Fields adds fields to the Logger.
func Fields(fs ...Field) Option {
	return optionFunc(func(log *Logger) {
		// Delegates to the core's With, so the fields are encoded once and
		// shared by every subsequent entry.
		log.core = log.core.With(fs)
	})
}
// ErrorOutput sets the destination for errors generated by the Logger. Note
// that this option only affects internal errors; for sample code that sends
// error-level logs to a different location from info- and debug-level logs,
// see the package-level AdvancedConfiguration example.
//
// The supplied WriteSyncer must be safe for concurrent use. The Open and
// zapcore.Lock functions are the simplest ways to protect files with a mutex.
func ErrorOutput(w zapcore.WriteSyncer) Option {
	return optionFunc(func(log *Logger) {
		log.errorOutput = w
	})
}
// Development puts the logger in development mode, which makes DPanic-level
// logs panic instead of simply logging an error.
func Development() Option {
	return optionFunc(func(log *Logger) {
		log.development = true
	})
}
// AddCaller configures the Logger to annotate each message with the filename,
// line number, and function name of zap's caller. See also WithCaller.
func AddCaller() Option {
	return WithCaller(true)
}
// WithCaller configures the Logger to annotate each message with the filename,
// line number, and function name of zap's caller, or not, depending on the
// value of enabled. This is a generalized form of AddCaller.
func WithCaller(enabled bool) Option {
	return optionFunc(func(log *Logger) {
		log.addCaller = enabled
	})
}
// AddCallerSkip increases the number of callers skipped by caller annotation
// (as enabled by the AddCaller option). When building wrappers around the
// Logger and SugaredLogger, supplying this Option prevents zap from always
// reporting the wrapper code as the caller.
func AddCallerSkip(skip int) Option {
	return optionFunc(func(log *Logger) {
		// Additive: repeated use accumulates skips.
		log.callerSkip += skip
	})
}
// AddStacktrace configures the Logger to record a stack trace for all messages at
// or above a given level.
func AddStacktrace(lvl zapcore.LevelEnabler) Option {
	return optionFunc(func(log *Logger) {
		log.addStack = lvl
	})
}
// IncreaseLevel increase the level of the logger. It has no effect if
// the passed in level tries to decrease the level of the logger.
func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
	return optionFunc(func(log *Logger) {
		core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
		if err != nil {
			// Options can't return errors, so report to the logger's internal
			// error sink and leave the existing core untouched.
			fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
		} else {
			log.core = core
		}
	})
}
// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
// Zap will call this hook after writing a log statement with a Panic/DPanic level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
//
//	zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
//
// This is useful for testing Panic/DPanic log output.
func WithPanicHook(hook zapcore.CheckWriteHook) Option {
	return optionFunc(func(log *Logger) {
		log.onPanic = hook
	})
}
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
	return WithFatalHook(action)
}
// WithFatalHook sets a CheckWriteHook to run on fatal logs.
// Zap will call this hook after writing a log statement with a Fatal level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a fatal log message, but it will not exit the
// program.
//
//	zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
//
// It is important that the provided CheckWriteHook stops the control flow at
// the current statement to meet expectations of callers of the logger.
// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
// minimum.
func WithFatalHook(hook zapcore.CheckWriteHook) Option {
	return optionFunc(func(log *Logger) {
		log.onFatal = hook
	})
}
// WithClock specifies the clock used by the logger to determine the current
// time for logged entries. Defaults to the system clock with time.Now.
func WithClock(clock zapcore.Clock) Option {
	return optionFunc(func(log *Logger) {
		log.clock = clock
	})
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/sugar.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/sugar.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"go.uber.org/zap/zapcore"
"go.uber.org/multierr"
)
// Messages logged (as separate error entries) when the loosely-typed APIs
// receive malformed key-value arguments; see sweetenFields.
const (
	_oddNumberErrMsg = "Ignored key without a value."
	_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
	_multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
// For each log level, it exposes four methods:
//
//   - methods named after the log level for log.Print-style logging
//   - methods ending in "w" for loosely-typed structured logging
//   - methods ending in "f" for log.Printf-style logging
//   - methods ending in "ln" for log.Println-style logging
//
// For example, the methods for InfoLevel are:
//
//	Info(...any)           Print-style logging
//	Infow(...any)          Structured logging (read as "info with")
//	Infof(string, ...any)  Printf-style logging
//	Infoln(...any)         Println-style logging
type SugaredLogger struct {
	// base is the wrapped strongly-typed Logger; all output flows through it.
	base *Logger
}
// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
// is quite inexpensive, so it's reasonable for a single application to use
// both Loggers and SugaredLoggers, converting between them on the boundaries
// of performance-sensitive code.
func (s *SugaredLogger) Desugar() *Logger {
	base := s.base.clone()
	// Undo the two-frame caller skip added for the sugared wrappers
	// (NOTE(review): must stay in sync with Logger.Sugar, defined elsewhere).
	base.callerSkip -= 2
	return base
}
// Named adds a sub-scope to the logger's name. See Logger.Named for details.
func (s *SugaredLogger) Named(name string) *SugaredLogger {
	return &SugaredLogger{base: s.base.Named(name)}
}
// WithOptions clones the current SugaredLogger, applies the supplied Options,
// and returns the result. It's safe to use concurrently.
func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
	// Clone first so the receiver is never mutated.
	base := s.base.clone()
	for _, opt := range opts {
		opt.apply(base)
	}
	return &SugaredLogger{base: base}
}
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
//
//	 sugaredLogger.With(
//	   "hello", "world",
//	   "failure", errors.New("oh no"),
//	   Stack(),
//	   "count", 42,
//	   "user", User{Name: "alice"},
//	)
//
// is the equivalent of
//
//	unsugared.With(
//	  String("hello", "world"),
//	  String("failure", "oh no"),
//	  Stack(),
//	  Int("count", 42),
//	  Object("user", User{Name: "alice"}),
//	)
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
// forgiving: a separate error is logged, but the key-value pair is skipped
// and execution continues. Passing an orphaned key triggers similar behavior:
// panics in development and errors in production.
func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
	return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
// WithLazy adds a variadic number of fields to the logging context lazily.
// The fields are evaluated only if the logger is further chained with [With]
// or is written to with any of the log level methods.
// Until that occurs, the logger may retain references to objects inside the fields,
// and logging will reflect the state of an object at the time of logging,
// not the time of WithLazy().
//
// Similar to [With], fields added to the child don't affect the parent,
// and vice versa. Also, the keys in key-value pairs should be strings. In development,
// passing a non-string key panics, while in production it logs an error and skips the pair.
// Passing an orphaned key has the same behavior.
func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
	return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
}
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (s *SugaredLogger) Level() zapcore.Level {
	return zapcore.LevelOf(s.base.core)
}
// The methods below are thin wrappers: each delegates to s.log (Sprint /
// Sprintf semantics) or s.logln (Sprintln semantics) at a fixed level.
// Log logs the provided arguments at provided level.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
	s.log(lvl, "", args, nil)
}
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
	s.log(DebugLevel, "", args, nil)
}
// Info logs the provided arguments at [InfoLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) {
	s.log(InfoLevel, "", args, nil)
}
// Warn logs the provided arguments at [WarnLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) {
	s.log(WarnLevel, "", args, nil)
}
// Error logs the provided arguments at [ErrorLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) {
	s.log(ErrorLevel, "", args, nil)
}
// DPanic logs the provided arguments at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) {
	s.log(DPanicLevel, "", args, nil)
}
// Panic constructs a message with the provided arguments and panics.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) {
	s.log(PanicLevel, "", args, nil)
}
// Fatal constructs a message with the provided arguments and calls os.Exit.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) {
	s.log(FatalLevel, "", args, nil)
}
// Logf formats the message according to the format specifier
// and logs it at provided level.
func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
	s.log(lvl, template, args, nil)
}
// Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
	s.log(DebugLevel, template, args, nil)
}
// Infof formats the message according to the format specifier
// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) {
	s.log(InfoLevel, template, args, nil)
}
// Warnf formats the message according to the format specifier
// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
	s.log(WarnLevel, template, args, nil)
}
// Errorf formats the message according to the format specifier
// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
	s.log(ErrorLevel, template, args, nil)
}
// DPanicf formats the message according to the format specifier
// and logs it at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
	s.log(DPanicLevel, template, args, nil)
}
// Panicf formats the message according to the format specifier
// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
	s.log(PanicLevel, template, args, nil)
}
// Fatalf formats the message according to the format specifier
// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
	s.log(FatalLevel, template, args, nil)
}
// Logw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
	s.log(lvl, msg, nil, keysAndValues)
}
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
//
//	s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
	s.log(DebugLevel, msg, nil, keysAndValues)
}
// Infow logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
	s.log(InfoLevel, msg, nil, keysAndValues)
}
// Warnw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
	s.log(WarnLevel, msg, nil, keysAndValues)
}
// Errorw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
	s.log(ErrorLevel, msg, nil, keysAndValues)
}
// DPanicw logs a message with some additional context. In development, the
// logger then panics. (See DPanicLevel for details.) The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
	s.log(DPanicLevel, msg, nil, keysAndValues)
}
// Panicw logs a message with some additional context, then panics. The
// variadic key-value pairs are treated as they are in With.
func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
	s.log(PanicLevel, msg, nil, keysAndValues)
}
// Fatalw logs a message with some additional context, then calls os.Exit. The
// variadic key-value pairs are treated as they are in With.
func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
	s.log(FatalLevel, msg, nil, keysAndValues)
}
// Logln logs a message at provided level.
// Spaces are always added between arguments.
func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
	s.logln(lvl, args, nil)
}
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {
	s.logln(DebugLevel, args, nil)
}
// Infoln logs a message at [InfoLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Infoln(args ...interface{}) {
	s.logln(InfoLevel, args, nil)
}
// Warnln logs a message at [WarnLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Warnln(args ...interface{}) {
	s.logln(WarnLevel, args, nil)
}
// Errorln logs a message at [ErrorLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Errorln(args ...interface{}) {
	s.logln(ErrorLevel, args, nil)
}
// DPanicln logs a message at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are always added between arguments.
func (s *SugaredLogger) DPanicln(args ...interface{}) {
	s.logln(DPanicLevel, args, nil)
}
// Panicln logs a message at [PanicLevel] and panics.
// Spaces are always added between arguments.
func (s *SugaredLogger) Panicln(args ...interface{}) {
	s.logln(PanicLevel, args, nil)
}
// Fatalln logs a message at [FatalLevel] and calls os.Exit.
// Spaces are always added between arguments.
func (s *SugaredLogger) Fatalln(args ...interface{}) {
	s.logln(FatalLevel, args, nil)
}
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
	return s.base.Sync()
}
// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
	// If logging at this level is completely disabled, skip the overhead of
	// string formatting. Levels at or above DPanic always proceed so their
	// terminal side effects (panic / os.Exit) are never skipped.
	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
		return
	}
	msg := getMessage(template, fmtArgs)
	if ce := s.base.Check(lvl, msg); ce != nil {
		// Key-value context is sweetened into strongly-typed Fields only when
		// the entry will actually be written.
		ce.Write(s.sweetenFields(context)...)
	}
}
// logln message with Sprintln
func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
	// Same early-out as log: see the comment there.
	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
		return
	}
	msg := getMessageln(fmtArgs)
	if ce := s.base.Check(lvl, msg); ce != nil {
		ce.Write(s.sweetenFields(context)...)
	}
}
// getMessage renders a log message from an optional printf-style template and
// its arguments, choosing between Sprintf, Sprint, or no formatting at all.
func getMessage(template string, fmtArgs []interface{}) string {
	switch {
	case len(fmtArgs) == 0:
		// Nothing to interpolate: the template itself is the message.
		return template
	case template != "":
		// Printf-style formatting.
		return fmt.Sprintf(template, fmtArgs...)
	}
	// No template. A lone string argument passes through unchanged; anything
	// else is rendered with Sprint.
	if s, ok := fmtArgs[0].(string); ok && len(fmtArgs) == 1 {
		return s
	}
	return fmt.Sprint(fmtArgs...)
}
// getMessageln renders a message with Sprintln semantics (a space between
// every pair of operands) and strips the trailing newline Sprintln appends.
func getMessageln(fmtArgs []interface{}) string {
	s := fmt.Sprintln(fmtArgs...)
	// Sprintln always ends with "\n"; drop it.
	return s[:len(s)-1]
}
// sweetenFields converts a loosely-typed argument list — a mix of
// strongly-typed Fields, bare errors, and string-keyed key-value pairs — into
// strongly-typed Fields. Malformed input (dangling keys, non-string keys,
// repeated bare errors) is reported through the base logger rather than
// panicking here.
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
	if len(args) == 0 {
		return nil
	}
	var (
		// Allocate enough space for the worst case; if users pass only structured
		// fields, we shouldn't penalize them with extra allocations.
		fields = make([]Field, 0, len(args))
		invalid invalidPairs
		seenError bool
	)
	// Manual index loop: strongly-typed fields and errors consume one slot,
	// key-value pairs consume two.
	for i := 0; i < len(args); {
		// This is a strongly-typed field. Consume it and move on.
		if f, ok := args[i].(Field); ok {
			fields = append(fields, f)
			i++
			continue
		}
		// If it is an error, consume it and move on.
		if err, ok := args[i].(error); ok {
			if !seenError {
				// Only the first bare error becomes a field; later ones are
				// reported as separate error entries.
				seenError = true
				fields = append(fields, Error(err))
			} else {
				s.base.Error(_multipleErrMsg, Error(err))
			}
			i++
			continue
		}
		// Make sure this element isn't a dangling key.
		if i == len(args)-1 {
			s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
			break
		}
		// Consume this value and the next, treating them as a key-value pair. If the
		// key isn't a string, add this pair to the slice of invalid pairs.
		key, val := args[i], args[i+1]
		if keyStr, ok := key.(string); !ok {
			// Subsequent errors are likely, so allocate once up front.
			if cap(invalid) == 0 {
				invalid = make(invalidPairs, 0, len(args)/2)
			}
			invalid = append(invalid, invalidPair{i, key, val})
		} else {
			fields = append(fields, Any(keyStr, val))
		}
		i += 2
	}
	// If we encountered any invalid key-value pairs, log an error.
	if len(invalid) > 0 {
		s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
	}
	return fields
}
// invalidPair records a key-value pair whose key was not a string, along with
// its position in the original argument list, for diagnostic logging.
type invalidPair struct {
	// position is the index of the key within the original args slice.
	position int
	key, value interface{}
}
// MarshalLogObject encodes the pair's position, key, and value so the
// offending arguments appear verbatim in the error entry.
func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddInt64("position", int64(p.position))
	Any("key", p.key).AddTo(enc)
	Any("value", p.value).AddTo(enc)
	return nil
}
// invalidPairs is a loggable collection of invalidPair values.
type invalidPairs []invalidPair
// MarshalLogArray encodes every pair, accumulating (rather than
// short-circuiting on) per-element errors.
func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
	var err error
	for i := range ps {
		err = multierr.Append(err, enc.AppendObject(ps[i]))
	}
	return err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/doc.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package zap provides fast, structured, leveled logging.
//
// For applications that log in the hot path, reflection-based serialization
// and string formatting are prohibitively expensive - they're CPU-intensive
// and make many small allocations. Put differently, using json.Marshal and
// fmt.Fprintf to log tons of interface{} makes your application slow.
//
// Zap takes a different approach. It includes a reflection-free,
// zero-allocation JSON encoder, and the base Logger strives to avoid
// serialization overhead and allocations wherever possible. By building the
// high-level SugaredLogger on that foundation, zap lets users choose when
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
// supports both structured and printf-style logging. Like log15 and go-kit,
// the SugaredLogger's structured logging APIs are loosely typed and accept a
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
//
// sugar := zap.NewExample().Sugar()
// defer sugar.Sync()
// sugar.Infow("failed to fetch URL",
// "url", "http://example.com",
// "attempt", 3,
// "backoff", time.Second,
// )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
// habit.
//
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
//
// logger := zap.NewExample()
// defer logger.Sync()
// logger.Info("failed to fetch URL",
// zap.String("url", "http://example.com"),
// zap.Int("attempt", 3),
// zap.Duration("backoff", time.Second),
// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
//
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
//
// logger, err := zap.NewProduction()
// if err != nil {
// log.Fatalf("can't initialize zap logger: %v", err)
// }
// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
// struct strikes the right balance between flexibility and convenience. See
// the package-level BasicConfiguration example for sample code.
//
// More unusual configurations (splitting output between files, sending logs
// to a message queue, etc.) are possible, but require direct use of
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
// exception aggregation service, like Sentry or Rollbar) typically requires
// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
// interfaces. See the zapcore documentation for details.
//
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
package zap // import "go.uber.org/zap"
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/logger.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/logger.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"fmt"
"io"
"os"
"strings"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
// A Logger provides fast, leveled, structured logging. All methods are safe
// for concurrent use.
//
// The Logger is designed for contexts in which every microsecond and every
// allocation matters, so its API intentionally favors performance and type
// safety over brevity. For most applications, the SugaredLogger strikes a
// better balance between performance and ergonomics.
type Logger struct {
	core zapcore.Core // destination for all entries; never nil after construction

	development bool                   // when true, DPanic-level entries also panic (see check)
	addCaller   bool                   // annotate entries with caller file/line/function
	onPanic     zapcore.CheckWriteHook // default is WriteThenPanic
	onFatal     zapcore.CheckWriteHook // default is WriteThenFatal

	name        string              // period-joined name built up via Named; "" if unnamed
	errorOutput zapcore.WriteSyncer // sink for the logger's own internal errors

	addStack zapcore.LevelEnabler // levels at which a stack trace is captured

	callerSkip int // extra stack frames to skip when resolving the caller

	clock zapcore.Clock // time source; zapcore.DefaultClock unless overridden
}
// New constructs a Logger from the given zapcore.Core and Options. A nil
// Core yields a no-op Logger.
//
// This is the most flexible constructor but also the most verbose; the
// presets (NewProduction, NewDevelopment, NewExample) or the Config struct
// are more convenient for typical use. For sample code, see the
// package-level AdvancedConfiguration example.
func New(core zapcore.Core, options ...Option) *Logger {
	if core == nil {
		return NewNop()
	}
	base := &Logger{
		core:        core,
		errorOutput: zapcore.Lock(os.Stderr),
		addStack:    zapcore.FatalLevel + 1, // above every level: stacks disabled by default
		clock:       zapcore.DefaultClock,
	}
	return base.WithOptions(options...)
}
// NewNop returns a no-op Logger: it never writes logs or internal errors
// and never runs user-defined hooks.
//
// Replacing the Core or error output via WithOptions can re-enable logging
// on a no-op Logger.
func NewNop() *Logger {
	nop := &Logger{
		core:        zapcore.NewNopCore(),
		errorOutput: zapcore.AddSync(io.Discard),
		addStack:    zapcore.FatalLevel + 1,
		clock:       zapcore.DefaultClock,
	}
	return nop
}
// NewProduction builds a sensible production Logger that writes InfoLevel
// and above to standard error as JSON.
//
// It's equivalent to NewProductionConfig().Build(...Option).
func NewProduction(options ...Option) (*Logger, error) {
	cfg := NewProductionConfig()
	return cfg.Build(options...)
}
// NewDevelopment builds a development Logger that writes DebugLevel and
// above to standard error in a human-friendly format.
//
// It's equivalent to NewDevelopmentConfig().Build(...Option).
func NewDevelopment(options ...Option) (*Logger, error) {
	cfg := NewDevelopmentConfig()
	return cfg.Build(options...)
}
// Must wraps a call to a function returning (*Logger, error) and panics if
// the error is non-nil. It is intended for variable initialization, e.g.:
//
//	var logger = zap.Must(zap.NewProduction())
func Must(logger *Logger, err error) *Logger {
	if err == nil {
		return logger
	}
	panic(err)
}
// NewExample builds a Logger for zap's testable examples: DebugLevel and
// above go to standard out as JSON, with the timestamp and caller omitted
// so example output stays short and deterministic.
func NewExample(options ...Option) *Logger {
	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		NameKey:        "logger",
		EncodeLevel:    zapcore.LowercaseLevelEncoder,
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder,
	}
	enc := zapcore.NewJSONEncoder(cfg)
	core := zapcore.NewCore(enc, os.Stdout, DebugLevel)
	return New(core).WithOptions(options...)
}
// Sugar wraps the Logger in the slower but more ergonomic SugaredLogger
// API. The conversion is inexpensive, so a single application can use both
// Loggers and SugaredLoggers, converting on the boundaries of
// performance-sensitive code.
func (log *Logger) Sugar() *SugaredLogger {
	c := log.clone()
	// The sugared wrapper adds two call frames, so bump the caller skip to
	// keep caller annotation pointing at user code.
	c.callerSkip += 2
	return &SugaredLogger{c}
}
// Named appends a path segment to the logger's name; segments are joined
// with periods. Loggers are unnamed by default.
func (log *Logger) Named(s string) *Logger {
	if s == "" {
		return log
	}
	child := log.clone()
	if log.name == "" {
		child.name = s
		return child
	}
	child.name = strings.Join([]string{child.name, s}, ".")
	return child
}
// WithOptions returns a clone of the logger with the supplied Options
// applied. The receiver is left unmodified, so it's safe for concurrent
// use.
func (log *Logger) WithOptions(opts ...Option) *Logger {
	child := log.clone()
	for _, o := range opts {
		o.apply(child)
	}
	return child
}
// With returns a child logger carrying the given structured context. Fields
// added to the child don't affect the parent, and vice versa. Fields that
// require evaluation (such as Objects) are evaluated immediately.
func (log *Logger) With(fields ...Field) *Logger {
	if len(fields) == 0 {
		return log
	}
	child := log.clone()
	child.core = child.core.With(fields)
	return child
}
// WithLazy returns a child logger whose extra fields are evaluated lazily:
// only when the child is further chained with [With] or actually writes an
// entry via a log-level method. Until then the logger may retain references
// to objects inside the fields, and logging reflects the state of those
// objects at log time, not at the time of the WithLazy call.
//
// This is a worthwhile optimization when the child logger is unlikely to be
// used, such as on error paths or rarely taken branches. As with [With],
// fields added to the child don't affect the parent, and vice versa.
func (log *Logger) WithLazy(fields ...Field) *Logger {
	if len(fields) == 0 {
		return log
	}
	wrap := WrapCore(func(core zapcore.Core) zapcore.Core {
		return zapcore.NewLazyWith(core, fields)
	})
	return log.WithOptions(wrap)
}
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (log *Logger) Level() zapcore.Level {
	// zapcore.LevelOf knows how to interrogate leveled and wrapped Cores.
	return zapcore.LevelOf(log.core)
}
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
	// check assumes a fixed call depth, so it must be invoked directly from
	// an exported Logger method such as this one.
	return log.check(lvl, msg)
}
// Log logs a message at the given level with the supplied fields plus any
// fields accumulated on the logger. Fields that require evaluation (such as
// Objects) are evaluated at the point of the Log call.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
	ce := log.check(lvl, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Debug logs msg at DebugLevel with the supplied fields plus any fields
// accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
	ce := log.check(DebugLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Info logs msg at InfoLevel with the supplied fields plus any fields
// accumulated on the logger.
func (log *Logger) Info(msg string, fields ...Field) {
	ce := log.check(InfoLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Warn logs msg at WarnLevel with the supplied fields plus any fields
// accumulated on the logger.
func (log *Logger) Warn(msg string, fields ...Field) {
	ce := log.check(WarnLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Error logs msg at ErrorLevel with the supplied fields plus any fields
// accumulated on the logger.
func (log *Logger) Error(msg string, fields ...Field) {
	ce := log.check(ErrorLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// DPanic logs msg at DPanicLevel with the supplied fields plus any fields
// accumulated on the logger.
//
// If the logger is in development mode, it then panics (DPanic stands for
// "development panic"). This is useful for catching errors that are
// recoverable but shouldn't ever happen.
func (log *Logger) DPanic(msg string, fields ...Field) {
	ce := log.check(DPanicLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Panic logs msg at PanicLevel with the supplied fields plus any fields
// accumulated on the logger, then panics — even if logging at PanicLevel is
// disabled.
func (log *Logger) Panic(msg string, fields ...Field) {
	ce := log.check(PanicLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Fatal logs msg at FatalLevel with the supplied fields plus any fields
// accumulated on the logger, then calls os.Exit(1) — even if logging at
// FatalLevel is disabled.
func (log *Logger) Fatal(msg string, fields ...Field) {
	ce := log.check(FatalLevel, msg)
	if ce == nil {
		return
	}
	ce.Write(fields...)
}
// Sync calls the underlying Core's Sync method, flushing any buffered log
// entries. Applications should take care to call Sync before exiting.
func (log *Logger) Sync() error {
	// Flushing is delegated entirely to the Core, which owns any buffering.
	return log.core.Sync()
}
// Core returns the Logger's underlying zapcore.Core, allowing callers to
// wrap or inspect it directly.
func (log *Logger) Core() zapcore.Core {
	return log.core
}
// Name returns the logger's accumulated, period-joined name (built up via
// Named), or the empty string if the logger is unnamed.
func (log *Logger) Name() string {
	return log.name
}
// clone returns a shallow copy of the logger. The copy shares the Core,
// error output, and clock with the original; callers (Named, With, Sugar,
// WithOptions) mutate the copy's fields before exposing it.
func (log *Logger) clone() *Logger {
	clone := *log
	return &clone
}
// check builds the CheckedEntry for a potential log write: it consults the
// Core, wires up terminal behavior (panic/fatal hooks), and annotates the
// entry with caller and stack information as configured.
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
	// Logger.check must always be called directly by a method in the
	// Logger interface (e.g., Check, Info, Fatal).
	// This skips Logger.check and the Info/Fatal/Check/etc. method that
	// called it.
	const callerSkipOffset = 2

	// Check the level first to reduce the cost of disabled log calls.
	// Since Panic and higher may exit, we skip the optimization for those levels.
	if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
		return nil
	}

	// Create basic checked entry thru the core; this will be non-nil if the
	// log message will actually be written somewhere.
	ent := zapcore.Entry{
		LoggerName: log.name,
		Time:       log.clock.Now(),
		Level:      lvl,
		Message:    msg,
	}
	ce := log.core.Check(ent, nil)
	willWrite := ce != nil

	// Set up any required terminal behavior. This runs even when willWrite
	// is false so that Panic/Fatal still terminate the process; note that
	// ce may be nil here, and After is relied upon to handle that.
	switch ent.Level {
	case zapcore.PanicLevel:
		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
	case zapcore.FatalLevel:
		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
	case zapcore.DPanicLevel:
		// DPanic only terminates in development mode.
		if log.development {
			ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
		}
	}

	// Only do further annotation if we're going to write this message; checked
	// entries that exist only for terminal behavior don't benefit from
	// annotation.
	if !willWrite {
		return ce
	}

	// Thread the error output through to the CheckedEntry.
	ce.ErrorOutput = log.errorOutput

	addStack := log.addStack.Enabled(ce.Level)
	if !log.addCaller && !addStack {
		return ce
	}

	// Adding the caller or stack trace requires capturing the callers of
	// this function. We'll share information between these two.
	stackDepth := stacktrace.First
	if addStack {
		stackDepth = stacktrace.Full
	}
	stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
	defer stack.Free()

	if stack.Count() == 0 {
		if log.addCaller {
			// Caller annotation was requested but the stack is empty; report
			// the failure on the logger's own error output.
			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
			_ = log.errorOutput.Sync()
		}
		return ce
	}

	frame, more := stack.Next()

	if log.addCaller {
		ce.Caller = zapcore.EntryCaller{
			Defined:  frame.PC != 0,
			PC:       frame.PC,
			File:     frame.File,
			Line:     frame.Line,
			Function: frame.Function,
		}
	}

	if addStack {
		buffer := bufferpool.Get()
		defer buffer.Free()

		stackfmt := stacktrace.NewFormatter(buffer)

		// We've already extracted the first frame, so format that
		// separately and defer to stackfmt for the rest.
		stackfmt.FormatFrame(frame)
		if more {
			stackfmt.FormatStack(stack)
		}
		ce.Stack = buffer.String()
	}

	return ce
}
// terminalHookOverride picks the CheckWriteHook to run after a Panic, Fatal,
// or development DPanic entry. A nil or WriteThenNoop override would let
// execution continue past a Panic/Fatal log call, which callers don't
// expect — for example:
//
//	f, err := os.Open(..)
//	if err != nil {
//		log.Fatal("cannot open", zap.Error(err))
//	}
//	fmt.Println(f.Name()) // would panic if execution continued
//
// so in those cases the default hook is used instead.
func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
	if override != nil && override != zapcore.WriteThenNoop {
		return override
	}
	return defaultHook
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/http_handler.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/http_handler.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"go.uber.org/zap/zapcore"
)
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
// # GET
//
// The GET request returns a JSON description of the current logging level like:
//
// {"level":"info"}
//
// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
//
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
// curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
// {"level":"info"}
//
// An example curl request could look like this:
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	err := lvl.serveHTTP(w, r)
	if err == nil {
		return
	}
	// Encoding the response itself failed; all we can do is report a 500.
	w.WriteHeader(http.StatusInternalServerError)
	fmt.Fprintf(w, "internal error: %v", err)
}
// serveHTTP implements the GET/PUT level endpoint, returning any error from
// encoding the response so the caller can surface it.
func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
	type errorResponse struct {
		Error string `json:"error"`
	}
	type payload struct {
		Level zapcore.Level `json:"level"`
	}

	enc := json.NewEncoder(w)

	switch r.Method {
	case http.MethodGet:
		// Report the current level.
		return enc.Encode(payload{Level: lvl.Level()})

	case http.MethodPut:
		requested, err := decodePutRequest(r.Header.Get("Content-Type"), r)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return enc.Encode(errorResponse{Error: err.Error()})
		}
		lvl.SetLevel(requested)
		return enc.Encode(payload{Level: lvl.Level()})

	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
		return enc.Encode(errorResponse{
			Error: "Only GET and PUT are supported.",
		})
	}
}
// decodePutRequest extracts the requested logging level from a PUT request,
// choosing the decoder based on the Content-Type header.
func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
	switch contentType {
	case "application/x-www-form-urlencoded":
		return decodePutURL(r)
	default:
		return decodePutJSON(r.Body)
	}
}
// decodePutURL reads the "level" form value (request body or query
// parameter) and parses it into a zapcore.Level.
func decodePutURL(r *http.Request) (zapcore.Level, error) {
	text := r.FormValue("level")
	if text == "" {
		return 0, errors.New("must specify logging level")
	}
	var l zapcore.Level
	if err := l.UnmarshalText([]byte(text)); err != nil {
		return 0, err
	}
	return l, nil
}
// decodePutJSON decodes a JSON request body of the form {"level":"..."} and
// returns the parsed level; a missing level field is an error.
func decodePutJSON(body io.Reader) (zapcore.Level, error) {
	var pld struct {
		Level *zapcore.Level `json:"level"`
	}
	err := json.NewDecoder(body).Decode(&pld)
	switch {
	case err != nil:
		return 0, fmt.Errorf("malformed request body: %v", err)
	case pld.Level == nil:
		return 0, errors.New("must specify logging level")
	}
	return *pld.Level, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/global.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/global.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"bytes"
"fmt"
"log"
"os"
"sync"
"go.uber.org/zap/zapcore"
)
const (
	// Caller-skip depths used when bridging the standard library's log
	// package: their sum is passed to AddCallerSkip so caller annotation
	// points at the original call site rather than the adapter layers.
	_stdLogDefaultDepth = 1
	_loggerWriterDepth  = 2

	// Template used to report internal invariant violations as zap bugs.
	_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
		"https://github.com/uber-go/zap/issues/new and reference this error: %v"
)
var (
	// _globalMu guards _globalL and _globalS, which ReplaceGlobals may
	// swap at runtime. Both start as no-op loggers.
	_globalMu sync.RWMutex
	_globalL  = NewNop()
	_globalS  = _globalL.Sugar()
)
// L returns the process-global Logger installed by ReplaceGlobals. It's
// safe for concurrent use.
func L() *Logger {
	_globalMu.RLock()
	defer _globalMu.RUnlock()
	return _globalL
}
// S returns the process-global SugaredLogger installed by ReplaceGlobals.
// It's safe for concurrent use.
func S() *SugaredLogger {
	_globalMu.RLock()
	defer _globalMu.RUnlock()
	return _globalS
}
// ReplaceGlobals swaps the global Logger and SugaredLogger for the supplied
// logger and returns a function that restores the previous globals. It's
// safe for concurrent use.
func ReplaceGlobals(logger *Logger) func() {
	_globalMu.Lock()
	defer _globalMu.Unlock()

	prev := _globalL
	_globalL = logger
	_globalS = logger.Sugar()
	return func() { ReplaceGlobals(prev) }
}
// NewStdLog returns a *log.Logger that forwards each message to l at
// InfoLevel. To redirect the standard library's package-global logging
// functions, use RedirectStdLog instead.
func NewStdLog(l *Logger) *log.Logger {
	skip := AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)
	writer := &loggerWriter{l.WithOptions(skip).Info}
	// No prefix and no flags: zap handles timestamps and annotations.
	return log.New(writer, "", 0)
}
// NewStdLogAt returns a *log.Logger that forwards each message to l at the
// requested level, or an error if the level has no corresponding method.
func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
	skip := AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)
	logFunc, err := levelToFunc(l.WithOptions(skip), level)
	if err != nil {
		return nil, err
	}
	// No prefix and no flags: zap handles timestamps and annotations.
	return log.New(&loggerWriter{logFunc}, "", 0), nil
}
// RedirectStdLog redirects the standard library's package-global logger to
// the supplied logger at InfoLevel. Since zap already handles caller
// annotations, timestamps, etc., it disables the standard library's
// annotations and prefixing.
//
// The returned function restores the original prefix and flags and resets
// the standard library's output to os.Stderr.
func RedirectStdLog(l *Logger) func() {
	restore, err := redirectStdLogAt(l, InfoLevel)
	if err != nil {
		// levelToFunc always recognizes InfoLevel, so this branch is
		// unreachable; treat it as an internal zap bug.
		panic(fmt.Sprintf(_programmerErrorTemplate, err))
	}
	return restore
}
// RedirectStdLogAt redirects output from the standard library's package-global
// logger to the supplied logger at the specified level. Since zap already
// handles caller annotations, timestamps, etc., it automatically disables the
// standard library's annotations and prefixing.
//
// It returns a function to restore the original prefix and flags and reset the
// standard library's output to os.Stderr.
func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
	// Delegates to the unexported implementation shared with RedirectStdLog.
	return redirectStdLogAt(l, level)
}
// redirectStdLogAt points the standard library's global logger at l (at the
// given level) and returns a function that undoes the redirection.
func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
	// Remember the prior configuration so it can be restored later.
	prevFlags := log.Flags()
	prevPrefix := log.Prefix()
	log.SetFlags(0)
	log.SetPrefix("")

	logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
	write, err := levelToFunc(logger, level)
	if err != nil {
		return nil, err
	}
	log.SetOutput(&loggerWriter{write})

	restore := func() {
		log.SetFlags(prevFlags)
		log.SetPrefix(prevPrefix)
		log.SetOutput(os.Stderr)
	}
	return restore, nil
}
// levelToFunc maps a zapcore.Level to the logger method that logs at that
// level, or returns an error for levels with no dedicated method.
func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
	switch lvl {
	case DebugLevel:
		return logger.Debug, nil
	case InfoLevel:
		return logger.Info, nil
	case WarnLevel:
		return logger.Warn, nil
	case ErrorLevel:
		return logger.Error, nil
	case DPanicLevel:
		return logger.DPanic, nil
	case PanicLevel:
		return logger.Panic, nil
	case FatalLevel:
		return logger.Fatal, nil
	default:
		return nil, fmt.Errorf("unrecognized level: %q", lvl)
	}
}
// loggerWriter adapts a zap logging function to io.Writer so it can back a
// standard-library *log.Logger.
type loggerWriter struct {
	logFunc func(msg string, fields ...Field)
}

// Write logs the whitespace-trimmed payload via logFunc.
//
// It reports the full input length as consumed: trimming is an internal
// detail, and returning fewer bytes than len(p) with a nil error would
// violate the io.Writer contract (a short write must be accompanied by an
// error).
func (l *loggerWriter) Write(p []byte) (int, error) {
	n := len(p)
	p = bytes.TrimSpace(p)
	l.logFunc(string(p))
	return n, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/encoder.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/encoder.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"errors"
"fmt"
"sync"
"go.uber.org/zap/zapcore"
)
var (
	// errNoEncoderNameSpecified is returned when an encoder is registered
	// or looked up with an empty name.
	errNoEncoderNameSpecified = errors.New("no encoder name specified")

	// _encoderNameToConstructor maps registry names to encoder
	// constructors; "console" and "json" are pre-registered.
	_encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
		"console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
			return zapcore.NewConsoleEncoder(encoderConfig), nil
		},
		"json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
			return zapcore.NewJSONEncoder(encoderConfig), nil
		},
	}

	// _encoderMutex guards _encoderNameToConstructor.
	_encoderMutex sync.RWMutex
)
// RegisterEncoder registers an encoder constructor under name so the Config
// struct can reference it. "json" and "console" are registered by default.
//
// Registering with an empty name, or a name that is already taken, returns
// an error.
func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
	_encoderMutex.Lock()
	defer _encoderMutex.Unlock()

	if name == "" {
		return errNoEncoderNameSpecified
	}
	if _, taken := _encoderNameToConstructor[name]; taken {
		return fmt.Errorf("encoder already registered for name %q", name)
	}
	_encoderNameToConstructor[name] = constructor
	return nil
}
// newEncoder builds the encoder registered under name, after validating
// that a time encoder is configured whenever a time key is set.
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
	if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
		return nil, errors.New("missing EncodeTime in EncoderConfig")
	}
	if name == "" {
		return nil, errNoEncoderNameSpecified
	}

	_encoderMutex.RLock()
	defer _encoderMutex.RUnlock()
	constructor, ok := _encoderNameToConstructor[name]
	if !ok {
		return nil, fmt.Errorf("no encoder registered for name %q", name)
	}
	return constructor(encoderConfig)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/hook.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/hook.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "go.uber.org/multierr"
// hooked wraps a Core and invokes a set of user-supplied callbacks for
// every entry the wrapped Core logs.
type hooked struct {
	Core
	funcs []func(Entry) error // callbacks run, in order, on each logged entry
}
var (
	// Compile-time checks that hooked satisfies the required interfaces.
	_ Core           = (*hooked)(nil)
	_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core so that each logged message also runs the
// given callback hooks. Execution of the callbacks is blocking.
//
// This gives users an easy way to register simple callbacks (e.g., metrics
// collection) without implementing the full Core interface.
func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
	// Copy the hooks so later mutation of the caller's slice has no effect.
	funcs := make([]func(Entry) error, len(hooks))
	copy(funcs, hooks)
	return &hooked{Core: core, funcs: funcs}
}
// Level reports the minimum enabled level of the wrapped Core.
func (h *hooked) Level() Level {
	return LevelOf(h.Core)
}

// Check delegates the log/no-log decision to the wrapped Core. If the
// entry will be logged, this core registers itself on the CheckedEntry so
// that its hooks run on Write.
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
	downstream := h.Core.Check(ent, ce)
	if downstream == nil {
		return ce
	}
	return downstream.AddCore(ent, h)
}

// With returns a copy of this core whose wrapped Core carries the extra
// fields; the hook functions are shared with the receiver.
func (h *hooked) With(fields []Field) Core {
	return &hooked{Core: h.Core.With(fields), funcs: h.funcs}
}

// Write runs every registered hook against the entry and combines any
// errors. The wrapped Core already had a chance to register itself
// directly with the CheckedEntry, so it is not invoked here.
func (h *hooked) Write(ent Entry, _ []Field) error {
	var err error
	for _, fn := range h.funcs {
		err = multierr.Append(err, fn(ent))
	}
	return err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/sampler.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/sampler.go | // Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import (
"sync/atomic"
"time"
)
// _numLevels is the number of distinct log levels Zap defines;
// _countersPerLevel is the number of hash buckets kept per level.
const (
	_numLevels = _maxLevel - _minLevel + 1
	_countersPerLevel = 4096
)
// counter tracks how many times a message has been seen in the current
// tick window.
type counter struct {
	resetAt atomic.Int64
	counter atomic.Uint64
}
// counters is a fixed-size table of counters, bucketed by level and by a
// hash of the message text.
type counters [_numLevels][_countersPerLevel]counter
// newCounters allocates a zeroed counter table.
func newCounters() *counters {
	return &counters{}
}
// get returns the counter bucket for the given level and message key.
// Distinct messages may share a bucket on hash collision; sampling is
// deliberately best-effort.
func (cs *counters) get(lvl Level, key string) *counter {
	i := lvl - _minLevel
	j := fnv32a(key) % _countersPerLevel
	return &cs[i][j]
}
// fnv32a hashes s with 32-bit FNV-1a. It mirrors "hash/fnv" but avoids
// the []byte(string) allocation the stdlib API would force.
func fnv32a(s string) uint32 {
	const (
		offset32 = 2166136261
		prime32  = 16777619
	)
	h := uint32(offset32)
	for _, b := range []byte(s) {
		h = (h ^ uint32(b)) * prime32
	}
	return h
}
// IncCheckReset increments the counter and returns the post-increment
// count for the current tick window. If t falls past the window's reset
// deadline, the count restarts at 1 and a new deadline is set.
func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
	tn := t.UnixNano()
	resetAfter := c.resetAt.Load()
	if resetAfter > tn {
		// Still inside the current window: plain increment.
		return c.counter.Add(1)
	}
	c.counter.Store(1)
	newResetAfter := tn + tick.Nanoseconds()
	if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
		// We raced with another goroutine trying to reset, and it also reset
		// the counter to 1, so we need to reincrement the counter.
		return c.counter.Add(1)
	}
	return 1
}
// SamplingDecision is a decision represented as a bit field made by sampler.
// More decisions may be added in the future.
type SamplingDecision uint32
const (
	// LogDropped indicates that the Sampler dropped a log entry.
	LogDropped SamplingDecision = 1 << iota
	// LogSampled indicates that the Sampler sampled a log entry.
	LogSampled
)
// optionFunc wraps a func so it satisfies the SamplerOption interface.
type optionFunc func(*sampler)
// apply invokes the wrapped configuration function.
func (f optionFunc) apply(s *sampler) {
	f(s)
}
// SamplerOption configures a Sampler.
type SamplerOption interface {
	apply(*sampler)
}
// nopSamplingHook is the default hook used by sampler; it discards every
// decision.
func nopSamplingHook(Entry, SamplingDecision) {}
// SamplerHook registers a function which will be called when Sampler makes a
// decision.
//
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
//	var dropped atomic.Int64
//	zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
//		if dec&zapcore.LogDropped > 0 {
//			dropped.Inc()
//		}
//	})
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
	return optionFunc(func(s *sampler) {
		s.hook = hook
	})
}
// NewSamplerWithOptions creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
//
// Zap samples by logging the first N entries with a given level and message
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
// For example,
//
//	core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// logs the first 10 entries with the same level and message in a one-second
// interval as-is, then allows through every 5th such entry in that interval.
//
// If thereafter is zero, the Core drops all log entries after the first N in
// that interval.
//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
	s := &sampler{
		Core:   core,
		tick:   tick,
		counts: newCounters(),
		hook:   nopSamplingHook,
	}
	s.first = uint64(first)
	s.thereafter = uint64(thereafter)
	for _, opt := range opts {
		opt.apply(s)
	}
	return s
}
// sampler wraps a Core and drops a portion of repeated (level, message)
// entries within each tick window.
type sampler struct {
	Core
	counts *counters
	tick time.Duration
	first, thereafter uint64
	hook func(Entry, SamplingDecision)
}
// Compile-time interface conformance checks.
var (
	_ Core = (*sampler)(nil)
	_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
//
// Zap samples by logging the first N entries with a given level and message
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
// Keep in mind that zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
//
// Deprecated: use NewSamplerWithOptions.
func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
	return NewSamplerWithOptions(core, tick, first, thereafter)
}

// Level reports the minimum enabled level of the wrapped Core.
func (s *sampler) Level() Level {
	return LevelOf(s.Core)
}

// With attaches fields to the wrapped Core. The clone shares the counter
// table with the receiver, so sampling state is common to both.
func (s *sampler) With(fields []Field) Core {
	clone := *s
	clone.Core = s.Core.With(fields)
	return &clone
}
// Check implements Core. Within the current tick window the first `first`
// entries for a given (level, message) pass through; after that, only
// every `thereafter`-th entry does (or none, when thereafter is zero).
// Each decision is reported to the configured hook.
func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
	if !s.Enabled(ent.Level) {
		return ce
	}
	// Only levels inside Zap's standard range are sampled; anything else
	// bypasses sampling and goes straight to the wrapped Core.
	if ent.Level >= _minLevel && ent.Level <= _maxLevel {
		counter := s.counts.get(ent.Level, ent.Message)
		n := counter.IncCheckReset(ent.Time, s.tick)
		if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
			s.hook(ent, LogDropped)
			return ce
		}
		s.hook(ent, LogSampled)
	}
	return s.Core.Check(ent, ce)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/tee.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/tee.go | // Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "go.uber.org/multierr"
// multiCore fans every operation out to each of its member Cores.
type multiCore []Core
// Compile-time interface conformance checks.
var (
	_ leveledEnabler = multiCore(nil)
	_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
// Calling it with a single Core returns the input unchanged, and calling
// it with no input returns a no-op Core.
func NewTee(cores ...Core) Core {
	if len(cores) == 0 {
		return NewNopCore()
	}
	if len(cores) == 1 {
		return cores[0]
	}
	return multiCore(cores)
}
// With attaches the fields to every member Core and returns the resulting
// tee.
func (mc multiCore) With(fields []Field) Core {
	out := make(multiCore, 0, len(mc))
	for _, c := range mc {
		out = append(out, c.With(fields))
	}
	return out
}

// Level reports the most verbose (lowest) level enabled by any member.
func (mc multiCore) Level() Level {
	lowest := _maxLevel // mc is never empty
	for _, c := range mc {
		if lvl := LevelOf(c); lvl < lowest {
			lowest = lvl
		}
	}
	return lowest
}

// Enabled reports whether at least one member Core enables lvl.
func (mc multiCore) Enabled(lvl Level) bool {
	for _, c := range mc {
		if c.Enabled(lvl) {
			return true
		}
	}
	return false
}

// Check lets every member Core register itself on the CheckedEntry.
func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
	for _, c := range mc {
		ce = c.Check(ent, ce)
	}
	return ce
}

// Write sends the entry to every member Core, combining any errors.
func (mc multiCore) Write(ent Entry, fields []Field) error {
	var err error
	for _, c := range mc {
		err = multierr.Append(err, c.Write(ent, fields))
	}
	return err
}

// Sync flushes every member Core, combining any errors.
func (mc multiCore) Sync() error {
	var err error
	for _, c := range mc {
		err = multierr.Append(err, c.Sync())
	}
	return err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/entry.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/entry.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import (
"fmt"
"runtime"
"strings"
"time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
"go.uber.org/zap/internal/pool"
)
// _cePool recycles CheckedEntry values so a log call does not allocate
// one each time.
var _cePool = pool.New(func() *CheckedEntry {
	// Pre-allocate some space for cores.
	return &CheckedEntry{
		cores: make([]Core, 4),
	}
})
// getCheckedEntry takes a CheckedEntry from the pool and clears any state
// left over from its previous use.
func getCheckedEntry() *CheckedEntry {
	ce := _cePool.Get()
	ce.reset()
	return ce
}
// putCheckedEntry returns a CheckedEntry to the pool; nil is a no-op.
func putCheckedEntry(ce *CheckedEntry) {
	if ce == nil {
		return
	}
	_cePool.Put(ce)
}
// NewEntryCaller makes an EntryCaller from the return signature of
// runtime.Caller.
func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
	if !ok {
		// Defined stays false so formatters render "undefined".
		return EntryCaller{}
	}
	return EntryCaller{
		PC: pc,
		File: file,
		Line: line,
		Defined: true,
	}
}
// EntryCaller represents the caller of a logging function.
type EntryCaller struct {
	// Defined reports whether the remaining fields carry real caller
	// information.
	Defined bool
	PC uintptr
	File string
	Line int
	Function string
}
// String implements fmt.Stringer; it is equivalent to FullPath.
func (ec EntryCaller) String() string {
	return ec.FullPath()
}

// FullPath renders the caller as "/full/path/to/package/file:line", or
// "undefined" when no caller information was captured.
func (ec EntryCaller) FullPath() string {
	if !ec.Defined {
		return "undefined"
	}
	buf := bufferpool.Get()
	defer buf.Free()
	buf.AppendString(ec.File)
	buf.AppendByte(':')
	buf.AppendInt(int64(ec.Line))
	return buf.String()
}
// TrimmedPath renders the caller as "package/file:line", keeping only the
// leaf directory and file name, or "undefined" when no caller information
// was captured.
func (ec EntryCaller) TrimmedPath() string {
	if !ec.Defined {
		return "undefined"
	}
	// Paths produced by runtime.Caller use '/' even on Windows (see
	// https://github.com/golang/go/issues/3335 and
	// https://github.com/golang/go/issues/18151), so we deliberately split
	// on '/' rather than os.PathSeparator.
	last := strings.LastIndexByte(ec.File, '/')
	if last < 0 {
		return ec.FullPath()
	}
	secondLast := strings.LastIndexByte(ec.File[:last], '/')
	if secondLast < 0 {
		return ec.FullPath()
	}
	buf := bufferpool.Get()
	defer buf.Free()
	// Keep everything after the penultimate separator.
	buf.AppendString(ec.File[secondLast+1:])
	buf.AppendByte(':')
	buf.AppendInt(int64(ec.Line))
	return buf.String()
}
// An Entry represents a complete log message. The entry's structured context
// is already serialized, but the log level, time, message, and call site
// information are available for inspection and modification. Any fields left
// empty will be omitted when encoding.
//
// Entries are pooled, so any functions that accept them MUST be careful not to
// retain references to them.
type Entry struct {
	Level Level
	Time time.Time
	LoggerName string
	Message string
	Caller EntryCaller
	Stack string
}
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
//	if ce := logger.Check(...); ce != nil {
//		ce = ce.After(hook)
//		ce.Write(...)
//	}
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
	// OnWrite is invoked with the CheckedEntry that was written and a list
	// of fields added with that entry.
	//
	// The list of fields DOES NOT include fields that were already added
	// to the logger with the With method.
	OnWrite(*CheckedEntry, []Field)
}
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
const (
	// WriteThenNoop indicates that nothing special needs to be done. It's the
	// default behavior.
	WriteThenNoop CheckWriteAction = iota
	// WriteThenGoexit runs runtime.Goexit after Write.
	WriteThenGoexit
	// WriteThenPanic causes a panic after Write.
	WriteThenPanic
	// WriteThenFatal causes an os.Exit(1) after Write.
	WriteThenFatal
)
// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
// with the new CheckWriteHook interface which deprecates CheckWriteAction.
// WriteThenNoop (and any unknown action) intentionally does nothing.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
	switch a {
	case WriteThenGoexit:
		runtime.Goexit()
	case WriteThenPanic:
		panic(ce.Message)
	case WriteThenFatal:
		exit.With(1)
	}
}
// Compile-time check that CheckWriteAction satisfies CheckWriteHook.
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
	Entry
	// ErrorOutput, when non-nil, receives internal diagnostics (write
	// failures, pool misuse).
	ErrorOutput WriteSyncer
	dirty bool // best-effort detection of pool misuse
	after CheckWriteHook
	cores []Core
}
// reset clears all state so a pooled CheckedEntry can be reused safely.
func (ce *CheckedEntry) reset() {
	ce.Entry = Entry{}
	ce.ErrorOutput = nil
	ce.dirty = false
	ce.after = nil
	for i := range ce.cores {
		// don't keep references to cores
		ce.cores[i] = nil
	}
	ce.cores = ce.cores[:0]
}
// Write writes the entry to the stored Cores, returns any errors, and returns
// the CheckedEntry reference to a pool for immediate re-use. Finally, it
// executes any required CheckWriteAction.
func (ce *CheckedEntry) Write(fields ...Field) {
	if ce == nil {
		return
	}
	if ce.dirty {
		if ce.ErrorOutput != nil {
			// Make a best effort to detect unsafe re-use of this CheckedEntry.
			// If the entry is dirty, log an internal error; because the
			// CheckedEntry is being used after it was returned to the pool,
			// the message may be an amalgamation from multiple call sites.
			fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
			_ = ce.ErrorOutput.Sync() // ignore error
		}
		return
	}
	// Mark the entry used before writing so a re-entrant Write is caught.
	ce.dirty = true
	var err error
	for i := range ce.cores {
		err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
	}
	if err != nil && ce.ErrorOutput != nil {
		fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
		_ = ce.ErrorOutput.Sync() // ignore error
	}
	// Capture the hook before the entry goes back to the pool; the hook
	// (e.g. WriteThenPanic/WriteThenFatal) may never return.
	hook := ce.after
	if hook != nil {
		hook.OnWrite(ce, fields)
	}
	putCheckedEntry(ce)
}
// orInit returns ce when it is non-nil; otherwise it takes a fresh
// CheckedEntry from the pool, seeded with ent.
func (ce *CheckedEntry) orInit(ent Entry) *CheckedEntry {
	if ce == nil {
		ce = getCheckedEntry()
		ce.Entry = ent
	}
	return ce
}

// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
// used by Core.Check implementations, and is safe to call on nil CheckedEntry
// references.
func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
	ce = ce.orInit(ent)
	ce.cores = append(ce.cores, core)
	return ce
}

// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
//
// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
	return ce.After(ent, should)
}

// After sets this CheckEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
	ce = ce.orInit(ent)
	ce.after = hook
	return ce
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/level.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/level.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import (
"bytes"
"errors"
"fmt"
)
// errUnmarshalNilLevel is returned by UnmarshalText on a nil receiver.
var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
// A Level is a logging priority. Higher levels are more important.
type Level int8
const (
	// DebugLevel logs are typically voluminous, and are usually disabled in
	// production.
	DebugLevel Level = iota - 1
	// InfoLevel is the default logging priority.
	InfoLevel
	// WarnLevel logs are more important than Info, but don't need individual
	// human review.
	WarnLevel
	// ErrorLevel logs are high-priority. If an application is running smoothly,
	// it shouldn't generate any error-level logs.
	ErrorLevel
	// DPanicLevel logs are particularly important errors. In development the
	// logger panics after writing the message.
	DPanicLevel
	// PanicLevel logs a message, then panics.
	PanicLevel
	// FatalLevel logs a message, then calls os.Exit(1).
	FatalLevel
	// _minLevel and _maxLevel bound Zap's standard level range.
	_minLevel = DebugLevel
	_maxLevel = FatalLevel
	// InvalidLevel is an invalid value for Level.
	//
	// Core implementations may panic if they see messages of this level.
	InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseLevel(text string) (Level, error) {
	var level Level
	err := level.UnmarshalText([]byte(text))
	return level, err
}
// leveledEnabler is an optional interface that lets LevelOf query a
// LevelEnabler's minimum level directly instead of probing each level.
type leveledEnabler interface {
	LevelEnabler
	Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
//	func (c *core) Level() Level {
//		return c.currentLevel
//	}
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
//	func (c *coreWrapper) Level() Level {
//		return zapcore.LevelOf(c.wrappedCore)
//	}
func LevelOf(enab LevelEnabler) Level {
	if le, ok := enab.(leveledEnabler); ok {
		return le.Level()
	}
	// Probe from most to least verbose; the first enabled level is the
	// minimum.
	for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
		if enab.Enabled(lvl) {
			return lvl
		}
	}
	return InvalidLevel
}
// _lowercaseLevelNames and _uppercaseLevelNames hold the textual names of
// Zap's standard levels, indexed by l - _minLevel.
var (
	_lowercaseLevelNames = [...]string{"debug", "info", "warn", "error", "dpanic", "panic", "fatal"}
	_uppercaseLevelNames = [...]string{"DEBUG", "INFO", "WARN", "ERROR", "DPANIC", "PANIC", "FATAL"}
)

// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
	if l < _minLevel || l > _maxLevel {
		return fmt.Sprintf("Level(%d)", l)
	}
	return _lowercaseLevelNames[l-_minLevel]
}

// CapitalString returns an all-caps ASCII representation of the log level.
func (l Level) CapitalString() string {
	// Printing levels in all-caps is common enough that we should export this
	// functionality.
	if l < _minLevel || l > _maxLevel {
		return fmt.Sprintf("LEVEL(%d)", l)
	}
	return _uppercaseLevelNames[l-_minLevel]
}
// MarshalText marshals the Level to text. Note that the text representation
// drops the -Level suffix (see example).
func (l Level) MarshalText() ([]byte, error) {
	return []byte(l.String()), nil
}
// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
// expects the text representation of a Level to drop the -Level suffix (see
// example).
//
// In particular, this makes it easy to configure logging levels using YAML,
// TOML, or JSON files.
func (l *Level) UnmarshalText(text []byte) error {
	if l == nil {
		return errUnmarshalNilLevel
	}
	// Try the exact spelling first, then a lowercased copy, so mixed-case
	// input like "Debug" is still rejected only after both attempts fail.
	if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
		return fmt.Errorf("unrecognized level: %q", text)
	}
	return nil
}
// _textToLevel maps every accepted spelling (lower-case, ALL-CAPS, and the
// empty string) to its Level.
var _textToLevel = map[string]Level{
	"debug": DebugLevel, "DEBUG": DebugLevel,
	"info": InfoLevel, "INFO": InfoLevel,
	"": InfoLevel, // make the zero value useful
	"warn": WarnLevel, "WARN": WarnLevel,
	"error": ErrorLevel, "ERROR": ErrorLevel,
	"dpanic": DPanicLevel, "DPANIC": DPanicLevel,
	"panic": PanicLevel, "PANIC": PanicLevel,
	"fatal": FatalLevel, "FATAL": FatalLevel,
}

// unmarshalText assigns *l from an exact spelling, reporting whether the
// spelling was recognized.
func (l *Level) unmarshalText(text []byte) bool {
	lvl, ok := _textToLevel[string(text)]
	if !ok {
		return false
	}
	*l = lvl
	return true
}
// Set sets the level for the flag.Value interface.
func (l *Level) Set(s string) error {
	return l.UnmarshalText([]byte(s))
}
// Get gets the level for the flag.Getter interface.
func (l *Level) Get() interface{} {
	return *l
}
// Enabled returns true if the given level is at or above this level.
func (l Level) Enabled(lvl Level) bool {
	return lvl >= l
}
// LevelEnabler decides whether a given logging level is enabled when logging a
// message.
//
// Enablers are intended to be used to implement deterministic filters;
// concerns like sampling are better implemented as a Core.
//
// Each concrete Level value implements a static LevelEnabler which returns
// true for itself and all higher logging levels. For example WarnLevel.Enabled()
// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
// FatalLevel, but return false for InfoLevel and DebugLevel.
type LevelEnabler interface {
	Enabled(Level) bool
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/error.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/error.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import (
"fmt"
"reflect"
"go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
// name is added for the error message.
//
// If the error implements fmt.Formatter, a field with the name ${key}Verbose
// is also added with the full verbose error message.
//
// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
//	{
//		"error": err.Error(),
//		"errorVerbose": fmt.Sprintf("%+v", err),
//		"errorCauses": [
//			...
//		],
//	}
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
	// Try to capture panics (from nil references or otherwise) when calling
	// the Error() method
	defer func() {
		if rerr := recover(); rerr != nil {
			// If it's a nil pointer, just say "<nil>". The likeliest causes are a
			// error that fails to guard against nil or a nil pointer for a
			// value receiver, and in either case, "<nil>" is a nice result.
			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
				enc.AddString(key, "<nil>")
				return
			}
			retErr = fmt.Errorf("PANIC=%v", rerr)
		}
	}()
	basic := err.Error()
	enc.AddString(key, basic)
	// NOTE: errorGroup is checked before fmt.Formatter, so a multi-error
	// gets ${key}Causes rather than a Verbose field.
	switch e := err.(type) {
	case errorGroup:
		return enc.AddArray(key+"Causes", errArray(e.Errors()))
	case fmt.Formatter:
		verbose := fmt.Sprintf("%+v", e)
		if verbose != basic {
			// This is a rich error type, like those produced by
			// github.com/pkg/errors.
			enc.AddString(key+"Verbose", verbose)
		}
	}
	return nil
}
// errorGroup is satisfied by multi-errors such as go.uber.org/multierr's.
type errorGroup interface {
	// Provides read-only access to the underlying list of errors, preferably
	// without causing any allocs.
	Errors() []error
}
// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.

// errArray encodes a list of errors using the standard error encoding logic.
type errArray []error

// MarshalLogArray appends each non-nil error as an object; nil entries are
// skipped.
func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
	for _, e := range errs {
		if e == nil {
			continue
		}
		elem := newErrArrayElem(e)
		appendErr := arr.AppendObject(elem)
		elem.Free()
		if appendErr != nil {
			return appendErr
		}
	}
	return nil
}
// _errArrayElemPool recycles errArrayElem wrappers used while encoding
// error lists.
var _errArrayElemPool = pool.New(func() *errArrayElem {
	return &errArrayElem{}
})
// Encodes any error into a {"error": ...} re-using the same errors logic.
//
// May be passed in place of an array to build a single-element array.
type errArrayElem struct{ err error }
// newErrArrayElem takes a wrapper from the pool and binds it to err.
func newErrArrayElem(err error) *errArrayElem {
	e := _errArrayElemPool.Get()
	e.err = err
	return e
}
// MarshalLogArray encodes the single wrapped error as a one-element array.
func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
	return arr.AppendObject(e)
}
// MarshalLogObject encodes the wrapped error under the "error" key.
func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
	return encodeError("error", e.err, enc)
}
// Free clears the wrapper and returns it to the pool. The wrapper MUST NOT
// be used after calling Free.
func (e *errArrayElem) Free() {
	e.err = nil
	_errArrayElemPool.Put(e)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/lazy_with.go | cmd/vsphere-xcopy-volume-populator/vendor/go.uber.org/zap/zapcore/lazy_with.go | // Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "sync"
// lazyWithCore defers attaching fields to the wrapped Core until the first
// With or Check call. The sync.Once guards the one-time attachment.
type lazyWithCore struct {
	Core
	sync.Once
	// fields are held unencoded until the core is first used.
	fields []Field
}
// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
// the logger is written to (or is further chained in a non-lazy manner).
func NewLazyWith(core Core, fields []Field) Core {
	return &lazyWithCore{
		Core: core,
		fields: fields,
	}
}
// initOnce attaches the deferred fields to the wrapped Core exactly once.
func (d *lazyWithCore) initOnce() {
	d.Do(func() { d.Core = d.Core.With(d.fields) })
}

// With materializes the deferred fields before delegating to the wrapped
// Core.
func (d *lazyWithCore) With(fields []Field) Core {
	d.initOnce()
	return d.Core.With(fields)
}

// Check materializes the deferred fields before delegating to the wrapped
// Core.
func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
	d.initOnce()
	return d.Core.Check(e, ce)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.