repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status

// StatusCode is the status of a Span.
type StatusCode int32

const (
	// StatusCodeUnset is the default status.
	StatusCodeUnset StatusCode = 0
	// StatusCodeOK: the Span has been validated by an Application developer or
	// Operator to have completed successfully.
	StatusCodeOK StatusCode = 1
	// StatusCodeError: the Span contains an error.
	StatusCodeError StatusCode = 2
)

// statusCodeStrings maps a StatusCode to its human-readable name; the slice
// index is the StatusCode value.
var statusCodeStrings = []string{
	"Unset",
	"OK",
	"Error",
}

// String returns the human-readable name of s, or a fixed placeholder string
// for out-of-range values.
func (s StatusCode) String() string {
	if s >= 0 && int(s) < len(statusCodeStrings) {
		return statusCodeStrings[s]
	}
	return "<unknown telemetry.StatusCode>"
}
// The Status type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs.
type Status struct {
	// Message is a developer-facing human readable error message.
	Message string `json:"message,omitempty"`
	// Code is the status code.
	Code StatusCode `json:"code,omitempty"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"encoding/hex"
"errors"
"fmt"
)
const (
	// traceIDSize is the byte length of a W3C trace ID.
	traceIDSize = 16
	// spanIDSize is the byte length of a W3C span ID.
	spanIDSize = 8
)

// TraceID is a custom data type that is used for all trace IDs.
type TraceID [traceIDSize]byte

// String returns the hex string representation form of a TraceID.
func (tid TraceID) String() string {
	return hex.EncodeToString(tid[:])
}

// IsEmpty returns true if the trace ID is all zero bytes, and false if it
// contains at least one non-zero byte.
func (tid TraceID) IsEmpty() bool {
	return tid == [traceIDSize]byte{}
}

// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
// An empty (all-zero) ID encodes as the empty string.
func (tid TraceID) MarshalJSON() ([]byte, error) {
	if tid.IsEmpty() {
		return []byte(`""`), nil
	}
	return marshalJSON(tid[:])
}

// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
// quotes. The ID is zeroed before decoding.
func (tid *TraceID) UnmarshalJSON(data []byte) error {
	*tid = [traceIDSize]byte{}
	return unmarshalJSON(tid[:], data)
}

// SpanID is a custom data type that is used for all span IDs.
type SpanID [spanIDSize]byte

// String returns the hex string representation form of a SpanID.
func (sid SpanID) String() string {
	return hex.EncodeToString(sid[:])
}

// IsEmpty returns true if the span ID is all zero bytes, and false if it
// contains at least one non-zero byte.
func (sid SpanID) IsEmpty() bool {
	return sid == [spanIDSize]byte{}
}

// MarshalJSON converts span ID into a hex string enclosed in quotes.
// An empty (all-zero) ID encodes as the empty string.
func (sid SpanID) MarshalJSON() ([]byte, error) {
	if sid.IsEmpty() {
		return []byte(`""`), nil
	}
	return marshalJSON(sid[:])
}

// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
// The ID is zeroed before decoding.
func (sid *SpanID) UnmarshalJSON(data []byte) error {
	*sid = [spanIDSize]byte{}
	return unmarshalJSON(sid[:], data)
}
// marshalJSON encodes id as a double-quoted hexadecimal JSON string.
func marshalJSON(id []byte) ([]byte, error) {
	// Total output length: the hex digits plus the surrounding quote pair.
	n := hex.EncodedLen(len(id)) + 2
	out := make([]byte, n)
	out[0] = '"'
	out[n-1] = '"'
	hex.Encode(out[1:n-1], id)
	return out, nil
}
// unmarshalJSON decodes a hex string (optionally wrapped in double quotes)
// into dst. An empty source leaves dst unchanged; a length mismatch between
// the hex input and dst is an error.
func unmarshalJSON(dst []byte, src []byte) error {
	// Strip a surrounding pair of double quotes, if present.
	if n := len(src); n >= 2 && src[0] == '"' && src[n-1] == '"' {
		src = src[1 : n-1]
	}
	if len(src) == 0 {
		return nil
	}
	if hex.DecodedLen(len(src)) != len(dst) {
		return errors.New("invalid length for ID")
	}
	if _, err := hex.Decode(dst, src); err != nil {
		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
)
// Scope is the identifying values of the instrumentation scope.
type Scope struct {
	// Name is the name of the instrumentation scope.
	Name string `json:"name,omitempty"`
	// Version is the version of the instrumentation scope.
	Version string `json:"version,omitempty"`
	// Attrs are additional attributes of the scope.
	Attrs []Attr `json:"attributes,omitempty"`
	// DroppedAttrs is the number of attributes dropped from the scope.
	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
// Both lowerCamelCase and snake_case field names are accepted; unknown
// fields are skipped.
func (s *Scope) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	// The payload must be a JSON object.
	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid Scope type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}
		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid Scope field: %#v", keyIface)
		}
		switch key {
		case "name":
			err = decoder.Decode(&s.Name)
		case "version":
			err = decoder.Decode(&s.Version)
		case "attributes":
			err = decoder.Decode(&s.Attrs)
		case "droppedAttributesCount", "dropped_attributes_count":
			err = decoder.Decode(&s.DroppedAttrs)
		default:
			// Skip unknown.
		}
		if err != nil {
			return err
		}
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"encoding/json"
"strconv"
)
// protoInt64 represents the protobuf encoding of integers which can be either
// strings or integers.
type protoInt64 int64

// Int64 returns the protoInt64 as an int64.
func (i *protoInt64) Int64() int64 { return int64(*i) }

// UnmarshalJSON decodes both strings and integers.
func (i *protoInt64) UnmarshalJSON(data []byte) error {
	if data[0] != '"' {
		// Plain JSON number.
		var v int64
		if err := json.Unmarshal(data, &v); err != nil {
			return err
		}
		*i = protoInt64(v)
		return nil
	}
	// Quoted decimal string.
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return err
	}
	*i = protoInt64(v)
	return nil
}
// protoUint64 represents the protobuf encoding of integers which can be either
// strings or integers.
type protoUint64 uint64

// Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }

// UnmarshalJSON decodes both strings and integers.
func (i *protoUint64) UnmarshalJSON(data []byte) error {
	if data[0] != '"' {
		// Plain JSON number.
		var v uint64
		if err := json.Unmarshal(data, &v); err != nil {
			return err
		}
		*i = protoUint64(v)
		return nil
	}
	// Quoted decimal string.
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return err
	}
	*i = protoUint64(v)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
)
// Resource information.
type Resource struct {
	// Attrs are the set of attributes that describe the resource. Attribute
	// keys MUST be unique (it is not allowed to have more than one attribute
	// with the same key).
	Attrs []Attr `json:"attributes,omitempty"`
	// DroppedAttrs is the number of dropped attributes. If the value
	// is 0, then no attributes were dropped.
	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
// Both lowerCamelCase and snake_case field names are accepted; unknown
// fields are skipped.
func (r *Resource) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	// The payload must be a JSON object.
	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid Resource type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}
		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid Resource field: %#v", keyIface)
		}
		switch key {
		case "attributes":
			err = decoder.Decode(&r.Attrs)
		case "droppedAttributesCount", "dropped_attributes_count":
			err = decoder.Decode(&r.DroppedAttrs)
		default:
			// Skip unknown.
		}
		if err != nil {
			return err
		}
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate stringer -type=ValueKind -trimprefix=ValueKind
package telemetry
import (
"bytes"
"cmp"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"slices"
"strconv"
"unsafe"
)
// A Value represents a structured value.
// A zero value is valid and represents an empty value.
type Value struct {
	// Ensure forward compatibility by explicitly making this not comparable.
	noCmp [0]func() //nolint: unused // This is indeed used.

	// num holds the value for Int64, Float64, and Bool. It holds the length
	// for String, Bytes, Slice, Map.
	num uint64
	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
	// then the value of Value is in num as described above. Otherwise, it
	// contains the value wrapped in the appropriate type.
	any any
}

type (
	// stringptr represents a value in Value.any for KindString Values.
	stringptr *byte
	// bytesptr represents a value in Value.any for KindBytes Values.
	bytesptr *byte
	// sliceptr represents a value in Value.any for KindSlice Values.
	sliceptr *Value
	// mapptr represents a value in Value.any for KindMap Values.
	mapptr *Attr
)

// ValueKind is the kind of a [Value].
type ValueKind int

// ValueKind values.
const (
	ValueKindEmpty ValueKind = iota
	ValueKindBool
	ValueKindFloat64
	ValueKindInt64
	ValueKindString
	ValueKindBytes
	ValueKindSlice
	ValueKindMap
)
// valueKindStrings maps a ValueKind to its human-readable name; the slice
// index is the ValueKind value.
var valueKindStrings = []string{
	"Empty",
	"Bool",
	"Float64",
	"Int64",
	"String",
	"Bytes",
	"Slice",
	"Map",
}

// String returns the human-readable name of k, or a fixed placeholder string
// for out-of-range values.
func (k ValueKind) String() string {
	if k >= 0 && int(k) < len(valueKindStrings) {
		return valueKindStrings[k]
	}
	return "<unknown telemetry.ValueKind>"
}
// StringValue returns a new [Value] for a string. The string's bytes are
// referenced, not copied, so v must not be mutated afterwards.
func StringValue(v string) Value {
	return Value{
		num: uint64(len(v)),
		any: stringptr(unsafe.StringData(v)),
	}
}

// IntValue returns a [Value] for an int.
func IntValue(v int) Value { return Int64Value(int64(v)) }

// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
	return Value{num: uint64(v), any: ValueKindInt64}
}

// Float64Value returns a [Value] for a float64. The bits are stored in num.
func Float64Value(v float64) Value {
	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
}

// BoolValue returns a [Value] for a bool. true is stored as num == 1.
func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
	var n uint64
	if v {
		n = 1
	}
	return Value{num: n, any: ValueKindBool}
}

// BytesValue returns a [Value] for a byte slice. The passed slice must not be
// changed after it is passed.
func BytesValue(v []byte) Value {
	return Value{
		num: uint64(len(v)),
		any: bytesptr(unsafe.SliceData(v)),
	}
}

// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
// not be changed after it is passed.
func SliceValue(vs ...Value) Value {
	return Value{
		num: uint64(len(vs)),
		any: sliceptr(unsafe.SliceData(vs)),
	}
}

// MapValue returns a new [Value] for a slice of key-value pairs. The passed
// slice must not be changed after it is passed.
func MapValue(kvs ...Attr) Value {
	return Value{
		num: uint64(len(kvs)),
		any: mapptr(unsafe.SliceData(kvs)),
	}
}

// AsString returns the value held by v as a string. It returns "" for
// non-string Values.
func (v Value) AsString() string {
	if sp, ok := v.any.(stringptr); ok {
		return unsafe.String(sp, v.num)
	}
	// TODO: error handle
	return ""
}

// asString returns the value held by v as a string. It will panic if the Value
// is not KindString.
func (v Value) asString() string {
	return unsafe.String(v.any.(stringptr), v.num)
}

// AsInt64 returns the value held by v as an int64. It returns 0 for
// non-int64 Values.
func (v Value) AsInt64() int64 {
	if v.Kind() != ValueKindInt64 {
		// TODO: error handle
		return 0
	}
	return v.asInt64()
}

// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
// this will return garbage.
func (v Value) asInt64() int64 {
	// Assumes v.num was a valid int64 (overflow not checked).
	return int64(v.num) // nolint: gosec
}

// AsBool returns the value held by v as a bool. It returns false for
// non-bool Values.
func (v Value) AsBool() bool {
	if v.Kind() != ValueKindBool {
		// TODO: error handle
		return false
	}
	return v.asBool()
}

// asBool returns the value held by v as a bool. If v is not of KindBool, this
// will return garbage.
func (v Value) asBool() bool { return v.num == 1 }

// AsFloat64 returns the value held by v as a float64. It returns 0 for
// non-float64 Values.
func (v Value) AsFloat64() float64 {
	if v.Kind() != ValueKindFloat64 {
		// TODO: error handle
		return 0
	}
	return v.asFloat64()
}

// asFloat64 returns the value held by v as a float64. If v is not of
// KindFloat64, this will return garbage.
func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }

// AsBytes returns the value held by v as a []byte. It returns nil for
// non-bytes Values.
func (v Value) AsBytes() []byte {
	if sp, ok := v.any.(bytesptr); ok {
		return unsafe.Slice((*byte)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asBytes returns the value held by v as a []byte. It will panic if the Value
// is not KindBytes.
func (v Value) asBytes() []byte {
	return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
}

// AsSlice returns the value held by v as a []Value. It returns nil for
// non-slice Values.
func (v Value) AsSlice() []Value {
	if sp, ok := v.any.(sliceptr); ok {
		return unsafe.Slice((*Value)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asSlice returns the value held by v as a []Value. It will panic if the Value
// is not KindSlice.
func (v Value) asSlice() []Value {
	return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
}

// AsMap returns the value held by v as a []Attr. It returns nil for
// non-map Values.
func (v Value) AsMap() []Attr {
	if sp, ok := v.any.(mapptr); ok {
		return unsafe.Slice((*Attr)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asMap returns the value held by v as a []Attr. It will panic if the
// Value is not KindMap.
func (v Value) asMap() []Attr {
	return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
}
// Kind returns the Kind of v, derived from the dynamic type stored in v.any.
func (v Value) Kind() ValueKind {
	switch x := v.any.(type) {
	case ValueKind:
		return x
	case stringptr:
		return ValueKindString
	case bytesptr:
		return ValueKindBytes
	case sliceptr:
		return ValueKindSlice
	case mapptr:
		return ValueKindMap
	default:
		return ValueKindEmpty
	}
}

// Empty returns if v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }

// Equal returns if v is equal to w. Values of different kinds are never
// equal; map Values are compared order-insensitively (sorted by key).
func (v Value) Equal(w Value) bool {
	k1 := v.Kind()
	k2 := w.Kind()
	if k1 != k2 {
		return false
	}
	switch k1 {
	case ValueKindInt64, ValueKindBool:
		return v.num == w.num
	case ValueKindString:
		return v.asString() == w.asString()
	case ValueKindFloat64:
		return v.asFloat64() == w.asFloat64()
	case ValueKindSlice:
		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
	case ValueKindMap:
		sv := sortMap(v.asMap())
		sw := sortMap(w.asMap())
		return slices.EqualFunc(sv, sw, Attr.Equal)
	case ValueKindBytes:
		return bytes.Equal(v.asBytes(), w.asBytes())
	case ValueKindEmpty:
		return true
	default:
		// TODO: error handle
		return false
	}
}
// sortMap returns a copy of m ordered by attribute key. The input slice is
// not modified.
func sortMap(m []Attr) []Attr {
	sorted := slices.Clone(m)
	slices.SortFunc(sorted, func(x, y Attr) int {
		return cmp.Compare(x.Key, y.Key)
	})
	return sorted
}
// String returns Value's value as a string, formatted like [fmt.Sprint].
//
// The returned string is meant for debugging;
// the string representation is not stable.
func (v Value) String() string {
	switch v.Kind() {
	case ValueKindString:
		return v.asString()
	case ValueKindInt64:
		// Assumes v.num was a valid int64 (overflow not checked).
		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
	case ValueKindFloat64:
		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
	case ValueKindBool:
		return strconv.FormatBool(v.asBool())
	case ValueKindBytes:
		return fmt.Sprint(v.asBytes())
	case ValueKindMap:
		return fmt.Sprint(v.asMap())
	case ValueKindSlice:
		return fmt.Sprint(v.asSlice())
	case ValueKindEmpty:
		return "<nil>"
	default:
		// Try to handle this as gracefully as possible.
		//
		// Don't panic here. The goal here is to have developers find this
		// first if a ValueKind is not handled. It is
		// preferable to have user's open issue asking why their attributes
		// have a "unhandled: " prefix than say that their code is panicking.
		return fmt.Sprintf("<unhandled telemetry.ValueKind: %s>", v.Kind())
	}
}
// MarshalJSON encodes v into OTLP formatted JSON, wrapping the value in its
// kind-specific OTLP field (e.g. {"stringValue": ...}).
//
// NOTE(review): an empty Value returns (nil, nil); callers appear to rely on
// this rather than an explicit "null" — confirm before changing.
func (v *Value) MarshalJSON() ([]byte, error) {
	switch v.Kind() {
	case ValueKindString:
		return json.Marshal(struct {
			Value string `json:"stringValue"`
		}{v.asString()})
	case ValueKindInt64:
		// OTLP JSON encodes 64-bit integers as decimal strings.
		return json.Marshal(struct {
			Value string `json:"intValue"`
		}{strconv.FormatInt(int64(v.num), 10)})
	case ValueKindFloat64:
		return json.Marshal(struct {
			Value float64 `json:"doubleValue"`
		}{v.asFloat64()})
	case ValueKindBool:
		return json.Marshal(struct {
			Value bool `json:"boolValue"`
		}{v.asBool()})
	case ValueKindBytes:
		// []byte marshals as base64 per encoding/json.
		return json.Marshal(struct {
			Value []byte `json:"bytesValue"`
		}{v.asBytes()})
	case ValueKindMap:
		return json.Marshal(struct {
			Value struct {
				Values []Attr `json:"values"`
			} `json:"kvlistValue"`
		}{struct {
			Values []Attr `json:"values"`
		}{v.asMap()}})
	case ValueKindSlice:
		return json.Marshal(struct {
			Value struct {
				Values []Value `json:"values"`
			} `json:"arrayValue"`
		}{struct {
			Values []Value `json:"values"`
		}{v.asSlice()}})
	case ValueKindEmpty:
		return nil, nil
	default:
		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
	}
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
// Both lowerCamelCase and snake_case field names are accepted. Only the
// first recognized field is used; the rest are ignored.
func (v *Value) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	// The payload must be a JSON object.
	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid Value type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}
		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid Value key: %#v", keyIface)
		}
		switch key {
		case "stringValue", "string_value":
			var val string
			err = decoder.Decode(&val)
			*v = StringValue(val)
		case "boolValue", "bool_value":
			var val bool
			err = decoder.Decode(&val)
			*v = BoolValue(val)
		case "intValue", "int_value":
			// May arrive as a JSON number or a quoted decimal string.
			var val protoInt64
			err = decoder.Decode(&val)
			*v = Int64Value(val.Int64())
		case "doubleValue", "double_value":
			var val float64
			err = decoder.Decode(&val)
			*v = Float64Value(val)
		case "bytesValue", "bytes_value":
			// Bytes arrive base64 encoded.
			var val64 string
			if err := decoder.Decode(&val64); err != nil {
				return err
			}
			var val []byte
			val, err = base64.StdEncoding.DecodeString(val64)
			*v = BytesValue(val)
		case "arrayValue", "array_value":
			var val struct{ Values []Value }
			err = decoder.Decode(&val)
			*v = SliceValue(val.Values...)
		case "kvlistValue", "kvlist_value":
			var val struct{ Values []Attr }
			err = decoder.Decode(&val)
			*v = MapValue(val.Values...)
		default:
			// Skip unknown.
			continue
		}
		// Use first valid. Ignore the rest.
		return err
	}

	// Only unknown fields. Return nil without unmarshaling any value.
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
// Attr is a key-value pair.
type Attr struct {
	// Key is the attribute name.
	Key string `json:"key,omitempty"`
	// Value is the attribute value.
	Value Value `json:"value,omitempty"`
}

// String returns an Attr for a string value.
func String(key, value string) Attr {
	return Attr{key, StringValue(value)}
}

// Int64 returns an Attr for an int64 value.
func Int64(key string, value int64) Attr {
	return Attr{key, Int64Value(value)}
}

// Int returns an Attr for an int value.
func Int(key string, value int) Attr {
	return Int64(key, int64(value))
}

// Float64 returns an Attr for a float64 value.
func Float64(key string, value float64) Attr {
	return Attr{key, Float64Value(value)}
}

// Bool returns an Attr for a bool value.
func Bool(key string, value bool) Attr {
	return Attr{key, BoolValue(value)}
}

// Bytes returns an Attr for a []byte value.
// The passed slice must not be changed after it is passed.
func Bytes(key string, value []byte) Attr {
	return Attr{key, BytesValue(value)}
}

// Slice returns an Attr for a []Value value.
// The passed slice must not be changed after it is passed.
func Slice(key string, value ...Value) Attr {
	return Attr{key, SliceValue(value...)}
}

// Map returns an Attr for a map value.
// The passed slice must not be changed after it is passed.
func Map(key string, value ...Attr) Attr {
	return Attr{key, MapValue(value...)}
}

// Equal returns if a is equal to b: keys must match and values must be
// Value.Equal.
func (a Attr) Equal(b Attr) bool {
	return a.Key == b.Key && a.Value.Equal(b.Value)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go | cmd/vsphere-xcopy-volume-populator/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
/*
Package telemetry provides lightweight representations of OpenTelemetry
telemetry that are compatible with the OTLP JSON protobuf encoding.
*/
package telemetry
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/main.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/main.go | package main
import (
"certificate-tool/cmd"
"log"
)
// main runs the certificate-tool root command and exits the process with a
// non-zero status on failure.
func main() {
	if err := cmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/storage.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/storage.go | package storage
import (
"fmt"
)
// Storage describes a storage system's identifying details as reported by
// its management API.
type Storage struct {
	Vendor         string `yaml:"vendor"`
	VendorProduct  string `yaml:"vendorProduct"`
	ProductVersion string `yaml:"productVersion"`
	ConnectionType string `yaml:"connectionType"`
}

// StorageCredentials holds the connection and authentication details needed
// to query a storage system's management API.
type StorageCredentials struct {
	Hostname string
	Username string
	Password string
	// SSLSkipVerify disables TLS certificate verification when true.
	SSLSkipVerify bool
	// VendorProduct selects the backend implementation (e.g. "primera3par",
	// "ontap").
	VendorProduct string
}
// StorageInfo queries the storage backend selected by
// credentials.VendorProduct and returns its vendor, product, and version
// details. Supported products are "primera3par" and "ontap"; any other value
// is an error.
func StorageInfo(credentials StorageCredentials) (Storage, error) {
	storage := Storage{}
	switch credentials.VendorProduct {
	case "primera3par":
		i, err := getPrimera3ParSystemInfo(credentials.Hostname, credentials.Username, credentials.Password, credentials.SSLSkipVerify)
		if err != nil {
			return Storage{}, err
		}
		fmt.Printf("Storage system info %v\n", i)
		storage.Vendor = "HP"
		storage.VendorProduct = i.Model
		storage.ProductVersion = i.SystemVersion
	case "ontap":
		i, err := getOntapSystemInfo(credentials.Hostname, credentials.Username, credentials.Password, credentials.SSLSkipVerify)
		if err != nil {
			return Storage{}, err
		}
		fmt.Printf("Storage system info %v\n", i)
		storage.Vendor = "NetApp"
		storage.VendorProduct = i.Name
		storage.ProductVersion = i.Version.Full
	default:
		// Fixed typo in the original message: "into" -> "info".
		return storage, fmt.Errorf("storage system info retrieval is unsupported for %s", credentials.VendorProduct)
	}
	return storage, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/primera3par.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/primera3par.go | package storage
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
)
// SystemVersion models a version entry of the HPE 3PAR/Primera WSAPI.
// NOTE(review): not referenced by the functions in this file — presumably
// used elsewhere or retained for API completeness; confirm before removing.
type SystemVersion struct {
	Id            int    `json:"id"`
	VersionString string `json:"versionString"`
	Name          string `json:"name"`
}

// SystemInfo is the subset of the WSAPI /api/v1/system response consumed by
// this tool.
type SystemInfo struct {
	SystemVersion string `json:"systemVersion"`
	Model         string `json:"model"`
}
// getPrimera3ParSystemInfo authenticates against the HPE 3PAR/Primera WSAPI
// and fetches /api/v1/system, returning the system's model and version.
//
// apiURL is the base URL of the WSAPI endpoint. When skipSSLVerify is true,
// TLS certificate verification is disabled.
// NOTE(review): the HTTP client has no timeout; a hung endpoint blocks
// forever. Consider setting http.Client.Timeout.
func getPrimera3ParSystemInfo(apiURL, username, password string, skipSSLVerify bool) (SystemInfo, error) {
	// Obtain a WSAPI session key first; all system queries require it.
	sessionKey, err := getSessionKey(apiURL, username, password, skipSSLVerify)
	if err != nil {
		return SystemInfo{}, err
	}

	fullURL, err := url.JoinPath(apiURL, "/api/v1/system")
	if err != nil {
		return SystemInfo{}, fmt.Errorf("error constructing URL: %w", err)
	}

	req, err := http.NewRequest(http.MethodGet, fullURL, nil)
	if err != nil {
		return SystemInfo{}, fmt.Errorf("error creating HTTP request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-HP3PAR-WSAPI-SessionKey", sessionKey)

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: skipSSLVerify},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return SystemInfo{}, fmt.Errorf("error sending HTTP request: %w", err)
	}
	defer resp.Body.Close() // Ensure the body is closed after we're done.

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return SystemInfo{}, fmt.Errorf("error reading response body: %w", err)
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return SystemInfo{}, fmt.Errorf("API request failed with status %s and body: %s", resp.Status, string(body))
	}

	var systemInfo SystemInfo
	err = json.Unmarshal(body, &systemInfo)
	if err != nil {
		return SystemInfo{}, fmt.Errorf("error unmarshalling JSON: %w. Response body was: %s", err, string(body))
	}
	return systemInfo, nil
}
// getSessionKey authenticates against the HPE 3PAR/Primera WSAPI credentials
// endpoint and returns the session key to use in subsequent requests.
// NOTE(review): the HTTP client has no timeout; a hung endpoint blocks
// forever. Consider setting http.Client.Timeout.
func getSessionKey(hostname, username, password string, skipSSLVerify bool) (string, error) {
	endpoint := fmt.Sprintf("%s/api/v1/credentials", hostname)

	payload, err := json.Marshal(map[string]string{
		"user":     username,
		"password": password,
	})
	if err != nil {
		return "", fmt.Errorf("failed to encode JSON: %w", err)
	}

	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: skipSSLVerify},
		},
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %w", err)
	}

	// Authentication failures carry a structured {code, desc} payload; fall
	// back to the raw body if it does not parse.
	if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden {
		var apiErr struct {
			Code int    `json:"code"`
			Desc string `json:"desc"`
		}
		if err := json.Unmarshal(raw, &apiErr); err == nil {
			return "", fmt.Errorf("authentication failed: %s (code %d)", apiErr.Desc, apiErr.Code)
		}
		return "", fmt.Errorf("authentication failed with status %d: %s", resp.StatusCode, string(raw))
	}

	var parsed map[string]string
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return "", fmt.Errorf("failed to parse session key response: %w", err)
	}
	if key, ok := parsed["key"]; ok {
		return key, nil
	}
	return "", fmt.Errorf("failed to retrieve session key, response: %s", string(raw))
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/ontap.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/storage/ontap.go | package storage
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
)
// OntapSystem is the subset of the NetApp ONTAP /api/cluster response
// consumed by this tool.
type OntapSystem struct {
	Name    string `json:"name"`
	Version struct {
		Full string `json:"full"`
	}
}
// getOntapSystemInfo queries the NetApp ONTAP REST API (/api/cluster) and
// returns the cluster name and version.
//
// apiURL is the base URL of the ONTAP management endpoint; username/password
// are sent as HTTP basic auth. When skipSSLVerify is true, TLS certificate
// verification is disabled.
// NOTE(review): the HTTP client has no timeout; a hung endpoint blocks
// forever. Consider setting http.Client.Timeout.
func getOntapSystemInfo(apiURL, username, password string, skipSSLVerify bool) (OntapSystem, error) {
	fullURL, err := url.JoinPath(apiURL, "/api/cluster")
	if err != nil {
		return OntapSystem{}, fmt.Errorf("error constructing URL: %w", err)
	}

	req, err := http.NewRequest(http.MethodGet, fullURL, nil)
	if err != nil {
		return OntapSystem{}, fmt.Errorf("error creating HTTP request: %w", err)
	}
	req.SetBasicAuth(username, password)
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: skipSSLVerify},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return OntapSystem{}, fmt.Errorf("error sending HTTP request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return OntapSystem{}, fmt.Errorf("error reading response body: %w", err)
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return OntapSystem{}, fmt.Errorf("API request failed with status %s and body: %s", resp.Status, string(body))
	}

	// Removed leftover debug fmt.Println(string(body)): it dumped the full
	// cluster payload to stdout on every call.

	var systemInfo OntapSystem
	err = json.Unmarshal(body, &systemInfo)
	if err != nil {
		return OntapSystem{}, fmt.Errorf("error unmarshalling JSON: %w. Response body was: %s", err, string(body))
	}
	return systemInfo, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/vmware/vms.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/vmware/vms.go | package vmware
import (
"certificate-tool/internal/utils/osutils"
"context"
"fmt"
"log"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/vmware/govmomi/vmdk"
"k8s.io/klog/v2"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/guest"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
)
const (
	// defaultVMDKURL is the fallback cloud-image VMDK (Ubuntu 20.04 server)
	// used by ensureVmdk when the caller supplies no download URL.
	defaultVMDKURL = "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64.vmdk"
)
// VMConfig holds parameters for provisioning a VM.
// NOTE(review): this struct is not referenced anywhere in this file —
// createVM hardcodes guest ID, memory, CPUs and network; confirm whether it
// is used elsewhere or is dead code.
type VMConfig struct {
	GuestID     string // presumably a vSphere guest OS identifier — confirm
	MemoryMB    int    // memory size in MiB
	CPUs        int    // number of virtual CPUs
	Network     string // network / port group name
	Pool        string // resource pool
	CDDeviceKey string // CD-ROM device key
	Host        string // target ESXi host
}
// ensureVmdk returns an absolute local path to a VMDK to upload, resolved in
// this order:
//  1. localVmdkPath, when set and present on disk;
//  2. a previously downloaded copy cached in the working directory;
//  3. a fresh download of downloadVmdkURL (defaultVMDKURL when empty).
//
// Fixes vs. the previous version: duplicated debug prints removed, a stat
// failure on the cached file is no longer silently treated as a cache hit.
func ensureVmdk(downloadVmdkURL, localVmdkPath string) (string, error) {
	if downloadVmdkURL == "" {
		downloadVmdkURL = defaultVMDKURL
	}
	if localVmdkPath != "" {
		if _, err := os.Stat(localVmdkPath); err == nil {
			klog.Infof("Using existing local VMDK: %s", localVmdkPath)
			return localVmdkPath, nil
		} else if !os.IsNotExist(err) {
			return "", fmt.Errorf("unable to stat local VMDK %q: %w", localVmdkPath, err)
		}
	}
	u, err := url.Parse(downloadVmdkURL)
	if err != nil {
		return "", fmt.Errorf("invalid VMDK URL %q: %w", downloadVmdkURL, err)
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return "", fmt.Errorf("no local VMDK at %q and %q is not an HTTP URL", localVmdkPath, downloadVmdkURL)
	}
	// Cache the download under its base name in the working directory.
	dest := filepath.Base(u.Path)
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		klog.Infof("Downloading VMDK from %s → %s", downloadVmdkURL, dest)
		if err := osutils.ExecCommand("wget", "-O", dest, downloadVmdkURL); err != nil {
			return "", fmt.Errorf("failed to download VMDK: %w", err)
		}
	} else if err != nil {
		// Any stat error other than "not exist" is a real failure, not a
		// cache hit.
		return "", fmt.Errorf("unable to stat cached VMDK %q: %w", dest, err)
	} else {
		klog.Infof("Using cached download: %s", dest)
	}
	pwd, err := os.Getwd()
	if err != nil {
		return "", err
	}
	return path.Join(pwd, dest), nil
}
// fileExist reports whether fullPath exists on the datastore. The
// datastore's "No such file"/"No such directory" error strings are mapped to
// a plain false; any other stat error is returned wrapped.
func fileExist(ctx context.Context, ds *object.Datastore, fullPath string) (bool, error) {
	if _, err := ds.Stat(ctx, fullPath); err != nil {
		msg := err.Error()
		if strings.Contains(msg, "No such file") || strings.Contains(msg, "No such directory") {
			return false, nil
		}
		return false, fmt.Errorf("stat %q: %w", fullPath, err)
	}
	return true, nil
}
// uploadFile uploads localFilePath into the <vmName> folder on the datastore
// unless a file with that name already exists there. Either way it returns
// the full datastore path in "[datastore] vmName/base" form.
func uploadFile(ctx context.Context, ds *object.Datastore, vmName, localFilePath string) (string, error) {
	// Datastore paths are always forward-slash separated; path.Join keeps
	// this correct even on Windows (filepath.Join would emit backslashes).
	remote := path.Join(vmName, filepath.Base(localFilePath))
	fullRemotePath := fmt.Sprintf("[%s] %s", ds.Name(), remote)
	exist, err := fileExist(ctx, ds, remote)
	if err != nil {
		return "", fmt.Errorf("error checking remote file %s exist: %w", remote, err)
	}
	if exist {
		return fullRemotePath, nil
	}
	if err = ds.UploadFile(ctx, localFilePath, remote, nil); err != nil {
		return "", fmt.Errorf("upload ISO to %s failed: %w", remote, err)
	}
	return fullRemotePath, nil
}
// uploadVmdk imports the local VMDK at localFilePath into the datastore
// under a folder named after the VM, as a thin-provisioned disk. It returns
// the resulting datastore path ("[ds] vmName/base.vmdk").
// NOTE(review): Force is false, so behavior when the target already exists
// depends on vmdk.Import — confirm re-run semantics.
func uploadVmdk(
	ctx context.Context,
	client *govmomi.Client,
	ds *object.Datastore,
	dc *object.Datacenter,
	rp *object.ResourcePool,
	host *object.HostSystem,
	vmName string,
	localFilePath string) (string, error) {
	folders, err := dc.Folders(ctx)
	if err != nil {
		return "", fmt.Errorf("cannot get DC folders: %w", err)
	}
	remoteVmdkPath := fmt.Sprintf("[%s] %s/%s", ds.Name(), vmName, filepath.Base(localFilePath))
	log.Printf("Importing vmdk %s\n", remoteVmdkPath)
	err = vmdk.Import(
		ctx,
		client.Client,
		localFilePath,
		ds,
		vmdk.ImportParams{
			Datacenter: dc,
			Pool:       rp,
			Folder:     folders.VmFolder,
			Host:       host,
			Force:      false,
			Path:       vmName, // target folder on the datastore
			Type:       types.VirtualDiskTypeThin,
			Logger:     nil,
		},
	)
	if err != nil {
		return "", fmt.Errorf("import vmdk: %v", err)
	}
	return remoteVmdkPath, nil
}
// getExistingVMDKPath returns the datastore path of the first flat-backed
// virtual disk attached to vm (e.g. "[ds] vm/vm.vmdk").
//
// The previous doc comment claimed "-flat.vmdk" prioritization, which the
// code never did; the comment now matches the behavior and the duplicated
// log lines were collapsed into one.
func getExistingVMDKPath(ctx context.Context, vm *object.VirtualMachine, ds *object.Datastore) (string, error) {
	devices, err := vm.Device(ctx)
	if err != nil {
		return "", fmt.Errorf("failed to get VM devices: %w", err)
	}
	var vmdkPath string
	for _, device := range devices {
		disk, ok := device.(*types.VirtualDisk)
		if !ok {
			continue
		}
		if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
			vmdkPath = backing.FileName
			break
		}
	}
	if vmdkPath == "" {
		return "", fmt.Errorf("no VMDK found for VM %q", vm.Name())
	}
	// Sanity-check the "[datastore] relative/path.vmdk" format.
	if parts := strings.SplitN(vmdkPath, "] ", 2); len(parts) != 2 {
		return "", fmt.Errorf("invalid VMDK path format: %q", vmdkPath)
	}
	klog.Infof("Found existing VMDK for VM %q: %s", vm.Name(), vmdkPath)
	return vmdkPath, nil
}
// createVM creates a new VM named vmName on the given resource pool/host,
// attaching the existing VMDK at vmdkPath through a ParaVirtual SCSI
// controller. The .vmx file is placed at "[dsName] vmName/vmName.vmx".
//
// Fixes vs. the previous version: the caller's ctx is used instead of
// context.TODO(), and folder-lookup failures are returned instead of
// panicking inside a library function.
func createVM(ctx context.Context, cli *govmomi.Client,
	dc *object.Datacenter, rp *object.ResourcePool, host *object.HostSystem,
	vmName, vmdkPath, dsName string) (*object.VirtualMachine, error) {
	vmxPath := fmt.Sprintf("[%s] %s/%s.vmx", dsName, vmName, vmName)
	vmConfig := types.VirtualMachineConfigSpec{
		Name:     vmName,
		GuestId:  "Fedora64Guest",
		MemoryMB: 2048,
		NumCPUs:  2,
		Files: &types.VirtualMachineFileInfo{
			VmPathName: vmxPath,
		},
	}
	scsiController := addDefaultSCSIController(&vmConfig)
	diskBacking := &types.VirtualDiskFlatVer2BackingInfo{}
	diskBacking.FileName = vmdkPath
	diskBacking.DiskMode = string(types.VirtualDiskModePersistent)
	diskBacking.ThinProvisioned = types.NewBool(true)
	unit := int32(0)
	disk := &types.VirtualDisk{
		// Capacities left at zero: the disk is backed by an existing VMDK.
		CapacityInKB:    0,
		CapacityInBytes: 0,
		VirtualDevice: types.VirtualDevice{
			ControllerKey: scsiController.Key,
			UnitNumber:    &unit,
			Backing:       diskBacking,
		},
	}
	vmConfig.DeviceChange = append(vmConfig.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    disk,
	})
	log.Printf("Creating VM %s...", vmName)
	folders, err := dc.Folders(ctx)
	if err != nil {
		return nil, fmt.Errorf("cannot get DC folders: %w", err)
	}
	task, err := folders.VmFolder.CreateVM(ctx, vmConfig, rp, host)
	if err != nil {
		return nil, err
	}
	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return nil, err
	}
	log.Printf("created VM %s...", vmName)
	return object.NewVirtualMachine(cli.Client, info.Result.(types.ManagedObjectReference)), nil
}
// attachCDROM adds a CD-ROM device backed by the ISO at isoPath (a datastore
// path) to the VM. The device is connected at power-on and may be toggled
// from the guest. An existing IDE or SATA controller is required.
func attachCDROM(ctx context.Context, vm *object.VirtualMachine, isoPath string) error {
	devices, err := vm.Device(ctx)
	if err != nil {
		return fmt.Errorf("list devices: %w", err)
	}
	// Prefer an IDE controller; fall back to SATA.
	var controller types.BaseVirtualController
	if ide, err := devices.FindIDEController(""); err == nil {
		controller = ide
	} else if sata, err := devices.FindSATAController(""); err == nil {
		controller = sata
	} else {
		return fmt.Errorf("no IDE or SATA controller found: %w", err)
	}
	cdrom, err := devices.CreateCdrom(controller)
	if err != nil {
		return fmt.Errorf("create cdrom device: %w", err)
	}
	cdrom.Backing = &types.VirtualCdromIsoBackingInfo{
		VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
			FileName: isoPath,
		},
	}
	cdrom.Connectable = &types.VirtualDeviceConnectInfo{
		StartConnected:    true,
		Connected:         true,
		AllowGuestControl: true,
	}
	cdSpec := &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    cdrom,
	}
	spec := types.VirtualMachineConfigSpec{DeviceChange: []types.BaseVirtualDeviceConfigSpec{cdSpec}}
	// Apply the device change and block until the reconfigure task finishes.
	task, err := vm.Reconfigure(ctx, spec)
	if err != nil {
		return fmt.Errorf("reconfigure VM: %w", err)
	}
	if err := task.Wait(ctx); err != nil {
		return fmt.Errorf("wait reconfigure: %w", err)
	}
	klog.Infof("Attached ISO %s", isoPath)
	return nil
}
// powerOn powers the VM on and blocks until the power-on task completes.
func powerOn(ctx context.Context, vm *object.VirtualMachine) error {
	task, err := vm.PowerOn(ctx)
	if err != nil {
		return fmt.Errorf("start power-on task: %w", err)
	}
	if waitErr := task.Wait(ctx); waitErr != nil {
		return fmt.Errorf("power-on failed: %w", waitErr)
	}
	klog.Infof("Powered on VM %s", vm.InventoryPath)
	return nil
}
// waitForVMRegistration polls every five seconds until the VM is found in
// inventory, the timeout elapses, or ctx is cancelled (previously the loop
// slept unconditionally and ignored cancellation).
func waitForVMRegistration(ctx context.Context, finder *find.Finder, vmName string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if _, err := finder.VirtualMachine(ctx, vmName); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
		}
	}
	return fmt.Errorf("timeout waiting for VM %s registration", vmName)
}
// writeRandomDataToGuest launches dd inside the guest OS to write mb
// megabytes of random data to /tmp/<vmName>-data.bin. The process is started
// asynchronously — this function does NOT wait for dd to finish. Requires
// working guest operations (VMware Tools) and valid guest credentials.
func writeRandomDataToGuest(ctx context.Context, client *govmomi.Client, finder *find.Finder, vmName, user, pass string, mb int) error {
	vm, err := finder.VirtualMachine(ctx, vmName)
	if err != nil {
		return fmt.Errorf("cannot find VM %s: %w", vmName, err)
	}
	opMgr := guest.NewOperationsManager(client.Client, vm.Reference())
	auth := &types.NamePasswordAuthentication{Username: user, Password: pass}
	procMgr, err := opMgr.ProcessManager(ctx)
	if err != nil {
		return fmt.Errorf("guest process manager error: %w", err)
	}
	path := fmt.Sprintf("/tmp/%s-data.bin", vmName)
	spec := types.GuestProgramSpec{ProgramPath: "/bin/dd", Arguments: fmt.Sprintf("if=/dev/urandom of=%s bs=1M count=%d", path, mb)}
	if _, err := procMgr.StartProgram(ctx, auth, &spec); err != nil {
		return fmt.Errorf("start guest write process: %w", err)
	}
	klog.Infof("Random data write started inside VM %s: %d MB to %s", vmName, mb, path)
	return nil
}
// addDefaultSCSIController appends a ParaVirtual SCSI controller (device key
// 1000, bus 0, no bus sharing) to vmConfig.DeviceChange and returns it so
// callers can reference its key when attaching disks.
//
// Fixes vs. the previous version: the controller was first built with key
// 3000 and then its VirtualController was wholly overwritten and re-set to
// key 1000 — that dead construction is gone — and the log message no longer
// mislabels the device as "LSI Logic SAS" (it is ParaVirtual SCSI).
func addDefaultSCSIController(vmConfig *types.VirtualMachineConfigSpec) *types.ParaVirtualSCSIController {
	controller := &types.ParaVirtualSCSIController{
		VirtualSCSIController: types.VirtualSCSIController{
			SharedBus: types.VirtualSCSISharingNoSharing,
			VirtualController: types.VirtualController{
				VirtualDevice: types.VirtualDevice{Key: 1000},
				BusNumber:     0,
			},
		},
	}
	vmConfig.DeviceChange = append(vmConfig.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    controller,
	})
	log.Println("Added default ParaVirtual SCSI controller to VM configuration")
	return controller
}
// attachNetwork adds a vmxnet3 NIC connected to networkName to the VM and
// waits for the reconfigure task. It resolves the network via the default
// datacenter of a fresh finder.
func attachNetwork(
	ctx context.Context,
	cli *govmomi.Client,
	vm *object.VirtualMachine,
	networkName string,
) error {
	finder := find.NewFinder(cli.Client, false)
	dc, err := finder.DefaultDatacenter(ctx)
	if err != nil {
		return fmt.Errorf("get DC: %w", err)
	}
	finder.SetDatacenter(dc)
	netObj, err := finder.Network(ctx, networkName)
	if err != nil {
		return fmt.Errorf("find network %q: %w", networkName, err)
	}
	devices, err := vm.Device(ctx)
	if err != nil {
		return fmt.Errorf("device list: %w", err)
	}
	// Build the card's backing from the resolved network (handles standard
	// and distributed port groups uniformly).
	backing, err := netObj.EthernetCardBackingInfo(ctx)
	if err != nil {
		return fmt.Errorf("build NIC backing: %w", err)
	}
	nic, err := devices.CreateEthernetCard("vmxnet3", backing)
	if err != nil {
		return fmt.Errorf("create NIC: %w", err)
	}
	nicSpec := &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    nic,
	}
	spec := types.VirtualMachineConfigSpec{
		DeviceChange: []types.BaseVirtualDeviceConfigSpec{nicSpec},
	}
	task, err := vm.Reconfigure(ctx, spec)
	if err != nil {
		return fmt.Errorf("reconfigure: %w", err)
	}
	if err = task.Wait(ctx); err != nil {
		return fmt.Errorf("reconfigure task: %w", err)
	}
	return nil
}
// CreateVM ensures a VM named vmName exists in vSphere and returns the
// datastore path of its primary VMDK.
//
// If the VM already exists, its current VMDK path is returned. Otherwise a
// VMDK is obtained (local file or download), imported to the datastore, and
// a new VM is created with that disk, a CD-ROM backed by isoPath, and a NIC
// on "VM Network"; the function then waits up to waitTimeout for the VM to
// appear in inventory.
//
// Fixes vs. the previous version: errors are returned instead of
// log.Fatalf (which skipped the deferred cancel), and the timeout-scoped
// ctx replaces the stray context.Background() lookups.
func CreateVM(vmName, vsphereUrl, vsphereUser, vspherePassword, dataCenter,
	dataStore, pool, hostName, downloadVmdkURL, localVmdkPath, isoPath string, waitTimeout time.Duration) (string, error) {
	ctx, cancel, client, finder, dc, ds, rp, err := SetupVSphere(
		5*time.Minute, vsphereUrl, vsphereUser, vspherePassword, dataCenter, dataStore, pool)
	if err != nil {
		if cancel != nil {
			cancel()
		}
		return "", fmt.Errorf("vSphere setup failed: %w", err)
	}
	defer cancel()
	var host *object.HostSystem
	if hostName != "" {
		host, err = finder.HostSystem(ctx, hostName)
		if err != nil {
			return "", fmt.Errorf("failed to find host %q: %w", hostName, err)
		}
		klog.Infof("Using host: %s", host.Name())
	}
	vm, err := finder.VirtualMachine(ctx, vmName)
	if err != nil {
		// Only "not found" is tolerable here; it means we must create the VM.
		if _, ok := err.(*find.NotFoundError); !ok {
			return "", err
		}
	}
	if vm != nil {
		log.Printf("VM %q already exists. Attempting to retrieve its VMDK path from vSphere.", vmName)
		existingVmdkPath, err := getExistingVMDKPath(ctx, vm, ds)
		if err != nil {
			return "", fmt.Errorf("failed to get VMDK path for existing VM %q: %w", vmName, err)
		}
		return existingVmdkPath, nil
	}
	vmdkToUpload, err := ensureVmdk(downloadVmdkURL, localVmdkPath)
	if err != nil {
		return "", err
	}
	klog.Infof("vmdk to upload %s", vmdkToUpload)
	remoteVmdkPath, err := uploadVmdk(ctx, client, ds, dc, rp, host, vmName, vmdkToUpload)
	if err != nil {
		return "", err
	}
	klog.Infof("remote vmdk path %s", remoteVmdkPath)
	remoteIsoPath, err := uploadFile(ctx, ds, vmName, isoPath)
	if err != nil {
		return "", err
	}
	vm, err = createVM(ctx, client, dc, rp, host, vmName, remoteVmdkPath, ds.Name())
	if err != nil {
		return "", err
	}
	if err := attachCDROM(ctx, vm, remoteIsoPath); err != nil {
		return "", err
	}
	if err := attachNetwork(ctx, client, vm, "VM Network"); err != nil {
		return "", fmt.Errorf("add NIC: %w", err)
	}
	if err := waitForVMRegistration(ctx, finder, vmName, waitTimeout); err != nil {
		return "", err
	}
	klog.Infof("VM %s is ready.", vmName)
	return remoteVmdkPath, nil
}
// DestroyVM powers off (when needed) and destroys the named VM. A VM that
// does not exist is not an error.
//
// Fixes vs. the previous version: the destroy task is now started AFTER the
// power-off (it used to be launched first, against a possibly powered-on
// VM); errors are returned instead of log.Fatalf; and all calls share the
// timeout-scoped ctx instead of mixing in context.Background().
func DestroyVM(vmName, vsphereUrl, vsphereUser, vspherePassword, dataCenter,
	dataStore, pool string, timeout time.Duration) error {
	ctx, cancel, _, finder, _, _, _, err := SetupVSphere(
		timeout, vsphereUrl, vsphereUser, vspherePassword, dataCenter, dataStore, pool)
	if err != nil {
		if cancel != nil {
			cancel()
		}
		return fmt.Errorf("vSphere setup failed: %w", err)
	}
	defer cancel()
	vm, err := finder.VirtualMachine(ctx, vmName)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); ok {
			return nil
		}
		return err
	}
	powerState, err := vm.PowerState(ctx)
	if err != nil {
		return err
	}
	// Power off before destruction for On and Suspended states.
	if powerState == types.VirtualMachinePowerStatePoweredOn || powerState == types.VirtualMachinePowerStateSuspended {
		log.Printf("Powering off Virtual Machine '%s' before destruction...", vm.Name())
		offTask, err := vm.PowerOff(ctx)
		if err != nil {
			return fmt.Errorf("failed to initiate power off for VM %q: %w", vm.Name(), err)
		}
		if err = offTask.Wait(ctx); err != nil {
			// Best effort: some power-off failures still allow destruction.
			log.Printf("Warning: Power off task for VM '%s' failed or timed out: %v. Attempting destruction anyway.", vm.Name(), err)
		} else {
			log.Printf("Virtual Machine '%s' powered off successfully.", vm.Name())
		}
	} else {
		log.Printf("Virtual Machine '%s' is already powered off.", vm.Name())
	}
	log.Printf("Destroying VM %s", vmName)
	task, err := vm.Destroy(ctx)
	if err != nil {
		return err
	}
	log.Printf("Waiting for destroy task to complete for VM '%s' (Task ID: %s)...", vm.Name(), task.Reference())
	if err = task.Wait(ctx); err != nil {
		return fmt.Errorf("destroy task for VM %q failed or timed out: %w", vm.Name(), err)
	}
	log.Printf("Virtual Machine '%s' destroyed successfully!", vmName)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/vmware/client.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/vmware/client.go | package vmware
import (
"context"
"fmt"
"net/url"
"time"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
)
// SetupVSphere connects to vCenter (TLS verification disabled) and resolves
// the datacenter, datastore and resource pool by name. It returns a
// timeout-scoped context together with its cancel function; callers must
// call cancel — including on error — to release the context's resources.
//
// Fix vs. the previous version: the resource-pool error message interpolated
// dsName instead of poolName.
func SetupVSphere(timeout time.Duration, vcURL, user, pass, dcName, dsName, poolName string,
) (
	ctx context.Context,
	cancel context.CancelFunc,
	cli *govmomi.Client,
	finder *find.Finder,
	dc *object.Datacenter,
	ds *object.Datastore,
	rp *object.ResourcePool,
	err error,
) {
	ctx, cancel = context.WithTimeout(context.Background(), timeout)
	u, err := url.Parse(vcURL)
	if err != nil {
		err = fmt.Errorf("invalid vCenter URL: %w", err)
		return
	}
	u.User = url.UserPassword(user, pass)
	cli, err = govmomi.NewClient(ctx, u, true /* allowInsecure */)
	if err != nil {
		err = fmt.Errorf("vCenter connect error: %w", err)
		return
	}
	finder = find.NewFinder(cli.Client, false)
	dc, err = finder.Datacenter(ctx, dcName)
	if err != nil {
		err = fmt.Errorf("find datacenter %q: %w", dcName, err)
		return
	}
	finder.SetDatacenter(dc)
	ds, err = finder.Datastore(ctx, dsName)
	if err != nil {
		err = fmt.Errorf("find datastore %q: %w", dsName, err)
		return
	}
	rp, err = finder.ResourcePool(ctx, poolName)
	if err != nil {
		err = fmt.Errorf("find resource pool %q: %w", poolName, err)
		return
	}
	return
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/config/config.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/pkg/config/config.go | package config
import (
"bufio"
"certificate-tool/internal/utils"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"golang.org/x/term"
"gopkg.in/yaml.v3"
)
// Config is the tool's YAML configuration (see assets/config). Passwords are
// never read from YAML directly: the *-password-file fields point at files,
// and the resolved plaintext values are populated at load time into the
// yaml-ignored StoragePassword / VspherePassword fields.
type Config struct {
	TestNamespace              string      `yaml:"test-namespace"`
	Kubeconfig                 string      `yaml:"kubeconfig"`
	SecretName                 string      `yaml:"secret-name"`
	PvcYamlPath                string      `yaml:"pvc-yaml-path"`
	TestLabels                 string      `yaml:"test-labels"`
	TestImageLabel             string      `yaml:"test-image-label"`
	TestPopulatorImage         string      `yaml:"test-populator-image"`
	StoragePasswordFile        string      `yaml:"storage-password-file"`
	StorageUser                string      `yaml:"storage-user"`
	StorageURL                 string      `yaml:"storage-url"`
	StorageClassName           string      `yaml:"storage-class-name"`
	VspherePasswordFile        string      `yaml:"vsphere-password-file"`
	VsphereUser                string      `yaml:"vsphere-user"`
	VsphereURL                 string      `yaml:"vsphere-url"`
	VMs                        []*utils.VM `yaml:"vms"`
	Name                       string      `yaml:"name"`
	IsoPath                    string      `yaml:"iso-path"`
	DataStore                  string      `yaml:"vsphere-datastore"`
	DataCenter                 string      `yaml:"data-center"`
	WaitTimeout                string      `yaml:"wait-timeout"` // Will be parsed to time.Duration
	Pool                       string      `yaml:"vsphere-resource-pool"`
	DownloadVmdkURL            string      `yaml:"download-vmdk-url"`
	LocalVmdkPath              string      `yaml:"local-vmdk-path"`
	StorageSkipSSLVerification string      `yaml:"storage-skip-ssl-verification"`
	// Resolved at load time; never serialized.
	StoragePassword string `yaml:"-"`
	VspherePassword string `yaml:"-"`
}
const passwordsDir = ".passwords"
// readPasswordFromFile loads a password from filePath, stripping trailing
// CR/LF characters. An empty filePath yields an empty password and no error.
func readPasswordFromFile(filePath string) (string, error) {
	if filePath == "" {
		return "", nil
	}
	raw, err := os.ReadFile(filePath)
	if err != nil {
		return "", err
	}
	return strings.TrimRight(string(raw), "\r\n"), nil
}
// promptForPassword reads a password from the terminal without echoing it.
func promptForPassword(prompt string) (string, error) {
	fmt.Print(prompt)
	secret, err := term.ReadPassword(int(syscall.Stdin))
	fmt.Println() // terminate the line the user typed on
	if err != nil {
		return "", fmt.Errorf("failed to read password: %w", err)
	}
	return string(secret), nil
}
// savePasswordToFile persists password as passwordsDir/filename with
// owner-only permissions, creating the directory when needed.
func savePasswordToFile(password, filename string) error {
	if err := os.MkdirAll(passwordsDir, 0700); err != nil {
		return fmt.Errorf("failed to create passwords directory: %w", err)
	}
	target := filepath.Join(passwordsDir, filename)
	if err := os.WriteFile(target, []byte(password), 0600); err != nil {
		return fmt.Errorf("failed to write password file: %w", err)
	}
	return nil
}
// askToSavePassword asks on stdin whether to cache the password on disk and
// returns true only for an explicit "y"/"yes" answer (any read error → false).
func askToSavePassword(passwordType string) bool {
	fmt.Printf("Would you like to save the %s password to %s for future use? (y/N): ", passwordType, passwordsDir)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}
	switch strings.TrimSpace(strings.ToLower(line)) {
	case "y", "yes":
		return true
	default:
		return false
	}
}
// getOrPromptPassword resolves a password in three steps: an explicit
// passwordFile wins; otherwise a previously saved file under passwordsDir is
// used; otherwise the user is prompted interactively and offered the option
// to save the answer for next time.
func getOrPromptPassword(passwordFile, passwordType, defaultFilename string) (string, error) {
	if passwordFile != "" {
		return readPasswordFromFile(passwordFile)
	}
	savedPasswordPath := filepath.Join(passwordsDir, defaultFilename)
	if _, err := os.Stat(savedPasswordPath); err == nil {
		fmt.Printf("Found saved %s password in %s\n", passwordType, savedPasswordPath)
		return readPasswordFromFile(savedPasswordPath)
	}
	password, err := promptForPassword(fmt.Sprintf("Enter %s password: ", passwordType))
	if err != nil {
		return "", err
	}
	// Saving is strictly best-effort; a failure only warns.
	if askToSavePassword(passwordType) {
		if saveErr := savePasswordToFile(password, defaultFilename); saveErr != nil {
			fmt.Printf("Warning: Failed to save password: %v\n", saveErr)
		} else {
			fmt.Printf("Password saved to %s\n", filepath.Join(passwordsDir, defaultFilename))
		}
	}
	return password, nil
}
// LoadConfig reads the YAML configuration at configPath and then resolves
// the storage and vSphere passwords (configured file, cached .passwords
// entry, or interactive prompt — see getOrPromptPassword) into the
// yaml-ignored StoragePassword / VspherePassword fields.
func LoadConfig(configPath string) (*Config, error) {
	data, err := os.ReadFile(configPath)
	if err != nil {
		return nil, err
	}
	var cfg Config
	err = yaml.Unmarshal(data, &cfg)
	if err != nil {
		return nil, err
	}
	// Get storage password
	cfg.StoragePassword, err = getOrPromptPassword(
		cfg.StoragePasswordFile,
		"storage",
		"storage-password.txt",
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get storage password: %w", err)
	}
	// Get vSphere password
	cfg.VspherePassword, err = getOrPromptPassword(
		cfg.VspherePasswordFile,
		"vSphere",
		"vsphere-password.txt",
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get vSphere password: %w", err)
	}
	return &cfg, nil
}
// DefaultConfigPath returns the default location of the static configuration
// file, relative to the working directory.
func DefaultConfigPath() string {
	return filepath.Join("assets", "config", "static_values.yaml")
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/root.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/root.go | package cmd
import (
"certificate-tool/pkg/config" // Import the new config package
"os"
"time" // Import time package for Duration parsing
"github.com/spf13/cobra"
)
// RootCmd is the base command of the certificate-tool CLI. Subcommands
// attach themselves via RootCmd.AddCommand in the init functions of their
// respective files.
var RootCmd = &cobra.Command{
	Use:   "certificate-tool",
	Short: "CLI tool to orchestrate xcopy offload tests",
	Long:  `This tool creates the environment, a VM with data, configures PVC and CR, and finally runs xcopy offload tests.`,
}
var (
	cfgFile   string         // path of the YAML configuration file (--config flag)
	appConfig *config.Config // configuration loaded by initConfig before any command runs
)
// Execute runs the root command tree; it is the CLI's single entry point and
// returns whatever error the selected subcommand produced.
func Execute() error {
	return RootCmd.Execute()
}
// init wires configuration loading and the global --config flag.
//
// Note: prepare and createTestCmd register themselves with RootCmd in their
// own files' init functions; the extra AddCommand(prepare, createTestCmd)
// that used to live here registered each of them twice, so it was removed.
func init() {
	cobra.OnInitialize(initConfig) // Initialize config before any command runs
	RootCmd.PersistentFlags().StringVar(
		&cfgFile,
		"config", config.DefaultConfigPath(), // Set default path for config file
		"Path to the YAML configuration file",
	)
}
// initConfig loads the YAML configuration referenced by --config.
//
// Any load failure is now fatal: the previous version only panicked for a
// missing DEFAULT config file and silently ignored every other error,
// leaving appConfig nil and crashing later with an unrelated nil-pointer
// panic inside whichever command ran.
func initConfig() {
	var err error
	appConfig, err = config.LoadConfig(cfgFile)
	if err != nil {
		msg := "Failed to load configuration: " + err.Error()
		if os.IsNotExist(err) {
			msg += " (config file not found at " + cfgFile + ")"
		}
		panic(msg)
	}
}
// parseDuration converts s with time.ParseDuration, falling back to
// defaultDuration when s is empty or malformed.
func parseDuration(s string, defaultDuration time.Duration) time.Duration {
	if s == "" {
		return defaultDuration
	}
	if d, err := time.ParseDuration(s); err == nil {
		return d
	}
	// Malformed input: silently use the default, matching prior behavior.
	return defaultDuration
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/test-xcopy.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/test-xcopy.go | package cmd
import (
"certificate-tool/internal/testplan"
"context"
"fmt"
"os"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
var (
	// planYamlPath is the --plan-yaml-path flag: location of the test plan YAML.
	planYamlPath string
)
// createTestCmd runs the xcopy test plan: it parses the plan YAML, builds a
// Kubernetes client from the configured kubeconfig, copies connection and
// placement settings from the global appConfig into the plan, executes it,
// and prints the formatted results. Any failure exits the process non-zero.
var createTestCmd = &cobra.Command{
	Use:   "test-xcopy",
	Short: "Creates the test environment: PVC and CR instance",
	Run: func(cmd *cobra.Command, args []string) {
		data, err := os.ReadFile(planYamlPath) // planYamlPath remains a flag
		if err != nil {
			fmt.Printf("failed reading plan file: %v\n", err)
			os.Exit(1)
		}
		tp, err := testplan.Parse(data)
		if err != nil {
			fmt.Printf("failed parsing plan: %v\n", err)
			os.Exit(1)
		}
		// Use kubeconfig from appConfig
		config, err := clientcmd.BuildConfigFromFlags("", appConfig.Kubeconfig)
		if err != nil {
			fmt.Printf("kubeconfig error: %v\n", err)
			os.Exit(1)
		}
		clientset, err := kubernetes.NewForConfig(config)
		if err != nil {
			fmt.Printf("k8s client error: %v\n", err)
			os.Exit(1)
		}
		// Propagate global configuration into the test plan.
		tp.ClientSet = clientset
		tp.StorageClass = appConfig.StorageClassName
		tp.Namespace = appConfig.TestNamespace
		tp.VSphereURL = appConfig.VsphereURL
		tp.VSphereUser = appConfig.VsphereUser
		tp.VSpherePassword = appConfig.VspherePassword
		tp.Datacenter = appConfig.DataCenter
		tp.Datastore = appConfig.DataStore
		tp.ResourcePool = appConfig.Pool
		tp.VmdkDownloadURL = appConfig.DownloadVmdkURL
		tp.LocalVmdkPath = appConfig.LocalVmdkPath
		tp.IsoPath = appConfig.IsoPath
		tp.AppConfig = appConfig
		ctx := context.Background()
		if err := tp.Start(ctx, appConfig.TestPopulatorImage, appConfig.PvcYamlPath); err != nil {
			fmt.Printf("test plan execution failed: %v\n", err)
			os.Exit(1)
		}
		// Output results
		out, err := tp.FormatOutput()
		if err != nil {
			fmt.Printf("failed formatting output: %v\n", err)
			os.Exit(1)
		}
		fmt.Print(string(out))
		fmt.Println("TestPlan completed.")
	},
}
// init registers the test-xcopy command and its --plan-yaml-path flag.
func init() {
	RootCmd.AddCommand(createTestCmd)
	createTestCmd.Flags().StringVar(&planYamlPath, "plan-yaml-path", "assets/manifests/examples/example-test-plan.yaml", "Path to the test plan YAML file")
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/prepare.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/prepare.go | package cmd
import (
"certificate-tool/internal/k8s"
"k8s.io/klog/v2"
"strings"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// prepare sets up the populator controller environment in the test
// namespace: namespace, service account, cluster role + binding, namespaced
// role + binding, and the populator connection Secret. All k8s.Ensure*
// helpers are idempotent, so re-running the command is safe.
//
// The duplicated EnsureNamespace / EnsureServiceAccount / EnsureRole calls
// flagged by the previous inline comment were removed; each resource is now
// ensured exactly once.
var prepare = &cobra.Command{
	Use:   "prepare",
	Short: "Creates the controller environment (deployment, clusterRole and role bindings) ",
	Run: func(cmd *cobra.Command, args []string) {
		klog.Infof("Creating controller environment...")
		config, err := clientcmd.BuildConfigFromFlags("", appConfig.Kubeconfig)
		if err != nil {
			panic(err)
		}
		clientset, err := kubernetes.NewForConfig(config)
		if err != nil {
			panic(err)
		}
		if err := k8s.EnsureNamespace(clientset, appConfig.TestNamespace); err != nil {
			panic(err)
		}
		saName := "populator"
		roleName := "populator"
		if err := k8s.EnsureServiceAccount(clientset, appConfig.TestNamespace, saName); err != nil {
			panic(err)
		}
		clusterRole := k8s.NewClusterRole(roleName)
		if err := k8s.EnsureClusterRole(clientset, clusterRole); err != nil {
			panic(err)
		}
		clusterRoleBinding := k8s.NewClusterRoleBinding(appConfig.TestNamespace, roleName, saName)
		if err := k8s.EnsureClusterRoleBinding(clientset, clusterRoleBinding); err != nil {
			panic(err)
		}
		populatorRole := k8s.NewRole(roleName, appConfig.TestNamespace)
		if err := k8s.EnsureRole(clientset, populatorRole); err != nil {
			panic(err)
		}
		populatorRoleBinding := k8s.NewRoleBinding(appConfig.TestNamespace, saName, roleName)
		if err := k8s.EnsureRoleBinding(clientset, populatorRoleBinding); err != nil {
			panic(err)
		}
		klog.Infof("Ensuring secret...")
		// The secret carries both storage and vSphere credentials for the
		// populator; the vSphere URL is stored without its http(s) scheme.
		secret := k8s.NewPopulatorSecret(
			appConfig.TestNamespace,
			appConfig.StorageSkipSSLVerification,
			appConfig.StoragePassword,
			appConfig.StorageUser,
			appConfig.StorageURL,
			appConfig.VspherePassword,
			appConfig.VsphereUser,
			stripHTTP(appConfig.VsphereURL),
			appConfig.SecretName,
		)
		if err := k8s.EnsureSecret(clientset, secret); err != nil {
			panic(err)
		}
		klog.Infof("Environment created successfully.")
	},
}
// stripHTTP removes a leading "https://" or "http://" scheme from url, if
// present; at most one prefix is stripped.
func stripHTTP(url string) string {
	if rest := strings.TrimPrefix(url, "https://"); rest != url {
		return rest
	}
	return strings.TrimPrefix(url, "http://")
}
// init registers the prepare command with the root command.
func init() {
	RootCmd.AddCommand(prepare)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/destroy-vm.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/cmd/destroy-vm.go | package cmd
import (
"certificate-tool/internal/utils"
"certificate-tool/pkg/vmware"
"fmt"
"time"
"github.com/spf13/cobra"
)
// TestCase defines a single test scenario.
// NOTE(review): this type is not referenced in this file — confirm whether
// it is used elsewhere or is dead code.
type TestCase struct {
	VMs []*utils.VM `yaml:"vms"`
}
// destroyVMCmd tears down every VM listed in the configuration.
//
// Bug fix: the previous implementation returned from inside the loop, so
// only the FIRST configured VM was ever destroyed. All VMs are now
// attempted; the first error (if any) is returned after the loop.
var destroyVMCmd = &cobra.Command{
	Use: "destroy-vms",
	RunE: func(cmd *cobra.Command, args []string) error {
		var firstErr error
		for _, vm := range appConfig.VMs {
			fullVMName := fmt.Sprintf("%s-%s", appConfig.Name, vm.NamePrefix)
			err := vmware.DestroyVM(
				fullVMName,
				appConfig.VsphereURL,
				appConfig.VsphereUser,
				appConfig.VspherePassword,
				appConfig.DataCenter,
				appConfig.DataStore,
				appConfig.Pool,
				parseDuration(appConfig.WaitTimeout, 5*time.Minute),
			)
			if err != nil {
				// Keep going so one failure doesn't leave the rest behind.
				fmt.Printf("failed to destroy VM %s: %v\n", fullVMName, err)
				if firstErr == nil {
					firstErr = fmt.Errorf("destroy VM %s: %w", fullVMName, err)
				}
			}
		}
		return firstErr
	},
}
// init registers the destroy-vms command with the root command.
func init() {
	RootCmd.AddCommand(destroyVMCmd)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/polling.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/polling.go | package k8s
import (
"context"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)
// PodResult records the outcome of one container of a polled pod.
type PodResult struct {
	PodName   string        // pod the container belongs to
	Container string        // container name
	ExitCode  int32         // container exit code (0 on success)
	Duration  time.Duration // run time from container start to termination
	Success   bool          // exit code 0 AND finished within the time limit
	Err       error         // failure detail; nil when Success is true
}
// PollPodsAndCheck polls all pods matching labelSelector in namespace until
// every one reaches a terminal phase (Succeeded or Failed), then returns one
// PodResult per container: success means exit code 0 AND a run time no
// longer than maxTimeSeconds. The second return value is the total wall
// time spent polling.
func PollPodsAndCheck(
	ctx context.Context,
	client *kubernetes.Clientset,
	namespace string,
	labelSelector string,
	maxTimeSeconds int,
	pollInterval time.Duration,
	timeout time.Duration,
) ([]PodResult, time.Duration, error) {
	var finalPods []v1.Pod
	start := time.Now()
	// 1) Poll until all pods finish (Succeeded or Failed).
	// immediate=true means the condition runs immediately once.
	err := wait.PollUntilContextTimeout(
		ctx,          // your parent context
		pollInterval, // e.g. 2*time.Second
		timeout,      // overall timeout e.g. MaxTime + buffer
		true,         // run condition immediately on entry
		func(ctx context.Context) (bool, error) {
			podList, err := client.CoreV1().
				Pods(namespace).
				List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
			if err != nil {
				return false, err
			}
			// Keep the latest snapshot; it is inspected after the poll ends.
			finalPods = podList.Items
			for _, p := range finalPods {
				if p.Status.Phase != v1.PodSucceeded &&
					p.Status.Phase != v1.PodFailed {
					return false, nil // still running
				}
			}
			return true, nil // all done
		},
	)
	totalElapsed := time.Since(start)
	if err != nil {
		return nil, totalElapsed, fmt.Errorf("waiting for pods to finish: %w", err)
	}
	// 2) Inspect each pod & container
	var results []PodResult
	for _, p := range finalPods {
		for _, cs := range p.Status.ContainerStatuses {
			if cs.State.Terminated == nil {
				// Pod is terminal but this container reported no terminated
				// state — record it as a failure rather than guessing.
				results = append(results, PodResult{
					PodName:   p.Name,
					Container: cs.Name,
					Success:   false,
					Err:       fmt.Errorf("no terminated state found"),
				})
				continue
			}
			term := cs.State.Terminated
			dur := term.FinishedAt.Time.Sub(term.StartedAt.Time)
			success := term.ExitCode == 0 && dur <= time.Duration(maxTimeSeconds)*time.Second
			var errDetail error
			if term.ExitCode != 0 {
				errDetail = fmt.Errorf("exit code %d", term.ExitCode)
			} else if dur > time.Duration(maxTimeSeconds)*time.Second {
				errDetail = fmt.Errorf("timeout: ran %v > %ds", dur, maxTimeSeconds)
			}
			results = append(results, PodResult{
				PodName:   p.Name,
				Container: cs.Name,
				ExitCode:  term.ExitCode,
				Duration:  dur,
				Success:   success,
				Err:       errDetail,
			})
		}
	}
	return results, totalElapsed, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/rbac.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/rbac.go | package k8s
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewRole returns a Role that allows managing PVCs in the given namespace.
func NewRole(roleName, namespace string) *rbacv1.Role {
return &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims"},
Verbs: []string{"get", "list", "watch", "patch", "create", "update", "delete"},
},
},
}
}
// NewClusterRole returns a ClusterRole for managing various K8s and Forklift resources.
func NewClusterRole(roleName string) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"pods", "persistentvolumeclaims", "persistentvolumes", "storageclasses", "secrets"},
Verbs: []string{"get", "list", "watch", "patch", "create", "update", "delete"},
},
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"create", "patch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"storageclasses"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"forklift.konveyor.io"},
Resources: []string{"ovirtvolumepopulators", "vspherexcopyvolumepopulators", "openstackvolumepopulators"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"},
},
},
}
}
// NewClusterRoleBinding returns a ClusterRoleBinding that binds the SA to the given ClusterRole.
func NewClusterRoleBinding(namespace, roleName, saName string) *rbacv1.ClusterRoleBinding {
bindingName := fmt.Sprintf("%s-binding", roleName)
return &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: bindingName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: roleName,
},
}
}
// NewRoleBinding returns a RoleBinding that binds the SA to the Role in the namespace.
func NewRoleBinding(namespace, saName, roleName string) *rbacv1.RoleBinding {
bindingName := fmt.Sprintf("%s-binding", roleName)
return &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: bindingName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: roleName,
},
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/apply.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/apply.go | package k8s
import (
"bytes"
"fmt"
"io"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/yaml"
"net/http"
"os"
"strings"
"text/template"
"k8s.io/client-go/kubernetes"
)
type TemplateParams struct {
TestNamespace string
TestImageLabel string
TestLabels string
TestPopulatorImage string
PodNamespace string
VmdkPath string
StorageVendor string
StorageClassName string
}
// ProcessTemplate reads a file and processes it as a Go template using the provided variables.
func ProcessTemplate(filePath string, vars map[string]string, leftDelim, rightDelim string) ([]byte, error) {
rawData, err := os.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("failed to read file %s: %w", filePath, err)
}
tmpl, err := template.New("template").Delims(leftDelim, rightDelim).Parse(string(rawData))
if err != nil {
return nil, fmt.Errorf("failed to parse template: %w", err)
}
var buf bytes.Buffer
if err := tmpl.Execute(&buf, vars); err != nil {
return nil, fmt.Errorf("failed to execute template: %w", err)
}
return buf.Bytes(), nil
}
func Decode[T any](data []byte) (*T, error) {
dec := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(data), 1024)
var obj T
if err := dec.Decode(&obj); err != nil {
return nil, fmt.Errorf("decode %T: %w", obj, err)
}
return &obj, nil
}
// ToMap returns the map[string]string your templates expect.
func (p *TemplateParams) ToMap() map[string]string {
return map[string]string{
"TEST_NAMESPACE": p.TestNamespace,
"TEST_IMAGE_LABEL": p.TestImageLabel,
"TEST_LABELS": p.TestLabels,
"TEST_POPULATOR_IMAGE": p.TestPopulatorImage,
"POD_NAMESPACE": p.PodNamespace,
"VMDK_PATH": p.VmdkPath,
"STORAGE_VENDOR": p.StorageVendor,
"STORAGE_CLASS_NAME": p.StorageClassName,
}
}
// LoadAndDecode loads a fileOrURL, runs your Go template over it,
// then decodes into the typed K8s object T.
func LoadAndDecode[T any](
fileOrURL string,
params *TemplateParams,
leftDelim, rightDelim string,
) (*T, error) {
raw, err := os.ReadFile(fileOrURL)
if err != nil && (strings.HasPrefix(fileOrURL, "http://") || strings.HasPrefix(fileOrURL, "https://")) {
var resp *http.Response
resp, err = http.Get(fileOrURL)
if err != nil {
return nil, err
}
defer resp.Body.Close()
raw, err = io.ReadAll(resp.Body)
}
if err != nil {
return nil, fmt.Errorf("loading %s: %w", fileOrURL, err)
}
tmpl, err := template.
New("m").
Delims(leftDelim, rightDelim).
Parse(string(raw))
if err != nil {
return nil, fmt.Errorf("parsing template: %w", err)
}
var buf bytes.Buffer
if err := tmpl.Execute(&buf, params.ToMap()); err != nil {
return nil, fmt.Errorf("executing template: %w", err)
}
return Decode[T](buf.Bytes())
}
func ApplyResource[T any](
fileOrURL string,
params *TemplateParams,
leftDelim, rightDelim string,
ensure func(clientset *kubernetes.Clientset, namespace string, obj *T) error,
clientset *kubernetes.Clientset,
namespace string,
) error {
obj, err := LoadAndDecode[T](fileOrURL, params, leftDelim, rightDelim)
if err != nil {
return fmt.Errorf("failed to load %s: %w", fileOrURL, err)
}
if err := ensure(clientset, namespace, obj); err != nil {
return fmt.Errorf("ensuring %T : %w", obj, err)
}
return nil
}
func ApplyPVCFromTemplate(clientset *kubernetes.Clientset, namespace, pvcName, size, storageClassName, yamlPath string) error {
data, err := os.ReadFile(yamlPath)
if err != nil {
return fmt.Errorf("read PVC template: %w", err)
}
var pvc corev1.PersistentVolumeClaim
if err := yaml.Unmarshal(data, &pvc); err != nil {
return fmt.Errorf("unmarshal PVC template: %w", err)
}
pvc.Name = pvcName
pvc.Namespace = namespace
pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(size)
pvc.Spec.StorageClassName = &storageClassName
return EnsurePersistentVolumeClaim(clientset, namespace, &pvc)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/secret.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/secret.go | package k8s
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewPopulatorSecret creates a Kubernetes Secret for passing vSphere and storage credentials.
func NewPopulatorSecret(namespace, storageSkipSSLVerification, storagePassword, storageUser, storageUrl, vspherePassword, vsphereUser, vsphereUrl, secretName string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"VSPHERE_INSECURE": "true",
"STORAGE_HOSTNAME": storageUrl,
"STORAGE_PASSWORD": storagePassword,
"STORAGE_USERNAME": storageUser,
"GOVMOMI_HOSTNAME": vsphereUrl,
"GOVMOMI_PASSWORD": vspherePassword,
"GOVMOMI_USERNAME": vsphereUser,
"STORAGE_SKIP_SSL_VERIFICATION": storageSkipSSLVerification,
},
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/ensure.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/ensure.go | package k8s
import (
"certificate-tool/internal/utils"
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// EnsureNamespace makes sure a namespace exists; if not, it creates it.
func EnsureNamespace(clientset *kubernetes.Clientset, namespace string) error {
_, err := clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
_, err = clientset.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create namespace %q: %w", namespace, err)
}
klog.Infof("Namespace %q created", namespace)
return nil
}
if err != nil {
return fmt.Errorf("failed to get namespace %q: %w", namespace, err)
}
klog.Infof("Namespace %q already exists", namespace)
return nil
}
// EnsureServiceAccount ensures a ServiceAccount exists in the given namespace.
func EnsureServiceAccount(clientset *kubernetes.Clientset, namespace, saName string) error {
_, err := clientset.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), saName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: namespace},
}
_, err = clientset.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create ServiceAccount %q in namespace %q: %w", saName, namespace, err)
}
klog.Infof("ServiceAccount %q created in namespace %q", saName, namespace)
return nil
}
if err != nil {
return fmt.Errorf("failed to get ServiceAccount %q in namespace %q: %w", saName, namespace, err)
}
klog.Infof("ServiceAccount %q already exists in namespace %q", saName, namespace)
return nil
}
func EnsureClusterRole(clientset *kubernetes.Clientset, role *rbacv1.ClusterRole) error {
_, err := clientset.RbacV1().ClusterRoles().Get(context.TODO(), role.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create ClusterRole %q: %w", role.Name, err)
}
klog.Infof("ClusterRole %q created", created.Name)
return nil
}
if err != nil {
return fmt.Errorf("failed to get ClusterRole %q: %w", role.Name, err)
}
klog.Infof("ClusterRole %q already exists", role.Name)
return nil
}
func EnsureClusterRoleBinding(clientset *kubernetes.Clientset, binding *rbacv1.ClusterRoleBinding) error {
_, err := clientset.RbacV1().ClusterRoleBindings().Get(context.TODO(), binding.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create ClusterRoleBinding %q: %w", binding.Name, err)
}
klog.Infof("ClusterRoleBinding %q created", created.Name)
return nil
}
if err != nil {
return fmt.Errorf("failed to get ClusterRoleBinding %q: %w", binding.Name, err)
}
klog.Infof("ClusterRoleBinding %q already exists", binding.Name)
return nil
}
func EnsureRole(clientset *kubernetes.Clientset, role *rbacv1.Role) error {
_, err := clientset.RbacV1().Roles(role.Namespace).Get(context.TODO(), role.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.RbacV1().Roles(role.Namespace).Create(context.TODO(), role, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create Role %q in namespace %q: %w", role.Name, role.Namespace, err)
}
klog.Infof("Role %q created in namespace %q", created.Name, created.Namespace)
return nil
}
if err != nil {
return fmt.Errorf("failed to get Role %q in namespace %q: %w", role.Name, role.Namespace, err)
}
klog.Infof("Role %q already exists in namespace %q", role.Name, role.Namespace)
return nil
}
func EnsureRoleBinding(clientset *kubernetes.Clientset, binding *rbacv1.RoleBinding) error {
_, err := clientset.RbacV1().RoleBindings(binding.Namespace).Get(context.TODO(), binding.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.RbacV1().RoleBindings(binding.Namespace).Create(context.TODO(), binding, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create RoleBinding %q: %w", binding.Name, err)
}
klog.Infof("RoleBinding %q created", created.Name)
return nil
}
if err != nil {
return fmt.Errorf("failed to get RoleBinding %q: %w", binding.Name, err)
}
klog.Infof("RoleBinding %q already exists", binding.Name)
return nil
}
func EnsureSecret(clientset *kubernetes.Clientset, secret *corev1.Secret) error {
_, err := clientset.CoreV1().Secrets(secret.Namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create Secret %q: %w", secret.Name, err)
}
klog.Infof("Secret %q created", created.Name)
return nil
}
if err != nil {
return fmt.Errorf("failed to get Secret %q: %w", secret.Name, err)
}
klog.Infof("Secret %q already exists", secret.Name)
return nil
}
func EnsurePersistentVolumeClaim(clientset *kubernetes.Clientset, namespace string, pvc *corev1.PersistentVolumeClaim) error {
existing, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
created, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create PVC %q: %w", pvc.Name, err)
}
klog.Infof("PVC %q created", created.Name)
return nil
}
if err != nil {
return fmt.Errorf("failed to get PVC %q: %w", pvc.Name, err)
}
klog.Infof("PVC %q already exists", existing.Name)
return nil
}
// EnsurePopulatorPod creates or reapplies a populator Pod mounting its PVC.
func EnsurePopulatorPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, image, testLabel string, vm utils.VM, storageVendorProduct, pvcName string) error {
pods := clientset.CoreV1().Pods(namespace)
_, err := pods.Get(ctx, podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
mustBeDefined := false
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: map[string]string{"test": testLabel},
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ServiceAccountName: "populator",
Volumes: []corev1.Volume{
{Name: "target", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName}}},
},
Containers: []corev1.Container{{
Name: "populate",
Image: image,
ImagePullPolicy: corev1.PullAlways,
VolumeDevices: []corev1.VolumeDevice{{Name: "target", DevicePath: "/dev/block"}},
Ports: []corev1.ContainerPort{{Name: "metrics", ContainerPort: 8443, Protocol: corev1.ProtocolTCP}},
EnvFrom: []corev1.EnvFromSource{{SecretRef: &corev1.SecretEnvSource{corev1.LocalObjectReference{Name: "populator-secret"}, &mustBeDefined}}},
Args: []string{
// name or id is fine, the govmomi code uses a finder
fmt.Sprintf("--source-vm-id=%s", vm.Name),
fmt.Sprintf("--source-vmdk=%s", vm.VmdkPath),
fmt.Sprintf("--target-namespace=%s", namespace),
fmt.Sprintf("--cr-name=%s", "notrequired"),
fmt.Sprintf("--cr-namespace=%s", "notrequired"),
fmt.Sprintf("--owner-name=%s", pvcName),
fmt.Sprintf("--secret-name=%s-secret", "notrequired"),
fmt.Sprintf("--pvc-size=%s", vm.Size),
fmt.Sprintf("--owner-uid=%s", "notrequired"),
fmt.Sprintf("--storage-vendor-product=%s", storageVendorProduct),
},
}},
},
}
if _, err := pods.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("failed to create populator pod %s: %w", podName, err)
}
klog.Infof("Created populator pod %s", podName)
return nil
}
if err != nil {
return fmt.Errorf("error getting pod %s: %w", podName, err)
}
klog.Infof("Populator pod %s already exists", podName)
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/logs.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/k8s/logs.go | package k8s
import (
"bytes"
"context"
"fmt"
"io"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// GetPodLogs fetches the logs for a given pod and returns the last 'tailLines' lines.
func GetPodLogs(ctx context.Context, clientset kubernetes.Interface, namespace, podName string, tailLines int64) (string, error) {
podLogOptions := &corev1.PodLogOptions{
TailLines: &tailLines,
}
req := clientset.CoreV1().Pods(namespace).GetLogs(podName, podLogOptions)
podLogs, err := req.Stream(ctx)
if err != nil {
return "", fmt.Errorf("error opening log stream for pod %s/%s: %w", namespace, podName, err)
}
defer podLogs.Close()
buf := new(bytes.Buffer)
_, err = io.Copy(buf, podLogs)
if err != nil {
return "", fmt.Errorf("error copying logs for pod %s/%s: %w", namespace, podName, err)
}
return buf.String(), nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/types.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/types.go | package utils
// VM represents a VM configuration to be provisioned.
type VM struct {
// this name is known during the test case execution and is not serializable
// perhaps we set that in the test case yaml to specify existing VMs?
Name string
NamePrefix string `yaml:"namePrefix"`
Size string `yaml:"size"`
VmdkPath string `yaml:"vmdkPath"`
LocalVmdkPath string `yaml:"localVmdkPath"`
}
// SuccessCriteria indicates the max allowed run time for a test case.
type SuccessCriteria struct {
MaxTimeSeconds int `yaml:"maxTimeSeconds"`
}
// TestResult holds the outcome of a test case.
type TestResult struct {
Success bool `yaml:"success"`
ElapsedTime int64 `yaml:"elapsedTime"`
FailureReason string `yaml:"failureReason"`
}
type IndividualTestResult struct {
PodName string `yaml:"name"`
Success bool `yaml:"success"`
ElapsedTime int64 `yaml:"elapsedTime"`
FailureReason string `yaml:"failureReason"`
LogLines string `yaml:"logLines"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/osutils/os.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/osutils/os.go | package osutils
import (
"k8s.io/klog/v2"
"os"
"os/exec"
)
func ExecCommand(name string, args ...string) error {
klog.Infof("Executing: %s %v", name, args)
cmd := exec.Command(name, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/yaml/yaml.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/utils/yaml/yaml.go | package yaml
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"text/template"
)
func ApplyTemplatedYAML(
kubeconfig, fileOrURL string,
vars map[string]string,
leftDelim, rightDelim string,
) error {
raw, err := os.ReadFile(fileOrURL)
if err != nil {
return fmt.Errorf("read %s: %w", fileOrURL, err)
}
tmpl, err := template.
New("m").
Delims(leftDelim, rightDelim).
Parse(string(raw))
if err != nil {
return fmt.Errorf("parse template: %w", err)
}
var buf bytes.Buffer
if err := tmpl.Execute(&buf, vars); err != nil {
return fmt.Errorf("execute template: %w", err)
}
cmd := exec.Command(
"kubectl",
"--kubeconfig", kubeconfig,
"apply", "-f", "-",
)
cmd.Stdin = &buf
cmd.Stdout = io.Discard
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("kubectl apply: %w", err)
}
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/testplan/plan.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/testplan/plan.go | package testplan
import (
"certificate-tool/internal/utils"
"certificate-tool/pkg/config"
"certificate-tool/pkg/storage"
"context"
"fmt"
"k8s.io/klog/v2"
"time"
"gopkg.in/yaml.v3"
"k8s.io/client-go/kubernetes"
)
// TestPlan aggregates multiple test cases under a VM image.
type TestPlan struct {
StorageVendorProduct string `yaml:"storageVendorProduct"`
TestCases []TestCase `yaml:"tests"`
Namespace string `yaml:"-"`
StorageClass string `yaml:"-"`
ClientSet *kubernetes.Clientset `yaml:"-"`
VSphereURL string `yaml:"-"`
VSphereUser string `yaml:"-"`
VSpherePassword string `yaml:"-"`
Datacenter string `yaml:"-"`
Datastore string `yaml:"-"`
ResourcePool string `yaml:"-"`
HostName string `yaml:"hostName"`
// New fields for VMDK download URL, local VMDK path, and ISO path
VmdkDownloadURL string
LocalVmdkPath string
IsoPath string
AppConfig *config.Config
}
// Parse unmarshals YAML data into a TestPlan.
func Parse(yamlData []byte) (*TestPlan, error) {
var tp TestPlan
if err := yaml.Unmarshal(yamlData, &tp); err != nil {
return nil, err
}
return &tp, nil
}
// Start runs all test cases sequentially, creating PVCs and pods, recording results.
func (tp *TestPlan) Start(ctx context.Context, podImage, pvcYamlPath string) error {
for i := range tp.TestCases {
tc := &tp.TestCases[i]
tc.ClientSet = tp.ClientSet
tc.Namespace = tp.Namespace
tc.VSphereURL = tp.VSphereURL
tc.VSphereUser = tp.VSphereUser
tc.VSpherePassword = tp.VSpherePassword
tc.Datacenter = tp.Datacenter
tc.Datastore = tp.Datastore
tc.ResourcePool = tp.ResourcePool
tc.HostName = tp.HostName
tc.VmdkDownloadURL = tp.VmdkDownloadURL
tc.IsoPath = tp.IsoPath
tc.StorageClass = tp.StorageClass
if tc.LocalVmdkPath == "" {
tc.LocalVmdkPath = tp.LocalVmdkPath
}
start := time.Now()
if err := tc.Run(ctx, podImage, pvcYamlPath, tp.StorageVendorProduct); err != nil {
tc.ResultSummary = utils.TestResult{
Success: false,
ElapsedTime: int64(time.Since(start).Seconds()),
FailureReason: err.Error(),
}
return fmt.Errorf("test %s failed: %w", tc.Name, err)
}
tc.ResultSummary.ElapsedTime = int64(time.Since(start).Seconds())
}
return nil
}
// FormatOutput returns the marshaled YAML of metadata, image, and test results.
func (tp *TestPlan) FormatOutput() ([]byte, error) {
output := struct {
Metadata struct {
Storage struct {
storage.Storage
StorageVendorProduct string `yaml:"storageVendorProduct"`
ConnectionType string `yaml:"connectionType"`
} `yaml:"storage"`
} `yaml:"metadata"`
Image string `yaml:"image"`
Tests []TestCase `yaml:"tests"`
}{
Tests: tp.TestCases,
}
c := storage.StorageCredentials{
Hostname: tp.AppConfig.StorageURL,
Username: tp.AppConfig.StorageUser,
Password: tp.AppConfig.StoragePassword,
SSLSkipVerify: tp.AppConfig.StorageSkipSSLVerification == "true",
VendorProduct: tp.StorageVendorProduct,
}
storageInfo, err := storage.StorageInfo(c)
if err != nil {
klog.Errorf("failed to get storage info: %v", err)
}
output.Metadata.Storage.Storage = storageInfo
output.Metadata.Storage.StorageVendorProduct = tp.StorageVendorProduct
output.Metadata.Storage.ConnectionType = "TODOTODOOOOTODOTODDO"
return yaml.Marshal(output)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/testplan/testcase.go | cmd/vsphere-xcopy-volume-populator/certificate-tool/internal/testplan/testcase.go | package testplan
import (
"certificate-tool/internal/k8s"
"certificate-tool/internal/utils"
"certificate-tool/pkg/vmware"
"context"
"fmt"
"time"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
type TestCaseForPrint struct {
Name string `yaml:"name"`
Success utils.SuccessCriteria `yaml:"success"`
VMs []*utils.VM `yaml:"vms"`
Results utils.TestResult `yaml:"results"`
}
// TestCase defines a single test scenario.
type TestCase struct {
Name string `yaml:"name"`
Success utils.SuccessCriteria `yaml:"success"`
VMs []*utils.VM `yaml:"vms"`
IndividualTestResults []utils.IndividualTestResult `yaml:"individualTestResults"`
ResultSummary utils.TestResult `yaml:"resultsummary"`
Namespace string `yaml:"-"`
StorageClass string `yaml:"-"`
ClientSet *kubernetes.Clientset `yaml:"-"`
VSphereURL string `yaml:"-"`
VSphereUser string `yaml:"-"`
VSpherePassword string `yaml:"-"`
Datacenter string `yaml:"-"`
Datastore string `yaml:"-"`
ResourcePool string `yaml:"-"`
HostName string `yaml:"-"`
VmdkDownloadURL string `yaml:"-"`
LocalVmdkPath string `yaml:"localVmdkPath"`
IsoPath string `yaml:"-"`
}
// Run provisions per-pod PVCs, VMs, launches populator pods, and waits.
func (tc *TestCase) Run(ctx context.Context, podImage, pvcYamlPath, storageVendorProduct string) error {
_, cancel, _, _, _, _, _, err := vmware.SetupVSphere(
10*time.Minute,
tc.VSphereURL,
tc.VSphereUser,
tc.VSpherePassword,
tc.Datacenter,
tc.Datastore,
tc.ResourcePool,
)
if err != nil {
return fmt.Errorf("vSphere setup failed: %w", err)
}
defer cancel()
if err := tc.ensureVMs(tc.Name, tc.VMs, tc.VmdkDownloadURL, tc.LocalVmdkPath, tc.IsoPath); err != nil {
return fmt.Errorf("VM setup failed: %w", err)
}
for _, vm := range tc.VMs {
pvcName := fmt.Sprintf("pvc-%s-%s", tc.Name, vm.NamePrefix)
if err := k8s.ApplyPVCFromTemplate(tc.ClientSet, tc.Namespace, pvcName, vm.Size, tc.StorageClass, pvcYamlPath); err != nil {
return fmt.Errorf("failed ensuring PVC %s: %w", pvcName, err)
}
podName := fmt.Sprintf("populator-%s-%s", tc.Name, vm.NamePrefix)
if err := k8s.EnsurePopulatorPod(ctx, tc.ClientSet, tc.Namespace, podName, podImage, tc.Name, *vm, storageVendorProduct, pvcName); err != nil {
return fmt.Errorf("failed creating pod %s: %w", podName, err)
}
}
newCtx, _ := context.WithTimeout(ctx, 10*time.Minute)
results, _, err := k8s.PollPodsAndCheck(newCtx, tc.ClientSet, tc.Namespace, fmt.Sprintf("test=%s", tc.Name), tc.Success.MaxTimeSeconds, 5*time.Second, time.Duration(tc.Success.MaxTimeSeconds)*time.Second)
if err != nil {
return fmt.Errorf("failed polling pods: %w", err)
}
tc.ResultSummary.Success = true
for _, r := range results {
newTcResult := utils.IndividualTestResult{
PodName: r.PodName,
Success: r.Success,
ElapsedTime: int64(r.Duration.Seconds()),
}
if newTcResult.Success != true {
newTcResult.FailureReason = fmt.Sprintf("Err: %s, ExitCode: %d", r.Err, r.ExitCode)
const logLinesToFetch = 10
logs, logErr := k8s.GetPodLogs(newCtx, tc.ClientSet, tc.Namespace, r.PodName, logLinesToFetch)
if logErr != nil {
newTcResult.LogLines = fmt.Sprintf("Failed to get logs: %v", logErr)
fmt.Printf("Warning: Could not get logs for pod %s/%s: %v\n", tc.Namespace, r.PodName, logErr)
} else {
newTcResult.LogLines = logs
}
}
tc.IndividualTestResults = append(tc.IndividualTestResults, newTcResult)
tc.ResultSummary.Success = tc.ResultSummary.Success && r.Success
if !r.Success {
tc.ResultSummary.FailureReason = fmt.Sprintf("%s Pod: %s, err: %s; code: %d", tc.ResultSummary.FailureReason, r.PodName, r.Err, r.ExitCode)
}
}
return nil
}
// ensureVMs creates VMs and sets their VMDK paths.
func (tc *TestCase) ensureVMs(testName string, vms []*utils.VM, downloadVmdkURL, tcLocalVmdkPath, isoPath string) error {
klog.Infof("Ensuring VMs for test %s", testName)
for _, vm := range vms {
vm.Name = fmt.Sprintf("%s-%s", testName, vm.NamePrefix)
localVmdkPath := tcLocalVmdkPath
if vm.LocalVmdkPath != "" {
localVmdkPath = vm.LocalVmdkPath
}
klog.Infof("Creating VM %s with image %s, VMDK URL: %s, Local VMDK Path: %s, ISO Path: %s", vm.Name, downloadVmdkURL, localVmdkPath, isoPath)
remoteVmdkPath, err := vmware.CreateVM(
vm.Name,
tc.VSphereURL,
tc.VSphereUser,
tc.VSpherePassword,
tc.Datacenter,
tc.Datastore,
tc.ResourcePool,
tc.HostName,
downloadVmdkURL,
localVmdkPath,
isoPath,
10*time.Minute,
)
if err != nil {
return fmt.Errorf("failed to create VM %s: %w", vm.Name, err)
}
vm.VmdkPath = remoteVmdkPath
klog.Infof("VM %s created with VMDK path: %s", vm.Name, vm.VmdkPath)
}
return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/powerstore/powerstore.go | cmd/vsphere-xcopy-volume-populator/internal/powerstore/powerstore.go | package powerstore
import (
"context"
"fmt"
"strings"
"github.com/dell/gopowerstore"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/fcutil"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
const (
hostIDContextKey string = "hostID"
esxLogicalHostNameKey string = "esxLogicalHostName"
esxRealHostNameKey string = "esxRealHostName"
)
type PowerstoreClonner struct {
Client gopowerstore.Client
}
// CurrentMappedGroups implements populator.StorageApi.
func (p *PowerstoreClonner) CurrentMappedGroups(targetLUN populator.LUN, mappingContext populator.MappingContext) ([]string, error) {
if targetLUN.IQN == "" {
return nil, fmt.Errorf("target LUN IQN is required")
}
ctx := context.Background()
mappings, err := p.Client.GetHostVolumeMappingByVolumeID(ctx, targetLUN.IQN)
if err != nil {
return nil, fmt.Errorf("failed to get volume mappings for %s: %w", targetLUN.Name, err)
}
mappedHosts := make([]string, 0, len(mappings))
for _, mapping := range mappings {
host, err := p.Client.GetHost(ctx, mapping.HostID)
if err != nil {
klog.Warningf("Failed to get host info for host ID %s: %s", mapping.HostID, err)
continue
}
mappedHosts = append(mappedHosts, host.Name)
}
if len(mappedHosts) == 0 {
return nil, fmt.Errorf("volume %s is not mapped to any host", targetLUN.Name)
}
return mappedHosts, nil
}
// EnsureClonnerIgroup implements populator.StorageApi.
//
// It makes sure a PowerStore host (initiator group) exists that owns the
// given ESX adapter IDs (iSCSI IQNs, FC WWPNs, or NVMe NQNs). Lookup order:
//  1. an existing standalone host owning one of the initiators,
//  2. a host inside any host group owning one of the initiators,
//  3. a host with the requested name,
//  4. otherwise a new ESXi host is created with all adapters registered.
//
// The returned mapping context carries the resolved host ID plus the
// logical (requested) and real (PowerStore) host names for later Map/UnMap
// calls.
func (p *PowerstoreClonner) EnsureClonnerIgroup(initiatorGroup string, adapterIds []string) (populator.MappingContext, error) {
	klog.Infof("ensuring initiator group %s for adapters %v", initiatorGroup, adapterIds)
	ctx := context.Background()
	hosts, err := p.Client.GetHosts(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get initiator groups: %w", err)
	}
	// NOTE: the previous version pre-allocated a map[string]any here that was
	// immediately overwritten by this call; the dead value has been removed.
	found, mappingContext, err := getHostByInitiator(adapterIds, &hosts, initiatorGroup)
	if err != nil {
		return nil, fmt.Errorf("failed to get host by initiator: %w", err)
	}
	if found {
		return mappingContext, nil
	}
	hostGroups, err := p.Client.GetHostGroups(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get host groups: %w", err)
	}
	for _, hostGroup := range hostGroups {
		found, mappingContext, err = getHostByInitiator(adapterIds, &hostGroup.Hosts, initiatorGroup)
		if err != nil {
			return nil, fmt.Errorf("failed to get host by initiator: %w", err)
		}
		if found {
			return mappingContext, nil
		}
	}
	// if no host group found or host, create new host group
	host, err := p.Client.GetHostByName(ctx, initiatorGroup)
	if err != nil {
		klog.Infof("initiator group %s not found, creating new initiator group", initiatorGroup)
		osType := gopowerstore.OSTypeEnumESXi
		inits := make([]gopowerstore.InitiatorCreateModify, 0, len(adapterIds))
		for _, a := range adapterIds {
			pt, err := detectPortType(a)
			if err != nil {
				return nil, fmt.Errorf("failed to detect port type for adapter %s: %w", a, err)
			}
			portName, err := extractAdapterIdByPortType(a, pt)
			if err != nil {
				return nil, fmt.Errorf("failed to modify WWN by type for adapter %s: %w", a, err)
			}
			inits = append(inits, gopowerstore.InitiatorCreateModify{
				PortName: &portName,
				PortType: &pt,
			})
		}
		createParams := &gopowerstore.HostCreate{
			Name:       &initiatorGroup,
			OsType:     &osType,
			Initiators: &inits,
		}
		createResp, err := p.Client.CreateHost(ctx, createParams)
		if err != nil {
			return nil, fmt.Errorf("failed to create initiator group %s: %w", initiatorGroup, err)
		}
		host, err = p.Client.GetHost(ctx, createResp.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to get created initiator group %s: %w", createResp.ID, err)
		}
		klog.Infof("Successfully created initiator group %s with ID %s", initiatorGroup, host.ID)
	} else {
		klog.Infof("Found existing initiator group %s with ID %s", initiatorGroup, host.ID)
	}
	mappingContext = createMappingContext(&host, initiatorGroup)
	klog.Infof("Successfully ensured initiator group %s with %d adapters", initiatorGroup, len(adapterIds))
	return mappingContext, nil
}
// extractAdapterIdByPortType normalizes an ESX adapter ID to the port-name
// form PowerStore expects: iSCSI IQNs and NVMe NQNs pass through unchanged,
// while FC adapter IDs are reduced to their lowercase, colon-separated WWPN.
func extractAdapterIdByPortType(adapterId string, portType gopowerstore.InitiatorProtocolTypeEnum) (string, error) {
	switch portType {
	case gopowerstore.InitiatorProtocolTypeEnumISCSI, gopowerstore.InitiatorProtocolTypeEnumNVME:
		// Already in the form the array expects.
		return adapterId, nil
	case gopowerstore.InitiatorProtocolTypeEnumFC:
		formatted, err := fcutil.ExtractAndFormatWWPN(adapterId)
		if err != nil {
			return "", fmt.Errorf("failed to extract and format WWPN for adapter %s: %w", adapterId, err)
		}
		return strings.ToLower(formatted), nil
	default:
		return "", fmt.Errorf("invalid port type: %s", portType)
	}
}
// getHostByInitiator searches hosts for one that already owns any of the
// given adapter IDs (after normalizing each adapter to the PowerStore port
// name format). On a match it returns true together with a mapping context
// for the matched host; otherwise false with an empty context.
func getHostByInitiator(adapterIds []string, hosts *[]gopowerstore.Host, initiatorGroup string) (bool, populator.MappingContext, error) {
	// Normalize each adapter once up front: the result depends only on the
	// adapter ID, not on the host or initiator being inspected, so doing it
	// inside the host/initiator loops (as before) repeated the same work.
	formattedAdapterIds := make([]string, 0, len(adapterIds))
	for _, adapterId := range adapterIds {
		portType, err := detectPortType(adapterId)
		if err != nil {
			return false, populator.MappingContext{}, fmt.Errorf("failed to detect port type for adapter %s: %w", adapterId, err)
		}
		formattedAdapterId, err := extractAdapterIdByPortType(adapterId, portType)
		if err != nil {
			return false, populator.MappingContext{}, fmt.Errorf("failed to extract adapter ID by port type for adapter %s: %w", adapterId, err)
		}
		formattedAdapterIds = append(formattedAdapterIds, formattedAdapterId)
	}
	for _, host := range *hosts {
		for _, initiator := range host.Initiators {
			for _, formattedAdapterId := range formattedAdapterIds {
				if initiator.PortName == formattedAdapterId {
					klog.Infof("Found existing initiator group %s with ID %s name %s port name %s", initiatorGroup, host.ID, host.Name, initiator.PortName)
					return true, createMappingContext(&host, initiatorGroup), nil
				}
			}
		}
	}
	return false, populator.MappingContext{}, nil
}
// createMappingContext builds the context handed back to Map/UnMap: the
// PowerStore host ID, the logical (requested) group name, and the real host
// name as known by the array.
func createMappingContext(host *gopowerstore.Host, initiatorGroup string) populator.MappingContext {
	return populator.MappingContext{
		hostIDContextKey:      host.ID,
		esxLogicalHostNameKey: initiatorGroup,
		esxRealHostNameKey:    host.Name,
	}
}
// detectPortType infers the PowerStore initiator protocol from the adapter
// ID prefix: "iqn." => iSCSI, "fc." => Fibre Channel, "nqn." => NVMe.
// On an unrecognized prefix it returns an error (the accompanying iSCSI
// value is a placeholder and must not be used).
func detectPortType(adapterId string) (gopowerstore.InitiatorProtocolTypeEnum, error) {
	switch {
	case strings.HasPrefix(adapterId, "iqn."):
		return gopowerstore.InitiatorProtocolTypeEnumISCSI, nil
	case strings.HasPrefix(adapterId, "fc."):
		return gopowerstore.InitiatorProtocolTypeEnumFC, nil
	case strings.HasPrefix(adapterId, "nqn."):
		return gopowerstore.InitiatorProtocolTypeEnumNVME, nil
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005);
		// the previous message started with a capital letter.
		return gopowerstore.InitiatorProtocolTypeEnumISCSI, fmt.Errorf("could not determine port type for adapter ID: %s", adapterId)
	}
}
// Map implements populator.StorageApi.
//
// It attaches the target volume (identified by targetLUN.IQN, which for
// PowerStore holds the backend volume ID) to the host resolved from the
// mapping context. The operation is idempotent: when the volume is already
// attached to the host it returns successfully without re-attaching.
func (p *PowerstoreClonner) Map(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) (populator.LUN, error) {
	if targetLUN.IQN == "" {
		return targetLUN, fmt.Errorf("target LUN IQN is required")
	}
	if mappingContext == nil {
		return targetLUN, fmt.Errorf("mapping context is required")
	}
	klog.Infof("mapping volume %s to initiator-group %s", targetLUN.Name, initiatorGroup)
	ctx := context.Background()

	// The caller may pass the logical ESX host name; translate it to the
	// real PowerStore host name recorded in the mapping context. The
	// comma-ok assertion replaces a bare .(string) that panicked on a
	// malformed context.
	hostName := initiatorGroup
	if initiatorGroup == mappingContext[esxLogicalHostNameKey] {
		realName, ok := mappingContext[esxRealHostNameKey].(string)
		if !ok {
			return targetLUN, fmt.Errorf("mapping context is missing the real host name for %s", initiatorGroup)
		}
		hostName = realName
	}
	// Get the host by the real PowerStore host name
	host, err := p.Client.GetHostByName(ctx, hostName)
	if err != nil {
		return targetLUN, fmt.Errorf("failed to find host for host name %s: %w", hostName, err)
	}
	hostID := host.ID

	// idempotency: skip attach if already mapped; a lookup failure here is
	// deliberately ignored and we fall through to the attach attempt.
	existing, err := p.Client.GetHostVolumeMappingByVolumeID(ctx, targetLUN.IQN)
	if err == nil {
		for _, m := range existing {
			if m.HostID == hostID {
				klog.Infof("Volume %s already mapped to initiator group %s", targetLUN.Name, hostName)
				return targetLUN, nil
			}
		}
	}

	attachParams := &gopowerstore.HostVolumeAttach{
		VolumeID: &targetLUN.IQN,
	}
	// "initiatior" typos in the messages below were fixed to "initiator".
	if _, err = p.Client.AttachVolumeToHost(ctx, hostID, attachParams); err != nil {
		return targetLUN, fmt.Errorf("failed to attach volume %s to initiator group %s: %w", targetLUN.Name, hostID, err)
	}
	klog.Infof("Successfully mapped volume %s to initiator group %s", targetLUN.Name, hostName)
	return targetLUN, nil
}
// ResolvePVToLUN implements populator.StorageApi.
// (The previous comment referred to a "ResolveVolumeHandleToLUN" that does
// not exist; corrected to match the actual method name.)
//
// It resolves a Kubernetes PersistentVolume to the backing PowerStore
// volume, looked up by the PV's "Name" volume attribute. Note that the
// returned LUN carries the PowerStore volume ID in both ProviderID and IQN;
// the other methods of this type use the IQN field as the volume ID.
func (p *PowerstoreClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	if pv.VolumeAttributes == nil {
		return populator.LUN{}, fmt.Errorf("PersistentVolume attributes are required")
	}
	name := pv.VolumeAttributes["Name"]
	if name == "" {
		return populator.LUN{}, fmt.Errorf("PersistentVolume 'Name' attribute is required to locate the volume in PowerStore")
	}
	ctx := context.Background()
	volume, err := p.Client.GetVolumeByName(ctx, name)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to get volume %s: %w", name, err)
	}
	klog.Infof("Successfully resolved volume %s", name)
	return populator.LUN{
		Name:         name,
		VolumeHandle: pv.VolumeHandle,
		Protocol:     pv.VolumeAttributes["Protocol"],
		NAA:          volume.Wwn, // volume.Wwn contains naa. prefix
		ProviderID:   volume.ID,
		IQN:          volume.ID,
	}, nil
}
// UnMap implements populator.StorageApi.
//
// It detaches the target volume (identified by targetLUN.IQN, which holds
// the PowerStore volume ID) from the host whose ID is recorded in the
// mapping context.
func (p *PowerstoreClonner) UnMap(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) error {
	if targetLUN.IQN == "" {
		return fmt.Errorf("target LUN IQN is required")
	}
	if mappingContext == nil {
		return fmt.Errorf("mapping context is required")
	}
	klog.Infof("unmapping volume %s from initiator-group %s", targetLUN.Name, initiatorGroup)

	// Translate the logical ESX host name to the real PowerStore host name
	// (used for logging). The comma-ok assertions below replace bare
	// .(string) assertions that panicked on a malformed context.
	hostName := initiatorGroup
	if initiatorGroup == mappingContext[esxLogicalHostNameKey] {
		realName, ok := mappingContext[esxRealHostNameKey].(string)
		if !ok {
			return fmt.Errorf("mapping context is missing the real host name for %s", initiatorGroup)
		}
		hostName = realName
	}
	ctx := context.Background()
	hostID, ok := mappingContext[hostIDContextKey].(string)
	if !ok {
		return fmt.Errorf("mapping context is missing the host ID for %s", initiatorGroup)
	}
	// Detach volume from host
	detachParams := &gopowerstore.HostVolumeDetach{
		VolumeID: &targetLUN.IQN,
	}
	if _, err := p.Client.DetachVolumeFromHost(ctx, hostID, detachParams); err != nil {
		return fmt.Errorf("failed to detach volume %s from initiator group %s: %w", targetLUN.Name, hostID, err)
	}
	klog.Infof("Successfully unmapped volume %s from initiator group %s", targetLUN.Name, hostName)
	return nil
}
// NewPowerstoreClonner builds a PowerstoreClonner for the given management
// endpoint and credentials, optionally skipping TLS verification, and
// verifies connectivity/credentials by fetching the cluster info.
func NewPowerstoreClonner(hostname, username, password string, sslSkipVerify bool) (PowerstoreClonner, error) {
	switch {
	case hostname == "":
		return PowerstoreClonner{}, fmt.Errorf("hostname is required")
	case username == "":
		return PowerstoreClonner{}, fmt.Errorf("username is required")
	case password == "":
		return PowerstoreClonner{}, fmt.Errorf("password is required")
	}

	options := gopowerstore.NewClientOptions()
	options.SetInsecure(sslSkipVerify)
	client, err := gopowerstore.NewClientWithArgs(hostname, username, password, options)
	if err != nil {
		return PowerstoreClonner{}, fmt.Errorf("failed to create PowerStore client: %w", err)
	}

	// Probe the backend so credential problems surface at construction time.
	if _, err := client.GetCluster(context.Background()); err != nil {
		return PowerstoreClonner{}, fmt.Errorf("failed to authenticate with PowerStore backend %s: %w", hostname, err)
	}
	return PowerstoreClonner{Client: client}, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap.go | cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap.go | package ontap
import (
"context"
"fmt"
"os"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
drivers "github.com/netapp/trident/storage_drivers"
"github.com/netapp/trident/storage_drivers/ontap/api"
"k8s.io/klog/v2"
)
// OntapProviderID is the NetApp IEEE OUI prefix used to build the NAA
// identifier of a LUN from its serial number (see ResolvePVToLUN).
const OntapProviderID = "600a0980"

// Ensure NetappClonner implements required interfaces
var _ populator.VMDKCapable = &NetappClonner{}

// NetappClonner implements populator storage operations on top of the
// ONTAP REST API.
type NetappClonner struct {
	api api.OntapAPI
}
// Map the targetLUN to the initiator group.
//
// The mapping is idempotent on the ONTAP side (EnsureLunMapped); on success
// the original LUN is returned unchanged.
func (c *NetappClonner) Map(initatorGroup string, targetLUN populator.LUN, _ populator.MappingContext) (populator.LUN, error) {
	if _, err := c.api.EnsureLunMapped(context.TODO(), initatorGroup, targetLUN.Name); err != nil {
		return populator.LUN{}, fmt.Errorf("Failed to map lun path %s to group %s: %w ", targetLUN.Name, initatorGroup, err)
	}
	return targetLUN, nil
}
// UnMap removes the LUN mapping between the given initiator group and the
// target LUN (identified by its path/name) on the ONTAP backend.
func (c *NetappClonner) UnMap(initatorGroup string, targetLUN populator.LUN, _ populator.MappingContext) error {
	return c.api.LunUnmap(context.TODO(), initatorGroup, targetLUN.Name)
}
// EnsureClonnerIgroup creates (or reuses) the initiator group and registers
// every given adapter ID with it. It succeeds when at least one adapter was
// added; per-adapter failures are logged and skipped. The returned mapping
// context is always nil for ONTAP.
func (c *NetappClonner) EnsureClonnerIgroup(initiatorGroup string, adapterIds []string) (populator.MappingContext, error) {
	// esxs needs "vmware" as the group protocol.
	err := c.api.IgroupCreate(context.Background(), initiatorGroup, "mixed", "vmware")
	if err != nil {
		// TODO ignore if exists error? with ontap there is no error
		return nil, fmt.Errorf("failed adding igroup %w", err)
	}

	added := 0
	for _, adapterId := range adapterIds {
		if addErr := c.api.EnsureIgroupAdded(context.Background(), initiatorGroup, adapterId); addErr != nil {
			klog.Warningf("failed adding host to igroup %s", addErr)
			continue
		}
		added++
	}
	if added == 0 {
		return nil, fmt.Errorf("failed adding any host to igroup")
	}
	return nil, nil
}
// NewNetappClonner initializes an ONTAP REST client for the given
// management LIF and credentials and wraps it in a NetappClonner.
func NewNetappClonner(hostname, username, password string) (NetappClonner, error) {
	// additional ontap values should be passed as env variables using prefix ONTAP_
	config := drivers.OntapStorageDriverConfig{
		CommonStorageDriverConfig: &drivers.CommonStorageDriverConfig{},
		ManagementLIF:             hostname,
		Username:                  username,
		Password:                  password,
		LimitAggregateUsage:       "",
		SVM:                       os.Getenv("ONTAP_SVM"),
	}
	client, err := api.NewRestClientFromOntapConfig(context.TODO(), &config)
	if err != nil {
		klog.V(2).Infof("ONTAP client initialization error details: %v", err)
		return NetappClonner{}, fmt.Errorf("failed to initialize ONTAP client (common causes: incorrect password, invalid SVM name, network connectivity): %w", err)
	}
	return NetappClonner{api: client}, nil
}
// ResolvePVToLUN resolves a trident-provisioned PersistentVolume to its
// backing ONTAP LUN (/vol/<internalName>/lun0) and derives the device NAA
// identifier from the LUN serial number.
func (c *NetappClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	// trident sets internalName attribute on a volume, and that is the real volume name in the system
	internalName, ok := pv.VolumeAttributes["internalName"]
	if !ok {
		// Message previously misspelled the attribute as "intenalName".
		return populator.LUN{}, fmt.Errorf("internalName attribute is missing on the PersistentVolume %s", pv.Name)
	}
	l, err := c.api.LunGetByName(context.Background(), fmt.Sprintf("/vol/%s/lun0", internalName))
	if err != nil {
		return populator.LUN{}, err
	}
	klog.Infof("found lun %s with serial %s", l.Name, l.SerialNumber)
	// in RHEL lsblk needs that swap. In fedora it doesn't
	//serialNumber := strings.ReplaceAll(l.SerialNumber, "?", "\\\\x3f")
	// %x hex-encodes the serial's bytes, yielding the NAA suffix after the
	// NetApp OUI prefix.
	naa := fmt.Sprintf("naa.%s%x", OntapProviderID, l.SerialNumber)
	lun := populator.LUN{Name: l.Name, VolumeHandle: pv.VolumeHandle, SerialNumber: l.SerialNumber, NAA: naa}
	return lun, nil
}
// Get returns an iSCSI data LIF address of the SVM, to be used as the
// target portal for the given LUN.
//
// FIXME - this ips list needs to be intersected with the list of reporting
// nodes for the LUN? see c.api.LunMapGetReportingNodes
func (c *NetappClonner) Get(lun populator.LUN, _ populator.MappingContext) (string, error) {
	// this code is from netapp/trident/storage_drivers/ontap/ontap_common.go
	ips, err := c.api.NetInterfaceGetDataLIFs(context.Background(), "iscsi")
	if err != nil {
		return "", err
	}
	// Previously an empty LIF list returned ("", nil), handing callers an
	// empty portal address with no error; make that case explicit.
	if len(ips) == 0 {
		return "", fmt.Errorf("no iscsi data LIFs found")
	}
	return ips[0], nil
}
// CurrentMappedGroups lists the initiator groups the target LUN is mapped
// to, using the LUN path/name as the lookup key.
func (c *NetappClonner) CurrentMappedGroups(targetLUN populator.LUN, _ populator.MappingContext) ([]string, error) {
	mappedGroups, err := c.api.LunListIgroupsMapped(context.Background(), targetLUN.Name)
	if err != nil {
		return nil, fmt.Errorf("Failed to get mapped luns by path %s: %w ", targetLUN.Name, err)
	}
	return mappedGroups, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap_api_mock.go | cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap_api_mock.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/netapp/trident/storage_drivers/ontap/api (interfaces: OntapAPI)
//
// Generated by this command:
//
// mockgen -destination=internal/ontap/ontap_api_mock.go -package=ontap github.com/netapp/trident/storage_drivers/ontap/api OntapAPI
//
// Package ontap is a generated GoMock package.
package ontap
import (
context "context"
reflect "reflect"
time "time"
api "github.com/netapp/trident/storage_drivers/ontap/api"
gomock "go.uber.org/mock/gomock"
)
// MockOntapAPI is a mock of OntapAPI interface.
type MockOntapAPI struct {
ctrl *gomock.Controller
recorder *MockOntapAPIMockRecorder
isgomock struct{}
}
// MockOntapAPIMockRecorder is the mock recorder for MockOntapAPI.
type MockOntapAPIMockRecorder struct {
mock *MockOntapAPI
}
// NewMockOntapAPI creates a new mock instance.
func NewMockOntapAPI(ctrl *gomock.Controller) *MockOntapAPI {
mock := &MockOntapAPI{ctrl: ctrl}
mock.recorder = &MockOntapAPIMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockOntapAPI) EXPECT() *MockOntapAPIMockRecorder {
return m.recorder
}
// APIVersion mocks base method.
func (m *MockOntapAPI) APIVersion(arg0 context.Context) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "APIVersion", arg0)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// APIVersion indicates an expected call of APIVersion.
func (mr *MockOntapAPIMockRecorder) APIVersion(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "APIVersion", reflect.TypeOf((*MockOntapAPI)(nil).APIVersion), arg0)
}
// EmsAutosupportLog mocks base method.
func (m *MockOntapAPI) EmsAutosupportLog(ctx context.Context, driverName, appVersion string, autoSupport bool, category, computerName, eventDescription string, eventID int, eventSource string, logLevel int) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "EmsAutosupportLog", ctx, driverName, appVersion, autoSupport, category, computerName, eventDescription, eventID, eventSource, logLevel)
}
// EmsAutosupportLog indicates an expected call of EmsAutosupportLog.
func (mr *MockOntapAPIMockRecorder) EmsAutosupportLog(ctx, driverName, appVersion, autoSupport, category, computerName, eventDescription, eventID, eventSource, logLevel any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EmsAutosupportLog", reflect.TypeOf((*MockOntapAPI)(nil).EmsAutosupportLog), ctx, driverName, appVersion, autoSupport, category, computerName, eventDescription, eventID, eventSource, logLevel)
}
// EnsureIgroupAdded mocks base method.
func (m *MockOntapAPI) EnsureIgroupAdded(ctx context.Context, initiatorGroupName, initiator string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureIgroupAdded", ctx, initiatorGroupName, initiator)
ret0, _ := ret[0].(error)
return ret0
}
// EnsureIgroupAdded indicates an expected call of EnsureIgroupAdded.
func (mr *MockOntapAPIMockRecorder) EnsureIgroupAdded(ctx, initiatorGroupName, initiator any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureIgroupAdded", reflect.TypeOf((*MockOntapAPI)(nil).EnsureIgroupAdded), ctx, initiatorGroupName, initiator)
}
// EnsureLunMapped mocks base method.
func (m *MockOntapAPI) EnsureLunMapped(ctx context.Context, initiatorGroupName, lunPath string) (int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureLunMapped", ctx, initiatorGroupName, lunPath)
ret0, _ := ret[0].(int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EnsureLunMapped indicates an expected call of EnsureLunMapped.
func (mr *MockOntapAPIMockRecorder) EnsureLunMapped(ctx, initiatorGroupName, lunPath any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureLunMapped", reflect.TypeOf((*MockOntapAPI)(nil).EnsureLunMapped), ctx, initiatorGroupName, lunPath)
}
// ExportPolicyCreate mocks base method.
func (m *MockOntapAPI) ExportPolicyCreate(ctx context.Context, policy string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportPolicyCreate", ctx, policy)
ret0, _ := ret[0].(error)
return ret0
}
// ExportPolicyCreate indicates an expected call of ExportPolicyCreate.
func (mr *MockOntapAPIMockRecorder) ExportPolicyCreate(ctx, policy any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPolicyCreate", reflect.TypeOf((*MockOntapAPI)(nil).ExportPolicyCreate), ctx, policy)
}
// ExportPolicyDestroy mocks base method.
func (m *MockOntapAPI) ExportPolicyDestroy(ctx context.Context, policy string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportPolicyDestroy", ctx, policy)
ret0, _ := ret[0].(error)
return ret0
}
// ExportPolicyDestroy indicates an expected call of ExportPolicyDestroy.
func (mr *MockOntapAPIMockRecorder) ExportPolicyDestroy(ctx, policy any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPolicyDestroy", reflect.TypeOf((*MockOntapAPI)(nil).ExportPolicyDestroy), ctx, policy)
}
// ExportPolicyExists mocks base method.
func (m *MockOntapAPI) ExportPolicyExists(ctx context.Context, policyName string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportPolicyExists", ctx, policyName)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExportPolicyExists indicates an expected call of ExportPolicyExists.
func (mr *MockOntapAPIMockRecorder) ExportPolicyExists(ctx, policyName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPolicyExists", reflect.TypeOf((*MockOntapAPI)(nil).ExportPolicyExists), ctx, policyName)
}
// ExportRuleCreate mocks base method.
func (m *MockOntapAPI) ExportRuleCreate(ctx context.Context, policyName, desiredPolicyRule, nasProtocol string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportRuleCreate", ctx, policyName, desiredPolicyRule, nasProtocol)
ret0, _ := ret[0].(error)
return ret0
}
// ExportRuleCreate indicates an expected call of ExportRuleCreate.
func (mr *MockOntapAPIMockRecorder) ExportRuleCreate(ctx, policyName, desiredPolicyRule, nasProtocol any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportRuleCreate", reflect.TypeOf((*MockOntapAPI)(nil).ExportRuleCreate), ctx, policyName, desiredPolicyRule, nasProtocol)
}
// ExportRuleDestroy mocks base method.
func (m *MockOntapAPI) ExportRuleDestroy(ctx context.Context, policyName string, ruleIndex int) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportRuleDestroy", ctx, policyName, ruleIndex)
ret0, _ := ret[0].(error)
return ret0
}
// ExportRuleDestroy indicates an expected call of ExportRuleDestroy.
func (mr *MockOntapAPIMockRecorder) ExportRuleDestroy(ctx, policyName, ruleIndex any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportRuleDestroy", reflect.TypeOf((*MockOntapAPI)(nil).ExportRuleDestroy), ctx, policyName, ruleIndex)
}
// ExportRuleList mocks base method.
func (m *MockOntapAPI) ExportRuleList(ctx context.Context, policyName string) (map[string]int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportRuleList", ctx, policyName)
ret0, _ := ret[0].(map[string]int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExportRuleList indicates an expected call of ExportRuleList.
func (mr *MockOntapAPIMockRecorder) ExportRuleList(ctx, policyName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportRuleList", reflect.TypeOf((*MockOntapAPI)(nil).ExportRuleList), ctx, policyName)
}
// FlexgroupCloneSplitStart mocks base method.
func (m *MockOntapAPI) FlexgroupCloneSplitStart(ctx context.Context, cloneName string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupCloneSplitStart", ctx, cloneName)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupCloneSplitStart indicates an expected call of FlexgroupCloneSplitStart.
func (mr *MockOntapAPIMockRecorder) FlexgroupCloneSplitStart(ctx, cloneName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupCloneSplitStart", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupCloneSplitStart), ctx, cloneName)
}
// FlexgroupCreate mocks base method.
func (m *MockOntapAPI) FlexgroupCreate(ctx context.Context, volume api.Volume) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupCreate", ctx, volume)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupCreate indicates an expected call of FlexgroupCreate.
func (mr *MockOntapAPIMockRecorder) FlexgroupCreate(ctx, volume any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupCreate", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupCreate), ctx, volume)
}
// FlexgroupDestroy mocks base method.
func (m *MockOntapAPI) FlexgroupDestroy(ctx context.Context, volumeName string, force bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupDestroy", ctx, volumeName, force)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupDestroy indicates an expected call of FlexgroupDestroy.
func (mr *MockOntapAPIMockRecorder) FlexgroupDestroy(ctx, volumeName, force any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupDestroy", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupDestroy), ctx, volumeName, force)
}
// FlexgroupExists mocks base method.
func (m *MockOntapAPI) FlexgroupExists(ctx context.Context, volumeName string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupExists", ctx, volumeName)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FlexgroupExists indicates an expected call of FlexgroupExists.
func (mr *MockOntapAPIMockRecorder) FlexgroupExists(ctx, volumeName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupExists", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupExists), ctx, volumeName)
}
// FlexgroupInfo mocks base method.
func (m *MockOntapAPI) FlexgroupInfo(ctx context.Context, volumeName string) (*api.Volume, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupInfo", ctx, volumeName)
ret0, _ := ret[0].(*api.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FlexgroupInfo indicates an expected call of FlexgroupInfo.
func (mr *MockOntapAPIMockRecorder) FlexgroupInfo(ctx, volumeName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupInfo", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupInfo), ctx, volumeName)
}
// FlexgroupListByPrefix mocks base method.
func (m *MockOntapAPI) FlexgroupListByPrefix(ctx context.Context, prefix string) (api.Volumes, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupListByPrefix", ctx, prefix)
ret0, _ := ret[0].(api.Volumes)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FlexgroupListByPrefix indicates an expected call of FlexgroupListByPrefix.
func (mr *MockOntapAPIMockRecorder) FlexgroupListByPrefix(ctx, prefix any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupListByPrefix", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupListByPrefix), ctx, prefix)
}
// FlexgroupModifyExportPolicy mocks base method.
func (m *MockOntapAPI) FlexgroupModifyExportPolicy(ctx context.Context, volumeName, policyName string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupModifyExportPolicy", ctx, volumeName, policyName)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupModifyExportPolicy indicates an expected call of FlexgroupModifyExportPolicy.
func (mr *MockOntapAPIMockRecorder) FlexgroupModifyExportPolicy(ctx, volumeName, policyName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupModifyExportPolicy", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupModifyExportPolicy), ctx, volumeName, policyName)
}
// FlexgroupModifySnapshotDirectoryAccess mocks base method.
func (m *MockOntapAPI) FlexgroupModifySnapshotDirectoryAccess(ctx context.Context, volumeName string, enable bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupModifySnapshotDirectoryAccess", ctx, volumeName, enable)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupModifySnapshotDirectoryAccess indicates an expected call of FlexgroupModifySnapshotDirectoryAccess.
func (mr *MockOntapAPIMockRecorder) FlexgroupModifySnapshotDirectoryAccess(ctx, volumeName, enable any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupModifySnapshotDirectoryAccess", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupModifySnapshotDirectoryAccess), ctx, volumeName, enable)
}
// FlexgroupModifyUnixPermissions mocks base method.
func (m *MockOntapAPI) FlexgroupModifyUnixPermissions(ctx context.Context, volumeNameInternal, volumeNameExternal, unixPermissions string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupModifyUnixPermissions", ctx, volumeNameInternal, volumeNameExternal, unixPermissions)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupModifyUnixPermissions indicates an expected call of FlexgroupModifyUnixPermissions.
func (mr *MockOntapAPIMockRecorder) FlexgroupModifyUnixPermissions(ctx, volumeNameInternal, volumeNameExternal, unixPermissions any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupModifyUnixPermissions", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupModifyUnixPermissions), ctx, volumeNameInternal, volumeNameExternal, unixPermissions)
}
// FlexgroupMount mocks base method.
func (m *MockOntapAPI) FlexgroupMount(ctx context.Context, name, junctionPath string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupMount", ctx, name, junctionPath)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupMount indicates an expected call of FlexgroupMount.
func (mr *MockOntapAPIMockRecorder) FlexgroupMount(ctx, name, junctionPath any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupMount", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupMount), ctx, name, junctionPath)
}
// FlexgroupSetComment mocks base method.
func (m *MockOntapAPI) FlexgroupSetComment(ctx context.Context, volumeNameInternal, volumeNameExternal, comment string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupSetComment", ctx, volumeNameInternal, volumeNameExternal, comment)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupSetComment indicates an expected call of FlexgroupSetComment.
func (mr *MockOntapAPIMockRecorder) FlexgroupSetComment(ctx, volumeNameInternal, volumeNameExternal, comment any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSetComment", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSetComment), ctx, volumeNameInternal, volumeNameExternal, comment)
}
// FlexgroupSetQosPolicyGroupName mocks base method.
func (m *MockOntapAPI) FlexgroupSetQosPolicyGroupName(ctx context.Context, name string, qos api.QosPolicyGroup) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupSetQosPolicyGroupName", ctx, name, qos)
ret0, _ := ret[0].(error)
return ret0
}
// FlexgroupSetQosPolicyGroupName indicates an expected call of FlexgroupSetQosPolicyGroupName.
func (mr *MockOntapAPIMockRecorder) FlexgroupSetQosPolicyGroupName(ctx, name, qos any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSetQosPolicyGroupName", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSetQosPolicyGroupName), ctx, name, qos)
}
// FlexgroupSetSize mocks base method.
func (m *MockOntapAPI) FlexgroupSetSize(ctx context.Context, name, newSize string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlexgroupSetSize", ctx, name, newSize)
ret0, _ := ret[0].(error)
return ret0
}
// NOTE(review): the methods below follow the standard MockGen layout
// (ctrl.Call with type-asserted returns, each paired with a recorder
// method). This appears to be generated, vendored mock code — confirm
// before hand-editing, since regeneration would overwrite changes.
// FlexgroupSetSize indicates an expected call of FlexgroupSetSize.
func (mr *MockOntapAPIMockRecorder) FlexgroupSetSize(ctx, name, newSize any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSetSize", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSetSize), ctx, name, newSize)
}

// FlexgroupSize mocks base method.
func (m *MockOntapAPI) FlexgroupSize(ctx context.Context, volumeName string) (uint64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupSize", ctx, volumeName)
	ret0, _ := ret[0].(uint64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FlexgroupSize indicates an expected call of FlexgroupSize.
func (mr *MockOntapAPIMockRecorder) FlexgroupSize(ctx, volumeName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSize", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSize), ctx, volumeName)
}

// FlexgroupSnapshotCreate mocks base method.
func (m *MockOntapAPI) FlexgroupSnapshotCreate(ctx context.Context, snapshotName, sourceVolume string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupSnapshotCreate", ctx, snapshotName, sourceVolume)
	ret0, _ := ret[0].(error)
	return ret0
}

// FlexgroupSnapshotCreate indicates an expected call of FlexgroupSnapshotCreate.
func (mr *MockOntapAPIMockRecorder) FlexgroupSnapshotCreate(ctx, snapshotName, sourceVolume any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSnapshotCreate", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSnapshotCreate), ctx, snapshotName, sourceVolume)
}

// FlexgroupSnapshotDelete mocks base method.
func (m *MockOntapAPI) FlexgroupSnapshotDelete(ctx context.Context, snapshotName, sourceVolume string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupSnapshotDelete", ctx, snapshotName, sourceVolume)
	ret0, _ := ret[0].(error)
	return ret0
}

// FlexgroupSnapshotDelete indicates an expected call of FlexgroupSnapshotDelete.
func (mr *MockOntapAPIMockRecorder) FlexgroupSnapshotDelete(ctx, snapshotName, sourceVolume any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSnapshotDelete", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSnapshotDelete), ctx, snapshotName, sourceVolume)
}

// FlexgroupSnapshotList mocks base method.
func (m *MockOntapAPI) FlexgroupSnapshotList(ctx context.Context, sourceVolume string) (api.Snapshots, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupSnapshotList", ctx, sourceVolume)
	ret0, _ := ret[0].(api.Snapshots)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FlexgroupSnapshotList indicates an expected call of FlexgroupSnapshotList.
func (mr *MockOntapAPIMockRecorder) FlexgroupSnapshotList(ctx, sourceVolume any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupSnapshotList", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupSnapshotList), ctx, sourceVolume)
}

// FlexgroupUnmount mocks base method.
func (m *MockOntapAPI) FlexgroupUnmount(ctx context.Context, name string, force bool) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupUnmount", ctx, name, force)
	ret0, _ := ret[0].(error)
	return ret0
}

// FlexgroupUnmount indicates an expected call of FlexgroupUnmount.
func (mr *MockOntapAPIMockRecorder) FlexgroupUnmount(ctx, name, force any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupUnmount", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupUnmount), ctx, name, force)
}

// FlexgroupUsedSize mocks base method.
func (m *MockOntapAPI) FlexgroupUsedSize(ctx context.Context, volumeName string) (int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FlexgroupUsedSize", ctx, volumeName)
	ret0, _ := ret[0].(int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FlexgroupUsedSize indicates an expected call of FlexgroupUsedSize.
func (mr *MockOntapAPIMockRecorder) FlexgroupUsedSize(ctx, volumeName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlexgroupUsedSize", reflect.TypeOf((*MockOntapAPI)(nil).FlexgroupUsedSize), ctx, volumeName)
}

// GetSLMDataLifs mocks base method.
func (m *MockOntapAPI) GetSLMDataLifs(ctx context.Context, ips, reportingNodeNames []string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSLMDataLifs", ctx, ips, reportingNodeNames)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSLMDataLifs indicates an expected call of GetSLMDataLifs.
func (mr *MockOntapAPIMockRecorder) GetSLMDataLifs(ctx, ips, reportingNodeNames any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSLMDataLifs", reflect.TypeOf((*MockOntapAPI)(nil).GetSLMDataLifs), ctx, ips, reportingNodeNames)
}

// GetSVMAggregateAttributes mocks base method.
func (m *MockOntapAPI) GetSVMAggregateAttributes(ctx context.Context) (map[string]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMAggregateAttributes", ctx)
	ret0, _ := ret[0].(map[string]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSVMAggregateAttributes indicates an expected call of GetSVMAggregateAttributes.
func (mr *MockOntapAPIMockRecorder) GetSVMAggregateAttributes(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMAggregateAttributes", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMAggregateAttributes), ctx)
}

// GetSVMAggregateNames mocks base method.
func (m *MockOntapAPI) GetSVMAggregateNames(ctx context.Context) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMAggregateNames", ctx)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSVMAggregateNames indicates an expected call of GetSVMAggregateNames.
func (mr *MockOntapAPIMockRecorder) GetSVMAggregateNames(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMAggregateNames", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMAggregateNames), ctx)
}

// GetSVMAggregateSpace mocks base method.
func (m *MockOntapAPI) GetSVMAggregateSpace(ctx context.Context, aggregate string) ([]api.SVMAggregateSpace, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMAggregateSpace", ctx, aggregate)
	ret0, _ := ret[0].([]api.SVMAggregateSpace)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSVMAggregateSpace indicates an expected call of GetSVMAggregateSpace.
func (mr *MockOntapAPIMockRecorder) GetSVMAggregateSpace(ctx, aggregate any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMAggregateSpace", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMAggregateSpace), ctx, aggregate)
}

// GetSVMPeers mocks base method.
func (m *MockOntapAPI) GetSVMPeers(ctx context.Context) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMPeers", ctx)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSVMPeers indicates an expected call of GetSVMPeers.
func (mr *MockOntapAPIMockRecorder) GetSVMPeers(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMPeers", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMPeers), ctx)
}

// GetSVMState mocks base method.
func (m *MockOntapAPI) GetSVMState(ctx context.Context) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMState", ctx)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSVMState indicates an expected call of GetSVMState.
func (mr *MockOntapAPIMockRecorder) GetSVMState(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMState", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMState), ctx)
}

// GetSVMUUID mocks base method.
func (m *MockOntapAPI) GetSVMUUID() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSVMUUID")
	ret0, _ := ret[0].(string)
	return ret0
}

// GetSVMUUID indicates an expected call of GetSVMUUID.
func (mr *MockOntapAPIMockRecorder) GetSVMUUID() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSVMUUID", reflect.TypeOf((*MockOntapAPI)(nil).GetSVMUUID))
}

// IgroupCreate mocks base method.
func (m *MockOntapAPI) IgroupCreate(ctx context.Context, initiatorGroupName, initiatorGroupType, osType string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupCreate", ctx, initiatorGroupName, initiatorGroupType, osType)
	ret0, _ := ret[0].(error)
	return ret0
}

// IgroupCreate indicates an expected call of IgroupCreate.
func (mr *MockOntapAPIMockRecorder) IgroupCreate(ctx, initiatorGroupName, initiatorGroupType, osType any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupCreate", reflect.TypeOf((*MockOntapAPI)(nil).IgroupCreate), ctx, initiatorGroupName, initiatorGroupType, osType)
}

// IgroupDestroy mocks base method.
func (m *MockOntapAPI) IgroupDestroy(ctx context.Context, initiatorGroupName string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupDestroy", ctx, initiatorGroupName)
	ret0, _ := ret[0].(error)
	return ret0
}

// IgroupDestroy indicates an expected call of IgroupDestroy.
func (mr *MockOntapAPIMockRecorder) IgroupDestroy(ctx, initiatorGroupName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupDestroy", reflect.TypeOf((*MockOntapAPI)(nil).IgroupDestroy), ctx, initiatorGroupName)
}

// IgroupGetByName mocks base method.
func (m *MockOntapAPI) IgroupGetByName(ctx context.Context, initiatorGroupName string) (map[string]bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupGetByName", ctx, initiatorGroupName)
	ret0, _ := ret[0].(map[string]bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IgroupGetByName indicates an expected call of IgroupGetByName.
func (mr *MockOntapAPIMockRecorder) IgroupGetByName(ctx, initiatorGroupName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupGetByName", reflect.TypeOf((*MockOntapAPI)(nil).IgroupGetByName), ctx, initiatorGroupName)
}

// IgroupList mocks base method.
func (m *MockOntapAPI) IgroupList(ctx context.Context) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupList", ctx)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IgroupList indicates an expected call of IgroupList.
func (mr *MockOntapAPIMockRecorder) IgroupList(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupList", reflect.TypeOf((*MockOntapAPI)(nil).IgroupList), ctx)
}

// IgroupListLUNsMapped mocks base method.
func (m *MockOntapAPI) IgroupListLUNsMapped(ctx context.Context, initiatorGroupName string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupListLUNsMapped", ctx, initiatorGroupName)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IgroupListLUNsMapped indicates an expected call of IgroupListLUNsMapped.
func (mr *MockOntapAPIMockRecorder) IgroupListLUNsMapped(ctx, initiatorGroupName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupListLUNsMapped", reflect.TypeOf((*MockOntapAPI)(nil).IgroupListLUNsMapped), ctx, initiatorGroupName)
}

// IgroupRemove mocks base method.
func (m *MockOntapAPI) IgroupRemove(ctx context.Context, initiatorGroupName, initiator string, force bool) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IgroupRemove", ctx, initiatorGroupName, initiator, force)
	ret0, _ := ret[0].(error)
	return ret0
}

// IgroupRemove indicates an expected call of IgroupRemove.
func (mr *MockOntapAPIMockRecorder) IgroupRemove(ctx, initiatorGroupName, initiator, force any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IgroupRemove", reflect.TypeOf((*MockOntapAPI)(nil).IgroupRemove), ctx, initiatorGroupName, initiator, force)
}

// IsSVMDRCapable mocks base method.
func (m *MockOntapAPI) IsSVMDRCapable(ctx context.Context) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsSVMDRCapable", ctx)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IsSVMDRCapable indicates an expected call of IsSVMDRCapable.
func (mr *MockOntapAPIMockRecorder) IsSVMDRCapable(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSVMDRCapable", reflect.TypeOf((*MockOntapAPI)(nil).IsSVMDRCapable), ctx)
}

// IscsiInitiatorGetDefaultAuth mocks base method.
func (m *MockOntapAPI) IscsiInitiatorGetDefaultAuth(ctx context.Context) (api.IscsiInitiatorAuth, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IscsiInitiatorGetDefaultAuth", ctx)
	ret0, _ := ret[0].(api.IscsiInitiatorAuth)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IscsiInitiatorGetDefaultAuth indicates an expected call of IscsiInitiatorGetDefaultAuth.
func (mr *MockOntapAPIMockRecorder) IscsiInitiatorGetDefaultAuth(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IscsiInitiatorGetDefaultAuth", reflect.TypeOf((*MockOntapAPI)(nil).IscsiInitiatorGetDefaultAuth), ctx)
}

// IscsiInitiatorSetDefaultAuth mocks base method.
func (m *MockOntapAPI) IscsiInitiatorSetDefaultAuth(ctx context.Context, authType, userName, passphrase, outboundUserName, outboundPassphrase string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IscsiInitiatorSetDefaultAuth", ctx, authType, userName, passphrase, outboundUserName, outboundPassphrase)
	ret0, _ := ret[0].(error)
	return ret0
}

// IscsiInitiatorSetDefaultAuth indicates an expected call of IscsiInitiatorSetDefaultAuth.
func (mr *MockOntapAPIMockRecorder) IscsiInitiatorSetDefaultAuth(ctx, authType, userName, passphrase, outboundUserName, outboundPassphrase any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IscsiInitiatorSetDefaultAuth", reflect.TypeOf((*MockOntapAPI)(nil).IscsiInitiatorSetDefaultAuth), ctx, authType, userName, passphrase, outboundUserName, outboundPassphrase)
}

// IscsiInterfaceGet mocks base method.
func (m *MockOntapAPI) IscsiInterfaceGet(ctx context.Context, svm string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IscsiInterfaceGet", ctx, svm)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IscsiInterfaceGet indicates an expected call of IscsiInterfaceGet.
func (mr *MockOntapAPIMockRecorder) IscsiInterfaceGet(ctx, svm any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IscsiInterfaceGet", reflect.TypeOf((*MockOntapAPI)(nil).IscsiInterfaceGet), ctx, svm)
}

// IscsiNodeGetNameRequest mocks base method.
func (m *MockOntapAPI) IscsiNodeGetNameRequest(ctx context.Context) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IscsiNodeGetNameRequest", ctx)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IscsiNodeGetNameRequest indicates an expected call of IscsiNodeGetNameRequest.
func (mr *MockOntapAPIMockRecorder) IscsiNodeGetNameRequest(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IscsiNodeGetNameRequest", reflect.TypeOf((*MockOntapAPI)(nil).IscsiNodeGetNameRequest), ctx)
}

// JobScheduleExists mocks base method.
func (m *MockOntapAPI) JobScheduleExists(ctx context.Context, replicationSchedule string) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "JobScheduleExists", ctx, replicationSchedule)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// JobScheduleExists indicates an expected call of JobScheduleExists.
func (mr *MockOntapAPIMockRecorder) JobScheduleExists(ctx, replicationSchedule any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "JobScheduleExists", reflect.TypeOf((*MockOntapAPI)(nil).JobScheduleExists), ctx, replicationSchedule)
}

// LunCloneCreate mocks base method.
func (m *MockOntapAPI) LunCloneCreate(ctx context.Context, flexvol, source, lunName string, qosPolicyGroup api.QosPolicyGroup) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LunCloneCreate", ctx, flexvol, source, lunName, qosPolicyGroup)
	ret0, _ := ret[0].(error)
	return ret0
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap_test.go | cmd/vsphere-xcopy-volume-populator/internal/ontap/ontap_test.go | package ontap
import (
"fmt"
"testing"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"github.com/netapp/trident/storage_drivers/ontap/api"
"go.uber.org/mock/gomock"
)
// TestNetappClonner_Map verifies that Map delegates to EnsureLunMapped and
// hands back a LUN carrying the requested name.
func TestNetappClonner_Map(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	const groupName = "test-igroup"
	sourceLUN := populator.LUN{Name: "test-lun"}
	ontapMock.EXPECT().EnsureLunMapped(gomock.Any(), groupName, sourceLUN.Name).Return(1, nil)
	mappedLUN, err := nc.Map(groupName, sourceLUN, nil)
	if err != nil {
		t.Errorf("Map() error = %v, wantErr %v", err, false)
	}
	if mappedLUN.Name != sourceLUN.Name {
		t.Errorf("Map() = %v, want %v", mappedLUN.Name, sourceLUN.Name)
	}
}
// TestNetappClonner_UnMap verifies that UnMap delegates to LunUnmap with the
// igroup name and the LUN name.
func TestNetappClonner_UnMap(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	const groupName = "test-igroup"
	sourceLUN := populator.LUN{Name: "test-lun"}
	ontapMock.EXPECT().LunUnmap(gomock.Any(), groupName, sourceLUN.Name).Return(nil)
	if err := nc.UnMap(groupName, sourceLUN, nil); err != nil {
		t.Errorf("UnMap() error = %v, wantErr %v", err, false)
	}
}
// TestNetappClonner_EnsureClonnerIgroup verifies that the igroup is created
// as a mixed/vmware group and that every adapter ID is added to it.
func TestNetappClonner_EnsureClonnerIgroup(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	const groupName = "test-igroup"
	adapterIDs := []string{"adapter1", "adapter2"}
	ontapMock.EXPECT().IgroupCreate(gomock.Any(), groupName, "mixed", "vmware").Return(nil)
	for _, adapter := range adapterIDs {
		ontapMock.EXPECT().EnsureIgroupAdded(gomock.Any(), groupName, adapter).Return(nil)
	}
	if _, err := nc.EnsureClonnerIgroup(groupName, adapterIDs); err != nil {
		t.Errorf("EnsureClonnerIgroup() error = %v, wantErr %v", err, false)
	}
}
// TestNetappClonner_ResolvePVToLUN verifies that the PV's internalName
// attribute is turned into a /vol/<name>/lun0 path and resolved through
// LunGetByName.
func TestNetappClonner_ResolvePVToLUN(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	pv := populator.PersistentVolume{
		Name: "test-pv",
		VolumeAttributes: map[string]string{
			"internalName": "test-internal-name",
		},
	}
	lunPath := fmt.Sprintf("/vol/%s/lun0", pv.VolumeAttributes["internalName"])
	wantLUN := &api.Lun{
		Name:         lunPath,
		SerialNumber: "test-serial",
	}
	ontapMock.EXPECT().LunGetByName(gomock.Any(), lunPath).Return(wantLUN, nil)
	gotLUN, err := nc.ResolvePVToLUN(pv)
	if err != nil {
		t.Errorf("ResolvePVToLUN() error = %v, wantErr %v", err, false)
	}
	if gotLUN.Name != wantLUN.Name {
		t.Errorf("ResolvePVToLUN() = %v, want %v", gotLUN.Name, wantLUN.Name)
	}
}
// TestNetappClonner_Get verifies that Get returns the first iSCSI data LIF
// reported by the backend.
func TestNetappClonner_Get(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	dataLIFs := []string{"192.0.2.1", "192.0.2.2"}
	ontapMock.EXPECT().NetInterfaceGetDataLIFs(gomock.Any(), "iscsi").Return(dataLIFs, nil)
	ip, err := nc.Get(populator.LUN{}, nil)
	if err != nil {
		t.Errorf("Get() error = %v, wantErr %v", err, false)
	}
	if ip != dataLIFs[0] {
		t.Errorf("Get() = %v, want %v", ip, dataLIFs[0])
	}
}
// TestNetappClonner_CurrentMappedGroups verifies that the igroups reported
// by LunListIgroupsMapped are passed through.
func TestNetappClonner_CurrentMappedGroups(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	ontapMock := NewMockOntapAPI(mockCtrl)
	nc := &NetappClonner{api: ontapMock}
	sourceLUN := populator.LUN{Name: "test-lun"}
	wantGroups := []string{"group1", "group2"}
	ontapMock.EXPECT().LunListIgroupsMapped(gomock.Any(), sourceLUN.Name).Return(wantGroups, nil)
	gotGroups, err := nc.CurrentMappedGroups(sourceLUN, nil)
	if err != nil {
		t.Errorf("CurrentMappedGroups() error = %v, wantErr %v", err, false)
	}
	if len(gotGroups) != len(wantGroups) {
		t.Errorf("CurrentMappedGroups() = %v, want %v", gotGroups, wantGroups)
	}
}
// TestNewNetappClonner verifies that construction fails for an unreachable
// hostname.
func TestNewNetappClonner(t *testing.T) {
	if _, err := NewNetappClonner("invalid-hostname", "username", "password"); err == nil {
		t.Errorf("NewNetappClonner() error = %v, wantErr %v", err, true)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-storage.go | cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-storage.go | package vantara
import (
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"regexp"
	"strings"

	"k8s.io/klog/v2"
)
// Minimum configuration-manager REST API version this code supports (1.9.x);
// checked by CheckAPIVersion.
const requiredMajorVersion = 1
const requiredMinorVersion = 9
// VantaraStorageAPI bundles the connection settings and per-operation
// parameters used to talk to a configuration-manager REST server.
type VantaraStorageAPI struct {
	StorageID    string        // storage device identifier
	RestServerIP string        // REST server address (may be a URL; possibly base64-encoded)
	RestSvrPort  string        // REST server port
	UserID       string        // API user (possibly base64-encoded)
	Password     string        // API password (possibly base64-encoded)
	VantaraObj   VantaraObject // parameters for the requested operation
}

// VantaraObject carries operation parameters (e.g. "ldevId",
// "hostGroupIds") keyed by name.
type VantaraObject map[string]interface{}

// NewVantaraStorageAPI builds a VantaraStorageAPI from the given connection
// settings and operation parameters.
func NewVantaraStorageAPI(storageID, restServerIP, restSvrPort, userID, password string, vantaraObj VantaraObject) *VantaraStorageAPI {
	api := VantaraStorageAPI{
		StorageID:    storageID,
		RestServerIP: restServerIP,
		RestSvrPort:  restSvrPort,
		UserID:       userID,
		Password:     password,
		VantaraObj:   vantaraObj,
	}
	return &api
}
// decodeBase64 is currently a pass-through: base64 decoding of the
// connection settings is disabled and the input is returned unchanged.
// The former decoding implementation is kept below for reference.
func decodeBase64(encoded string) string {
	// decodedBytes, err := base64.StdEncoding.DecodeString(encoded)
	// if err != nil {
	// 	panic(err)
	// }
	// return string(decodedBytes)
	return encoded
}
// ipv4Pattern matches dotted-quad candidates; compiled once at package scope
// instead of on every call.
var ipv4Pattern = regexp.MustCompile(`\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b`)

// extractIPAddress returns the first valid IPv4 address embedded in url
// (which may be a bare IP, host:port, or a full URL). It returns an error
// when no valid IPv4 address is present.
func extractIPAddress(url string) (string, error) {
	// The regex alone would accept out-of-range octets such as 999.1.1.1,
	// so every candidate is additionally validated with net.ParseIP.
	for _, candidate := range ipv4Pattern.FindAllString(url, -1) {
		if net.ParseIP(candidate) != nil {
			return candidate, nil
		}
	}
	return "", errors.New("IP address not found")
}
// PortsEntry describes one host-group mapping of an LDEV on a storage port,
// as returned by the configuration-manager REST API.
type PortsEntry struct {
	HostGroupName   string  `json:"hostGroupName"`
	HostGroupNumber float64 `json:"hostGroupNumber"`
	Lun             float64 `json:"lun"`
	PortID          string  `json:"portId"`
}

// LdevEntry is the subset of an LDEV REST resource needed here: its list of
// port/host-group mappings.
type LdevEntry struct {
	Ports []PortsEntry `json:"ports"`
}

// getlunID returns the LUN number (as a decimal string) under which the LDEV
// is mapped to the given host group. hostGroupId has the form
// "<portId>,<hostGroupNumber>". An error is returned when the identifier is
// malformed or no matching mapping exists.
func getlunID(ldevJson LdevEntry, hostGroupId string) (string, error) {
	parts := strings.SplitN(hostGroupId, ",", 2)
	if len(parts) != 2 {
		// Previously an out-of-range index panic; report a usable error instead.
		return "", fmt.Errorf("invalid host group id %q: expected \"<portId>,<hostGroupNumber>\"", hostGroupId)
	}
	portID := parts[0]
	hostGroupNumber := parts[1]
	for _, port := range ldevJson.Ports {
		if port.PortID == portID && fmt.Sprintf("%d", int(port.HostGroupNumber)) == hostGroupNumber {
			return fmt.Sprintf("%d", int(port.Lun)), nil
		}
	}
	return "", errors.New("LUN not found")
}
// VantaraStorage executes one REST operation (actionType: GETLDEV, ADDPATH,
// DELETEPATH or GETPORTDETAILS) against the configuration-manager server:
// it checks the API version, opens a session, performs the action and always
// discards the session before returning. The decoded JSON response of the
// last request is returned.
func (v *VantaraStorageAPI) VantaraStorage(actionType string) (map[string]interface{}, error) {
	headers := map[string]string{
		"Content-Type":        "application/json",
		"Accept":              "application/json",
		"Response-Job-Status": "Completed",
	}
	body := map[string]string{}
	sessionId := "0"
	var err error
	var decodedIp, userCreds string
	// The decode flag selects whether the connection settings are treated as
	// base64-encoded (decodeBase64 is currently a pass-through either way).
	if decode {
		decodedIp, err = extractIPAddress(decodeBase64(v.RestServerIP))
		decodedUserID := decodeBase64(v.UserID)
		decodedPassword := decodeBase64(v.Password)
		userCreds = decodedUserID + ":" + decodedPassword
	} else {
		decodedIp, err = extractIPAddress(v.RestServerIP)
		userCreds = v.UserID + ":" + v.Password
	}
	if err != nil {
		klog.Errorf("Failed to extract IP address: %v", err)
		return nil, err
	}
	api := NewBlockStorageAPI(decodedIp, v.RestSvrPort, v.StorageID)
	klog.Infof("API object %v", api)
	// Check API version.
	url := api.APIVersion()
	klog.Infof("API version URL: %s", url)
	r, err := MakeHTTPRequest("GET", url, nil, headers, "basic", userCreds)
	if err != nil {
		klog.Errorf("Failed to get API version: %v", err)
		return nil, err
	}
	apiVersion, ok := r["apiVersion"].(string)
	if !ok {
		// Previously an unchecked type assertion that panicked on an
		// unexpected payload.
		return nil, fmt.Errorf("unexpected API version response: %v", r)
	}
	// Previously the result of this check was silently discarded.
	if err := CheckAPIVersion(apiVersion, requiredMajorVersion, requiredMinorVersion); err != nil {
		klog.Errorf("Unsupported API version: %v", err)
		return nil, err
	}
	// Generate a session.
	url = api.GenerateSession()
	r, err = MakeHTTPRequest("POST", url, body, headers, "basic", userCreds)
	if err != nil {
		klog.Errorf("Failed to generate session: %v", err)
		return nil, err
	}
	klog.Infof("Session generated successfully: %v", r)
	// Discard the session after the function returns; the closure reads
	// headers and sessionId late, so it uses the token assigned below.
	defer func() {
		url = api.DiscardSession(sessionId)
		resp, err := MakeHTTPRequest("DELETE", url, body, headers, "session", headers["Authorization"])
		if err != nil {
			klog.Errorf("Failed to discard session: %v", err)
			return
		}
		klog.Infof("Session discarded successfully: %v", resp)
		klog.Flush()
	}()
	token, ok := r["token"].(string)
	if !ok {
		return nil, fmt.Errorf("session response has no token: %v", r)
	}
	auth := "Session " + token
	sessionIdFloat64, ok := r["sessionId"].(float64)
	if !ok {
		return nil, fmt.Errorf("session response has no sessionId: %v", r)
	}
	sessionId = fmt.Sprintf("%d", int(sessionIdFloat64))
	headers["Authorization"] = auth
	switch actionType {
	case GETLDEV:
		// Callers must have set VantaraObj["ldevId"] (string).
		url = api.Ldev(v.VantaraObj["ldevId"].(string))
		r, err = MakeHTTPRequest("GET", url, nil, headers, "session", headers["Authorization"])
		if err != nil {
			klog.Errorf("Failed to get LDEV info: %v", err)
			return nil, err
		}
	case ADDPATH:
		// Map the LDEV into every requested host group.
		url = api.Luns()
		body["ldevId"] = v.VantaraObj["ldevId"].(string)
		for _, hostGroupId := range v.VantaraObj["hostGroupIds"].([]string) {
			parts := strings.SplitN(hostGroupId, ",", 2)
			if len(parts) != 2 {
				return nil, fmt.Errorf("invalid host group id %q: expected \"<portId>,<hostGroupNumber>\"", hostGroupId)
			}
			body["portId"] = parts[0]
			body["hostGroupNumber"] = parts[1]
			bodyJson, _ := json.Marshal(body)
			klog.Infof("Body: %s", string(bodyJson))
			if _, err := api.InvokeAsyncCommand("POST", url, body, headers); err != nil {
				// Was fmt.Println; klog keeps error reporting consistent
				// with the rest of the file.
				klog.Errorf("Failed to add path: %v", err)
				return nil, err
			}
		}
	case DELETEPATH:
		// Fetch the LDEV's current mappings, then delete each requested one.
		var ldevEntry LdevEntry
		url = api.Ldev(v.VantaraObj["ldevId"].(string))
		r, err = MakeHTTPRequest("GET", url, nil, headers, "session", headers["Authorization"])
		if err != nil {
			klog.Errorf("Failed to get LDEV info: %v", err)
			return nil, err
		}
		ldevEntryBytes, _ := json.Marshal(r)
		// Previously the Unmarshal error was ignored.
		if err := json.Unmarshal(ldevEntryBytes, &ldevEntry); err != nil {
			klog.Errorf("Failed to parse LDEV info: %v", err)
			return nil, err
		}
		klog.Infof("LDEV entry: %v", ldevEntry)
		for _, hostGroupId := range v.VantaraObj["hostGroupIds"].([]string) {
			lunId, err := getlunID(ldevEntry, hostGroupId)
			if err != nil {
				klog.Errorf("Failed to get LUN ID: %v", err)
				return nil, err
			}
			objectID := hostGroupId + "," + lunId
			url = api.Lun(objectID)
			if _, err := api.InvokeAsyncCommand("DELETE", url, body, headers); err != nil {
				klog.Errorf("Failed to delete path: %v", err)
				return nil, err
			}
		}
	case GETPORTDETAILS:
		url = api.Ports() + "?detailInfoType=" + "logins"
		r, err = MakeHTTPRequest("GET", url, nil, headers, "session", headers["Authorization"])
		if err != nil {
			klog.Errorf("Failed to get port details: %v", err)
			return nil, err
		}
	default:
	}
	jsonData, _ := json.MarshalIndent(r, "", " ")
	klog.Infof("Response: %s", string(jsonData))
	return r, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-api.go | cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-api.go | package vantara
import (
"bytes"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
)
// BlockStorageAPI builds the REST endpoint URLs for one storage device
// behind a configuration-manager server.
type BlockStorageAPI struct {
	GumIPAddr  string // management (GUM) address
	Port       string // REST server port
	StorageID  string // storage device ID
	BaseURL    string // https://<ip>:<port>/ConfigurationManager/v1
	ObjectURL  string // object sub-path ("/objects")
	ServiceURL string // service sub-path ("/services")
}

// NewBlockStorageAPI returns a BlockStorageAPI rooted at the given
// management address, port and storage device ID.
func NewBlockStorageAPI(gumIPAddr, port, storageID string) *BlockStorageAPI {
	api := &BlockStorageAPI{
		GumIPAddr:  gumIPAddr,
		Port:       port,
		StorageID:  storageID,
		ObjectURL:  "/objects",
		ServiceURL: "/services",
	}
	api.BaseURL = fmt.Sprintf("https://%s:%s/ConfigurationManager/v1", gumIPAddr, port)
	return api
}
// GetStorageID reports the storage device ID this API instance targets.
func (api *BlockStorageAPI) GetStorageID() string {
	return api.StorageID
}

// Ldevs returns the LDEV collection URL.
func (api *BlockStorageAPI) Ldevs() string {
	return fmt.Sprintf("%s%s/ldevs", api.BaseURL, api.ObjectURL)
}

// Ldev returns the URL of a single LDEV object.
func (api *BlockStorageAPI) Ldev(objectID string) string {
	return api.Ldevs() + "/" + objectID
}

// GenerateSession returns the session-creation URL.
func (api *BlockStorageAPI) GenerateSession() string {
	return fmt.Sprintf("%s%s/sessions", api.BaseURL, api.ObjectURL)
}

// DiscardSession returns the URL of a single session object.
func (api *BlockStorageAPI) DiscardSession(objectID string) string {
	return fmt.Sprintf("%s%s/sessions/%s", api.BaseURL, api.ObjectURL, objectID)
}

// Job returns the URL of a single job object.
func (api *BlockStorageAPI) Job(objectID string) string {
	return fmt.Sprintf("%s%s/jobs/%s", api.BaseURL, api.ObjectURL, objectID)
}

// Ports returns the port collection URL.
func (api *BlockStorageAPI) Ports() string {
	return fmt.Sprintf("%s%s/ports", api.BaseURL, api.ObjectURL)
}

// Luns returns the LUN-mapping collection URL.
func (api *BlockStorageAPI) Luns() string {
	return fmt.Sprintf("%s%s/luns", api.BaseURL, api.ObjectURL)
}

// Lun returns the URL of a single LUN-mapping object.
func (api *BlockStorageAPI) Lun(objectID string) string {
	return api.Luns() + "/" + objectID
}

// APIVersion returns the configuration-manager version endpoint URL.
func (api *BlockStorageAPI) APIVersion() string {
	return fmt.Sprintf("https://%s:%s/ConfigurationManager/configuration/version", api.GumIPAddr, api.Port)
}
// MakeHTTPRequest performs one REST call and decodes the JSON response into
// a generic map. body (optional) is JSON-encoded. authType is "basic"
// (authValue "user:password") or "session" (authValue is the full
// Authorization header value); any other authType sends no Authorization
// header. TLS certificate verification is disabled, as the storage
// management endpoint typically serves a self-signed certificate.
//
// HTTP 503 responses are retried after a 60s pause, up to
// maxUnavailableRetries times (previously an unbounded recursion).
// Headers are deliberately not logged: they can carry the session token.
func MakeHTTPRequest(methodType, url string, body, headers map[string]string, authType, authValue string) (map[string]interface{}, error) {
	const maxUnavailableRetries = 5
	klog.Infof("Making HTTP request:")
	klog.Infof("Method: %s", methodType)
	klog.Infof("URL: %s", url)
	klog.Infof("Auth Type: %s", authType)
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	for attempt := 0; ; attempt++ {
		// Build the request body fresh per attempt: a Reader is consumed by
		// each send.
		var reqBody io.Reader
		if body != nil {
			jsonBody, err := json.Marshal(body)
			if err != nil {
				klog.Errorf("Error encoding JSON: %v", err)
				return nil, err
			}
			reqBody = bytes.NewReader(jsonBody)
		}
		req, err := http.NewRequest(methodType, url, reqBody)
		if err != nil {
			klog.Errorf("Error creating request: %v", err)
			return nil, err
		}
		for key, value := range headers {
			req.Header.Set(key, value)
		}
		if authType == "basic" {
			// authValue should be "username:password".
			base64Auth := base64.StdEncoding.EncodeToString([]byte(authValue))
			req.Header.Set("Authorization", "Basic "+base64Auth)
		} else if authType == "session" {
			// authValue should be the full header value ("Session <token>").
			req.Header.Set("Authorization", authValue)
		}
		resp, err := client.Do(req)
		if err != nil {
			klog.Errorf("Error making request: %v", err)
			return nil, err
		}
		klog.Infof("Response status: %s", resp.Status)
		if resp.StatusCode == http.StatusServiceUnavailable {
			resp.Body.Close()
			if attempt >= maxUnavailableRetries {
				return nil, fmt.Errorf("request failed with status code: %d", resp.StatusCode)
			}
			klog.Errorf("Service unavailable, retrying...")
			time.Sleep(60 * time.Second)
			continue
		}
		result, err := decodeRESTResponse(resp)
		resp.Body.Close()
		return result, err
	}
}

// decodeRESTResponse validates the status code and decodes the JSON body; an
// empty body yields a nil map without error (some DELETE endpoints return no
// content).
func decodeRESTResponse(resp *http.Response) (map[string]interface{}, error) {
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		klog.Errorf("Request failed with status code: %d", resp.StatusCode)
		return nil, fmt.Errorf("request failed with status code: %d", resp.StatusCode)
	}
	var result map[string]interface{}
	// Previously the decode error was silently ignored.
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil && err != io.EOF {
		klog.Errorf("Error decoding response body: %v", err)
		return nil, err
	}
	return result, nil
}
// checkUpdate fetches the current state of the asynchronous job jobID.
// authType/authValue are passed empty on purpose: the caller-supplied
// headers map already contains the session "Authorization" header.
func (api *BlockStorageAPI) checkUpdate(jobID string, headers map[string]string) (map[string]interface{}, error) {
	url := api.Job(jobID)
	return MakeHTTPRequest("GET", url, nil, headers, "", "")
}
// CheckAPIVersion verifies that apiVersion ("major.minor[.patch]") is at
// least requiredMajorVersion.requiredMinorVersion, returning a descriptive
// error when the version string is malformed or too old.
func CheckAPIVersion(apiVersion string, requiredMajorVersion, requiredMinorVersion int) error {
	versionParts := strings.Split(apiVersion, ".")
	if len(versionParts) < 2 {
		return fmt.Errorf("invalid API version format: %s", apiVersion)
	}
	major, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return fmt.Errorf("invalid major version: %s", versionParts[0])
	}
	minor, err := strconv.Atoi(versionParts[1])
	if err != nil {
		return fmt.Errorf("invalid minor version: %s", versionParts[1])
	}
	// Accept any strictly newer major version, or the required major with a
	// sufficient minor.
	if major > requiredMajorVersion {
		return nil
	}
	if major == requiredMajorVersion && minor >= requiredMinorVersion {
		return nil
	}
	return fmt.Errorf("this program requires API Version %d.%d.x or newer", requiredMajorVersion, requiredMinorVersion)
}
// InvokeAsyncCommand issues a REST call that starts an asynchronous job and
// polls the job object until it reports "Completed", backing off
// exponentially (1s doubling up to 120s, at most 10 polls). It returns the
// final job status, or an error on request failure, malformed job payloads,
// or poll exhaustion.
func (api *BlockStorageAPI) InvokeAsyncCommand(methodType, url string, body, headers map[string]string) (string, error) {
	result, err := MakeHTTPRequest(methodType, url, body, headers, "session", headers["Authorization"])
	if err != nil {
		return "", err
	}
	klog.Infof("Request was accepted. JOB URL: %v", result["self"])
	// Previously result["jobId"] was asserted unconditionally, which panics
	// when the server returns an unexpected payload.
	jobIDFloat, ok := result["jobId"].(float64)
	if !ok {
		return "", fmt.Errorf("async response has no jobId: %v", result)
	}
	jobID := fmt.Sprintf("%d", int(jobIDFloat))
	status := "Initializing"
	retryCount := 1
	waitTime := 1 // FIRST_WAIT_TIME
	for status != "Completed" {
		if retryCount > 10 { // MAX_RETRY_COUNT
			return "", fmt.Errorf("timeout error: operation was not completed")
		}
		time.Sleep(time.Duration(waitTime) * time.Second)
		jobResult, err := api.checkUpdate(jobID, headers)
		if err != nil {
			klog.Errorf("Error checking job status: %v", err)
			return "", err
		}
		status, ok = jobResult["status"].(string)
		if !ok {
			return "", fmt.Errorf("job response has no status: %v", jobResult)
		}
		klog.Infof("Status: %s", status)
		// Exponential backoff capped at 120 seconds.
		if waitTime*2 < 120 {
			waitTime *= 2
		} else {
			waitTime = 120
		}
		retryCount++
	}
	klog.Infof("Async job was succeeded. status: %s", status)
	return status, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara.go | cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara.go | package vantara
import (
"encoding/json"
"fmt"
"os"
"strings"
"k8s.io/klog/v2"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
)
// decode controls whether connection settings read from the environment are
// treated as base64-encoded before use (decodeBase64 is currently a
// pass-through either way).
const decode = true

// Action types accepted by VantaraStorageAPI.VantaraStorage.
const (
	GETLDEV        = "getLdev"        // fetch a single LDEV object
	ADDPATH        = "addPath"        // map an LDEV into host groups
	DELETEPATH     = "deletePath"     // unmap an LDEV from host groups
	GETPORTDETAILS = "getPortDetails" // fetch port details (incl. logins)
)

// VantaraCloner adapts the Vantara REST API to the populator's storage
// cloner interface.
type VantaraCloner struct {
	api VantaraStorageAPI
}
// NewVantaraClonner builds a VantaraCloner.
//
// NOTE(review): the hostname/username/password parameters are currently
// ignored — all connection settings come from environment variables via
// getStorageEnvVars (whose error result is always nil today and is
// discarded here). Confirm whether the parameters should take precedence
// over the environment.
func NewVantaraClonner(hostname, username, password string) (VantaraCloner, error) {
	vantaraObj := make(VantaraObject)
	envStorage, _ := getStorageEnvVars()
	v := getNewVantaraStorageAPIfromEnv(envStorage, vantaraObj)
	return VantaraCloner{api: *v}, nil
}
// getStorageEnvVars collects the storage connection settings from the
// environment (STORAGE_ID, STORAGE_HOSTNAME, STORAGE_PORT, STORAGE_USERNAME,
// STORAGE_PASSWORD, and the colon-separated HOSTGROUP_ID_LIST) into a
// generic map. The error result is always nil today.
func getStorageEnvVars() (map[string]interface{}, error) {
	hgids := []string{}
	if raw := os.Getenv("HOSTGROUP_ID_LIST"); raw != "" {
		for _, entry := range strings.Split(raw, ":") {
			if trimmed := strings.TrimSpace(entry); trimmed != "" {
				hgids = append(hgids, trimmed)
			}
		}
	}
	storageEnvVars := map[string]interface{}{
		"storageId":    os.Getenv("STORAGE_ID"),
		"restServerIP": os.Getenv("STORAGE_HOSTNAME"),
		"port":         os.Getenv("STORAGE_PORT"),
		"userID":       os.Getenv("STORAGE_USERNAME"),
		"password":     os.Getenv("STORAGE_PASSWORD"),
		"hostGroupIds": hgids,
	}
	// Credentials are deliberately logged as empty strings.
	klog.Info(
		"storageId: ", storageEnvVars["storageId"],
		"restServerIP: ", storageEnvVars["restServerIP"],
		"port: ", storageEnvVars["port"],
		"userID: ", "",
		"password: ", "",
		"hostGroupID: ", storageEnvVars["hostGroupIds"],
	)
	return storageEnvVars, nil
}
// getNewVantaraStorageAPIfromEnv turns the environment-derived settings map
// into a configured VantaraStorageAPI, stashing any environment host-group
// override into the shared VantaraObject.
func getNewVantaraStorageAPIfromEnv(envVars map[string]interface{}, vantaraObj VantaraObject) *VantaraStorageAPI {
	vantaraObj["envHostGroupIds"] = envVars["hostGroupIds"].([]string)
	storageID := envVars["storageId"].(string)
	restServerIP := envVars["restServerIP"].(string)
	port := envVars["port"].(string)
	userID := envVars["userID"].(string)
	password := envVars["password"].(string)
	return NewVantaraStorageAPI(storageID, restServerIP, port, userID, password, vantaraObj)
}
// CurrentMappedGroups returns the "portId,hostGroupNumber" pairs the LUN is
// currently presented on, derived from the LDEV "ports" attribute.
func (v *VantaraCloner) CurrentMappedGroups(lun populator.LUN, context populator.MappingContext) ([]string, error) {
	ldev := v.ShowLdev(lun)
	klog.Infof("LDEV: %+v", ldev) // LDEV is a map[string]interface{}
	ports, ok := ldev["ports"].([]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid type for LDEV['ports'], expected []interface{}")
	}
	groups := []string{}
	for _, entry := range ports {
		port, ok := entry.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("invalid type for port, expected map[string]interface{}")
		}
		portID, _ := port["portId"].(string)
		// hostGroupNumber may arrive as a JSON number or a string.
		var hgNumber string
		switch value := port["hostGroupNumber"].(type) {
		case float64:
			hgNumber = fmt.Sprintf("%d", int(value))
		case string:
			hgNumber = value
		default:
			return nil, fmt.Errorf("invalid type for port['hostGroupNumber']")
		}
		groups = append(groups, portID+","+hgNumber)
		klog.Infof("portID: %s, hostGroupNumber: %s", portID, hgNumber)
	}
	return groups, nil
}
// ResolvePVToLUN derives LUN details from a PV volume handle of the form
// "01--<protocol>--<storageDeviceID>--<ldevID>--<ldevNickName>".
func (v *VantaraCloner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	fields := strings.Split(pv.VolumeHandle, "--")
	if len(fields) != 5 || fields[0] != "01" {
		return populator.LUN{}, fmt.Errorf("invalid volume handle: %s", pv.VolumeHandle)
	}
	lun := populator.LUN{
		Protocol:            fields[1],
		StorageSerialNumber: fields[2],
		LDeviceID:           fields[3],
		Name:                fields[4],
		VolumeHandle:        pv.VolumeHandle,
	}
	klog.Infof("Resolved LUN: %+v", lun)
	return lun, nil
}
// GetNaaID enriches the LUN with provider ID, serial number and NAA derived
// from the LDEV's naaId attribute. On an unexpected or malformed naaId the
// input LUN is returned unchanged (previously this panicked on a failed type
// assertion or a naaId shorter than 6 characters).
func (v *VantaraCloner) GetNaaID(lun populator.LUN) populator.LUN {
	LDEV := v.ShowLdev(lun)
	naaID, ok := LDEV["naaId"].(string)
	if !ok || len(naaID) < 6 {
		klog.Errorf("unexpected naaId in LDEV response: %v", LDEV["naaId"])
		return lun
	}
	lun.ProviderID = naaID[:6]
	lun.SerialNumber = naaID[6:]
	lun.NAA = fmt.Sprintf("naa.%s", naaID)
	return lun
}
// EnsureClonnerIgroup resolves the host-group IDs the xcopy clonner should
// use: the HOSTGROUP_ID_LIST environment override wins; otherwise they are
// discovered from the storage's port login table via GETPORTDETAILS.
// Returns a MappingContext carrying "hostGroupIds".
func (v *VantaraCloner) EnsureClonnerIgroup(xcopyInitiatorGroup string, hbaUIDs []string) (populator.MappingContext, error) {
	if v.api.VantaraObj["envHostGroupIds"] != nil {
		hgids := v.api.VantaraObj["envHostGroupIds"].([]string)
		klog.Infof("HostGroupIDs used from environment variable: %s", hgids)
		return populator.MappingContext{"hostGroupIds": hgids}, nil
	}
	// Get the host group IDs from the storage.
	klog.Infof("Fetching host group IDs from storage")
	// Previously the error was discarded, which could lead to marshalling a
	// nil/partial response; fail fast instead.
	r, err := v.api.VantaraStorage(GETPORTDETAILS)
	if err != nil {
		klog.Errorf("Error fetching port details: %s", err)
		return nil, err
	}
	jsonBytes, err := json.Marshal(r)
	if err != nil {
		klog.Errorf("Error marshalling map to JSON: %s", err)
		return nil, err
	}
	var jsonData JSONData
	if err := json.Unmarshal(jsonBytes, &jsonData); err != nil {
		klog.Errorf("Error parsing JSON: %s", err)
		return nil, err
	}
	ret := FindHostGroupIDs(jsonData, hbaUIDs)
	// Best-effort pretty print for the log; the error is deliberately ignored.
	jsonBytes, _ = json.MarshalIndent(ret, "", " ")
	klog.Infof("HostGroupIDs: %s", string(jsonBytes))
	hostGroupIds := make([]string, len(ret))
	for i, login := range ret {
		hostGroupIds[i] = login.HostGroupId
	}
	klog.Infof("HostGroupIDs: %s", hostGroupIds)
	return populator.MappingContext{"hostGroupIds": hostGroupIds}, nil
}
// Map presents the LUN to the host groups from the mapping context and fills
// in its NAA identifiers. The ADDPATH error was previously discarded, letting
// a failed mapping look successful; it is now propagated.
func (v *VantaraCloner) Map(xcopyInitiatorGroup string, lun populator.LUN, context populator.MappingContext) (populator.LUN, error) {
	v.api.VantaraObj["ldevId"] = lun.LDeviceID
	v.api.VantaraObj["hostGroupIds"] = context["hostGroupIds"].([]string)
	if _, err := v.api.VantaraStorage(ADDPATH); err != nil {
		return lun, fmt.Errorf("failed to add path for LDEV %s: %w", lun.LDeviceID, err)
	}
	lun = v.GetNaaID(lun)
	return lun, nil
}
// UnMap removes the LUN's paths to the host groups from the mapping context.
// The DELETEPATH error was previously discarded and nil always returned; it
// is now propagated so callers can react to a failed unmap.
func (v *VantaraCloner) UnMap(xcopyInitiatorGroup string, lun populator.LUN, context populator.MappingContext) error {
	v.api.VantaraObj["ldevId"] = lun.LDeviceID
	v.api.VantaraObj["hostGroupIds"] = context["hostGroupIds"].([]string)
	if _, err := v.api.VantaraStorage(DELETEPATH); err != nil {
		return fmt.Errorf("failed to delete path for LDEV %s: %w", lun.LDeviceID, err)
	}
	return nil
}
// ShowLdev fetches the LDEV attribute map for the given LUN from the storage.
// The underlying API error is discarded because this helper returns only the
// map; on failure the result may be nil, so callers must type-check every
// value they read from it.
func (v *VantaraCloner) ShowLdev(lun populator.LUN) map[string]interface{} {
	v.api.VantaraObj["ldevId"] = lun.LDeviceID
	r, _ := v.api.VantaraStorage(GETLDEV)
	return r
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-hgid.go | cmd/vsphere-xcopy-volume-populator/internal/vantara/vantara-hgid.go | package vantara
import (
"strings"
"k8s.io/klog/v2"
)
// Logins describes a single initiator login on a storage port, as returned
// by the port-details API. FC and iSCSI fields are mutually exclusive in
// practice; unused fields are left empty.
type Logins struct {
	HostGroupId string `json:"hostGroupId"`         // "<portId>,<hostGroupNumber>" style identifier
	Islogin string `json:"isLogin"`                 // whether the initiator is currently logged in
	LoginWWN string `json:"loginWwn"`               // FC initiator WWPN (uppercase hex)
	WWNNickName string `json:"wwnNickName"`         // admin-assigned nickname for the WWN
	IscsiNickName string `json:"iscsiNickName"`     // iSCSI initiator nickname
	IscsiTargetName string `json:"iscsiTargetName"` // iSCSI target IQN
	LoginIscsiName string `json:"loginIscsiName"`   // iSCSI initiator IQN
}
// DataEntry is one storage port with its initiator logins, as reported by
// the port-details API.
type DataEntry struct {
	PortID string `json:"portId"`   // storage port identifier
	WWN string `json:"wwn"`         // the port's own WWN
	Logins []Logins `json:"logins"` // initiators seen on this port
}
// JSONData is the top-level envelope of the port-details API response.
type JSONData struct {
	Data []DataEntry `json:"data"`
}
// FindHostGroupIDs scans the port login table for entries whose FC WWPN
// matches one of the given HBA UIDs ("fc.<wwnn>:<wwpn>") and returns those
// logins with the iSCSI fields blanked. iqn./nqn. UIDs are skipped; anything
// else is logged as unknown.
func FindHostGroupIDs(jsonData JSONData, hbaUIDs []string) []Logins {
	matches := []Logins{}
	for _, entry := range jsonData.Data {
		for _, login := range entry.Logins {
			for _, uid := range hbaUIDs {
				switch {
				case strings.HasPrefix(uid, "fc."):
					fields := strings.Split(strings.TrimPrefix(uid, "fc."), ":")
					if len(fields) != 2 {
						klog.Errorf("Invalid FC WWN: %s", uid)
						continue
					}
					// fields[0] is the WWNN, fields[1] the WWPN reported at login.
					wwpn := strings.ToUpper(fields[1])
					if login.LoginWWN != wwpn {
						continue
					}
					matches = append(matches, Logins{
						HostGroupId: login.HostGroupId,
						Islogin:     login.Islogin,
						LoginWWN:    login.LoginWWN,
						WWNNickName: login.WWNNickName,
						IscsiNickName:   "",
						IscsiTargetName: "",
						LoginIscsiName:  "",
					})
				case strings.HasPrefix(uid, "iqn."), strings.HasPrefix(uid, "nqn."):
					// iSCSI / NVMe initiators are not matched by this helper.
				default:
					klog.Errorf("Unknown UID type: %s", uid)
				}
			}
		}
	}
	return matches
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vmware/client.go | cmd/vsphere-xcopy-volume-populator/internal/vmware/client.go | package vmware
import (
"context"
"encoding/xml"
"net/url"
"strings"
"fmt"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/cli/esx"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog/v2"
)
//go:generate go run go.uber.org/mock/mockgen -destination=mocks/vmware_mock_client.go -package=vmware_mocks . Client
// Client abstracts the vSphere operations needed by the volume populator so
// they can be mocked in tests (see the generated mock in mocks/).
type Client interface {
	// GetEsxByVm resolves the ESXi host currently running the named VM.
	GetEsxByVm(ctx context.Context, vmName string) (*object.HostSystem, error)
	// RunEsxCommand runs an esxcli command on the given host.
	RunEsxCommand(ctx context.Context, host *object.HostSystem, command []string) ([]esx.Values, error)
	// GetDatastore looks a datastore up by name within a datacenter.
	GetDatastore(ctx context.Context, dc *object.Datacenter, datastore string) (*object.Datastore, error)
	// GetVMDiskBacking returns disk backing information for detecting disk type (VVol, RDM, VMDK)
	GetVMDiskBacking(ctx context.Context, vmId string, vmdkPath string) (*DiskBacking, error)
}
// DiskBacking contains information about the disk backing type. Exactly one
// of VVolId / IsRDM is meaningful; both zero means a plain VMDK.
type DiskBacking struct {
	// VVolId is set if the disk is VVol-backed
	VVolId string
	// IsRDM is true if the disk is a Raw Device Mapping
	IsRDM bool
	// DeviceName is the underlying device name (file name for flat/VVol disks)
	DeviceName string
}
// VSphereClient is the production Client implementation, embedding a live
// govmomi connection.
type VSphereClient struct {
	*govmomi.Client
}
// NewClient builds a vSphere API client for the given vCenter endpoint,
// authenticating with the supplied credentials. TLS certificate verification
// is disabled (insecure=true).
func NewClient(vcenterUrl, username, password string) (Client, error) {
	parsed, err := soap.ParseURL(vcenterUrl)
	if err != nil {
		return nil, fmt.Errorf("Failed parsing vCenter URL: %w", err)
	}
	parsed.User = url.UserPassword(username, password)
	govmomiClient, err := govmomi.NewClient(context.Background(), parsed, true)
	if err != nil {
		return nil, fmt.Errorf("Failed creating vSphere client: %w", err)
	}
	return &VSphereClient{Client: govmomiClient}, nil
}
// RunEsxCommand executes an esxcli command on the given host and returns the
// raw result values. Any result entry carrying a non-"0" status is treated
// as a vmkfstools failure.
func (c *VSphereClient) RunEsxCommand(ctx context.Context, host *object.HostSystem, command []string) ([]esx.Values, error) {
	executor, err := esx.NewExecutor(ctx, c.Client.Client, host.Reference())
	if err != nil {
		return nil, err
	}
	// Invoke esxcli command
	klog.Infof("about to run esxcli command %s", command)
	res, err := executor.Run(ctx, command)
	if err != nil {
		klog.Errorf("Failed to run esxcli command %v: %s", command, err)
		// Surface structured fault details when the error is an esx.Fault.
		if fault, ok := err.(*esx.Fault); ok {
			if parsedFault, parseErr := ErrToFault(fault); parseErr == nil {
				klog.Errorf("ESX CLI Fault - Type: %s, Messages: %v", parsedFault.Type, parsedFault.ErrMsgs)
			} else {
				klog.Errorf("Failed to parse fault details: %v", parseErr)
			}
		}
		return nil, err
	}
	for _, valueMap := range res.Values {
		// A missing "message" key simply yields the zero value; the comma-ok
		// form with a blank identifier was redundant.
		message := valueMap["message"]
		status, statusExists := valueMap["status"]
		klog.Infof("esxcli result %v, message %s, status %v", valueMap, message, status)
		if statusExists && strings.Join(status, "") != "0" {
			return nil, fmt.Errorf("Failed to invoke vmkfstools: %v", message)
		}
	}
	return res.Values, nil
}
// GetEsxByVm resolves the ESXi host running the VM identified by vmId. The
// VM is first searched by inventory name across all datacenters; if not
// found, vmId is treated as a raw managed object reference value.
//
// Fixes: removed two dead nil-checks (object.NewVirtualMachine and
// object.NewHostSystem never return nil — the latter also wrapped a nil
// error), replaced a stray fmt.Printf debug with klog, and guarded against a
// nil Runtime.Host before dereferencing it.
func (c *VSphereClient) GetEsxByVm(ctx context.Context, vmId string) (*object.HostSystem, error) {
	finder := find.NewFinder(c.Client.Client, true)
	datacenters, err := finder.DatacenterList(ctx, "*")
	if err != nil {
		return nil, fmt.Errorf("failed getting datacenters: %w", err)
	}
	var vm *object.VirtualMachine
	for _, dc := range datacenters {
		finder.SetDatacenter(dc)
		result, err := finder.VirtualMachine(ctx, vmId)
		if err != nil {
			if _, ok := err.(*find.NotFoundError); !ok {
				return nil, fmt.Errorf("error searching for VM in Datacenter '%s': %w", dc.Name(), err)
			}
			continue
		}
		vm = result
		klog.V(2).Infof("found vm %v", vm)
		break
	}
	if vm == nil {
		// Fall back to treating vmId as a raw managed object reference value.
		moref := types.ManagedObjectReference{Type: "VirtualMachine", Value: vmId}
		vm = object.NewVirtualMachine(c.Client.Client, moref)
	}
	var vmProps mo.VirtualMachine
	if err := vm.Properties(ctx, vm.Reference(), []string{"runtime.host"}, &vmProps); err != nil {
		return nil, fmt.Errorf("failed to get VM properties: %w", err)
	}
	hostRef := vmProps.Runtime.Host
	if hostRef == nil {
		return nil, fmt.Errorf("VM %s has no associated host", vmId)
	}
	return object.NewHostSystem(c.Client.Client, *hostRef), nil
}
// GetDatastore looks up a datastore by name within the given datacenter.
func (c *VSphereClient) GetDatastore(ctx context.Context, dc *object.Datacenter, datastore string) (*object.Datastore, error) {
	f := find.NewFinder(c.Client.Client, false)
	f.SetDatacenter(dc)
	store, lookupErr := f.Datastore(ctx, datastore)
	if lookupErr != nil {
		return nil, fmt.Errorf("Failed to find datastore %s: %w", datastore, lookupErr)
	}
	return store, nil
}
// GetVMDiskBacking retrieves disk backing information to determine disk type
// (VVol, RDM, or plain VMDK) for the disk at vmdkPath on the VM identified
// by vmId. If no disk matches, it conservatively reports a plain VMDK.
func (c *VSphereClient) GetVMDiskBacking(ctx context.Context, vmId string, vmdkPath string) (*DiskBacking, error) {
	finder := find.NewFinder(c.Client.Client, true)
	datacenters, err := finder.DatacenterList(ctx, "*")
	if err != nil {
		return nil, fmt.Errorf("failed getting datacenters: %w", err)
	}
	// Locate the VM by inventory name across every datacenter.
	var vm *object.VirtualMachine
	for _, dc := range datacenters {
		finder.SetDatacenter(dc)
		result, err := finder.VirtualMachine(ctx, vmId)
		if err != nil {
			if _, ok := err.(*find.NotFoundError); !ok {
				return nil, fmt.Errorf("error searching for VM in Datacenter '%s': %w", dc.Name(), err)
			}
		} else {
			vm = result
			break
		}
	}
	if vm == nil {
		// Fall back to treating vmId as a raw managed object reference value.
		moref := types.ManagedObjectReference{Type: "VirtualMachine", Value: vmId}
		vm = object.NewVirtualMachine(c.Client.Client, moref)
	}
	if vm == nil {
		// NOTE(review): object.NewVirtualMachine never returns nil, so this
		// branch appears unreachable — confirm before relying on it.
		return nil, fmt.Errorf("failed to find VM with ID %s", vmId)
	}
	// Get VM configuration to inspect disk devices
	var vmProps mo.VirtualMachine
	err = vm.Properties(ctx, vm.Reference(), []string{"config.hardware.device"}, &vmProps)
	if err != nil {
		return nil, fmt.Errorf("failed to get VM properties: %w", err)
	}
	// Normalize vmdkPath for comparison (lowercased; bracket stripping is
	// handled by diskPathMatches below).
	normalizedPath := strings.ToLower(vmdkPath)
	// Find the disk matching the vmdkPath
	for _, device := range vmProps.Config.Hardware.Device {
		disk, ok := device.(*types.VirtualDisk)
		if !ok {
			continue
		}
		// Check different backing types
		switch backing := disk.Backing.(type) {
		case *types.VirtualDiskFlatVer2BackingInfo:
			// Check if this disk matches the requested path: first by
			// substring in either direction, then by normalized equality.
			if !strings.Contains(strings.ToLower(backing.FileName), normalizedPath) &&
				!strings.Contains(normalizedPath, strings.ToLower(backing.FileName)) {
				// Try to match by extracting datastore and path
				if !diskPathMatches(backing.FileName, vmdkPath) {
					continue
				}
			}
			// Check for VVol backing
			if backing.BackingObjectId != "" {
				klog.V(2).Infof("Disk %s is VVol-backed (BackingObjectId: %s)", vmdkPath, backing.BackingObjectId)
				return &DiskBacking{
					VVolId:     backing.BackingObjectId,
					IsRDM:      false,
					DeviceName: backing.FileName,
				}, nil
			}
			// Regular VMDK
			klog.V(2).Infof("Disk %s is VMDK-backed", vmdkPath)
			return &DiskBacking{
				VVolId:     "",
				IsRDM:      false,
				DeviceName: backing.FileName,
			}, nil
		case *types.VirtualDiskRawDiskMappingVer1BackingInfo:
			// Check if this disk matches (same matching rules as above).
			if !strings.Contains(strings.ToLower(backing.FileName), normalizedPath) &&
				!strings.Contains(normalizedPath, strings.ToLower(backing.FileName)) {
				if !diskPathMatches(backing.FileName, vmdkPath) {
					continue
				}
			}
			klog.V(2).Infof("Disk %s is RDM-backed (DeviceName: %s)", vmdkPath, backing.DeviceName)
			return &DiskBacking{
				VVolId:     "",
				IsRDM:      true,
				DeviceName: backing.DeviceName,
			}, nil
		}
	}
	// If we couldn't find the disk, return default VMDK type
	klog.V(2).Infof("Could not find specific disk %s, assuming VMDK type", vmdkPath)
	return &DiskBacking{
		VVolId:     "",
		IsRDM:      false,
		DeviceName: "",
	}, nil
}
// diskPathMatches reports whether two datastore paths refer to the same
// file, ignoring case, surrounding whitespace, and the square brackets that
// wrap the datastore name (format: "[datastore] folder/file.vmdk").
func diskPathMatches(path1, path2 string) bool {
	canon := func(p string) string {
		lowered := strings.ToLower(strings.TrimSpace(p))
		return strings.NewReplacer("[", "", "]", "").Replace(lowered)
	}
	return canon(path1) == canon(path2)
}
// Obj models the XML envelope of an esxcli fault detail payload (vim25
// namespace), used only by ErrToFault.
type Obj struct {
	XMLName xml.Name `xml:"urn:vim25 obj"`
	VersionID string `xml:"versionId,attr"`
	Type string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"`
	Fault Fault `xml:"fault"`
	LocalizedMessage string `xml:"localizedMessage"`
}
// Fault carries the fault type and error messages extracted from an esxcli
// fault detail XML document.
type Fault struct {
	Type string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"`
	ErrMsgs []string `xml:"errMsg"`
}
// ErrToFault extracts the structured Fault from an esx.Fault error by
// decoding its XML Detail payload. It fails if the error is of any other
// type or if the payload does not parse.
func ErrToFault(err error) (*Fault, error) {
	fault, ok := err.(*esx.Fault)
	if !ok {
		return nil, fmt.Errorf("error is not of type esx.Fault")
	}
	var envelope Obj
	if decodeErr := xml.NewDecoder(strings.NewReader(fault.Detail)).Decode(&envelope); decodeErr != nil {
		return nil, fmt.Errorf("failed to decode from xml to fault: %w", decodeErr)
	}
	return &envelope.Fault, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vmware/ssh_client.go | cmd/vsphere-xcopy-volume-populator/internal/vmware/ssh_client.go | package vmware
import (
"context"
"fmt"
"net"
"strings"
"time"
"github.com/kubev2v/forklift/pkg/lib/logging"
"github.com/kubev2v/forklift/pkg/lib/util"
"github.com/vmware/govmomi/object"
"golang.org/x/crypto/ssh"
"k8s.io/klog/v2"
)
// SSHClient interface for SSH operations against an ESXi host. Connect must
// be called before ExecuteCommand; Close releases the connection.
type SSHClient interface {
	Connect(ctx context.Context, hostname, username string, privateKey []byte) error
	ExecuteCommand(datastore, sshCommand string, args ...string) (string, error)
	Close() error
}
// ESXiSSHClient is the SSHClient implementation for ESXi hosts, using
// public-key authentication over golang.org/x/crypto/ssh.
type ESXiSSHClient struct {
	hostname string     // target host (set by Connect)
	username string     // SSH user (set by Connect)
	sshClient *ssh.Client // nil until Connect succeeds / after Close
	privateKey []byte    // PEM-encoded key used for authentication
}
// NewSSHClient returns an unconnected SSHClient for ESXi hosts; call Connect
// before executing commands.
func NewSSHClient() SSHClient {
	var client ESXiSSHClient
	return &client
}
// Connect dials the host on port 22 and performs public-key SSH
// authentication, honoring the context's cancellation/deadline for both the
// TCP dial and the SSH handshake.
//
// Fix: the handshake deadline set on the raw connection is now cleared once
// the handshake completes — previously it remained armed, so every session
// created after the context deadline passed would fail with a timeout.
func (c *ESXiSSHClient) Connect(ctx context.Context, hostname, username string, privateKey []byte) error {
	c.hostname = hostname
	c.username = username
	c.privateKey = privateKey
	// Parse the private key
	signer, err := ssh.ParsePrivateKey(privateKey)
	if err != nil {
		return fmt.Errorf("failed to parse private key: %w", err)
	}
	// Create SSH client configuration
	config := &ssh.ClientConfig{
		User: username,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	// Establish TCP connection honoring context cancellation/deadline
	addr := net.JoinHostPort(hostname, "22")
	dialer := &net.Dialer{}
	netConn, err := dialer.DialContext(ctx, "tcp", addr)
	if err != nil {
		return fmt.Errorf("failed to connect to SSH server: %w", err)
	}
	// Ensure the SSH handshake also respects the context deadline
	if deadline, ok := ctx.Deadline(); ok {
		_ = netConn.SetDeadline(deadline)
	}
	// Perform SSH handshake on the established net.Conn
	cc, chans, reqs, err := ssh.NewClientConn(netConn, addr, config)
	if err != nil {
		_ = netConn.Close()
		return fmt.Errorf("failed to establish SSH client connection: %w", err)
	}
	// Disarm the handshake deadline so later session I/O is not cut short.
	_ = netConn.SetDeadline(time.Time{})
	c.sshClient = ssh.NewClient(cc, chans, reqs)
	klog.Infof("Connected to SSH server %s", hostname)
	return nil
}
// ExecuteCommand executes a command using the SSH_ORIGINAL_COMMAND pattern
// Uses structured format: DS=<datastore>;CMD=<operation> <args...>
// If datastore is empty, only tests connectivity without calling the wrapper
//
// Returns combined stdout+stderr. The call is bounded by a fixed 60-second
// timeout; on timeout the session is closed (the worker goroutine is left to
// drain into the buffered channel).
func (c *ESXiSSHClient) ExecuteCommand(datastore, sshCommand string, args ...string) (string, error) {
	if c.sshClient == nil {
		return "", fmt.Errorf("SSH client not connected")
	}
	// Create a new session for this command (sessions are single-use).
	session, err := c.sshClient.NewSession()
	if err != nil {
		return "", fmt.Errorf("failed to create SSH session: %w", err)
	}
	defer session.Close()
	// Build the command part
	cmdPart := sshCommand
	if len(args) > 0 {
		cmdPart = fmt.Sprintf("%s %s", sshCommand, strings.Join(args, " "))
	}
	// Build structured command: DS=<datastore>;CMD=<command>
	// For connectivity tests, datastore can be empty
	fullCommand := fmt.Sprintf("DS=%s;CMD=%s", datastore, cmdPart)
	klog.V(2).Infof("Executing SSH command: %s", fullCommand)
	// Create a context with timeout for the command execution
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	// Channel to receive the command result
	type commandResult struct {
		output []byte
		err error
	}
	// Buffered so the goroutine can always complete its send, even after a
	// timeout abandons the result.
	resultChan := make(chan commandResult, 1)
	// Execute command in a goroutine
	go func() {
		// The SSH command will be passed as SSH_ORIGINAL_COMMAND to the restricted script
		output, err := session.CombinedOutput(fullCommand)
		resultChan <- commandResult{output: output, err: err}
	}()
	// Wait for either the command to complete or timeout
	var output []byte
	var cmdErr error
	select {
	case result := <-resultChan:
		output = result.output
		cmdErr = result.err
	case <-ctx.Done():
		// Command timed out, try to close the session
		session.Close()
		return "", fmt.Errorf("SSH command timed out after 60 seconds: %s", fullCommand)
	}
	if cmdErr != nil {
		klog.Warningf("SSH command failed: %s, output: %s, error: %v", fullCommand, string(output), cmdErr)
		return string(output), cmdErr
	}
	klog.V(2).Infof("SSH command succeeded: %s, output: %s", fullCommand, string(output))
	return string(output), nil
}
// Close tears down the SSH connection if one is open. It is safe to call
// multiple times; subsequent calls are no-ops returning nil.
func (c *ESXiSSHClient) Close() error {
	if c.sshClient == nil {
		return nil
	}
	closeErr := c.sshClient.Close()
	c.sshClient = nil
	klog.Infof("Closed SSH connection to %s", c.hostname)
	return closeErr
}
// EnableSSHAccess verifies key-based SSH access to the ESXi host running a
// restricted command wrapper. If connectivity already works it returns nil;
// otherwise it logs manual installation instructions for the restricted
// authorized_keys entry and returns an error.
func EnableSSHAccess(ctx context.Context, vmwareClient Client, host *object.HostSystem, privateKey, publicKey []byte, scriptPath string) error {
	// NOTE(review): scriptPath is unused here — confirm whether it should be
	// embedded in the restricted key's command= option.
	publicKeyStr := strings.TrimSpace(string(publicKey))
	klog.Infof("Enabling SSH access on ESXi host %s", host.Name())
	// Step 1: Get host IP address for SSH connectivity testing
	hostIP, err := GetHostIPAddress(ctx, host)
	if err != nil {
		return fmt.Errorf("failed to get host IP address: %w", err)
	}
	// Step 2: Check ESXi version
	version, err := getESXiVersion(vmwareClient, host, ctx)
	if err != nil {
		return fmt.Errorf("failed to get ESXi version: %w", err)
	}
	klog.Infof("ESXi version %s detected", version)
	// Step 3: Build the restricted authorized_keys line from the shared
	// command template (forces the wrapper, disables forwarding).
	restrictedPublicKey := fmt.Sprintf(`command="%s",no-port-forwarding,no-agent-forwarding,no-X11-forwarding %s`,
		util.RestrictedSSHCommandTemplate, publicKeyStr)
	// Step 4: Test SSH connectivity first (using private key for authentication)
	// Pass empty datastore for connectivity test - the wrapper won't be called
	// Create a logger adapter from klog to logging.LevelLogger
	log := logging.WithName("ssh-setup")
	if util.TestSSHConnectivity(ctx, hostIP, privateKey, log) {
		klog.Infof("SSH connectivity test passed - keys already configured correctly")
		return nil
	}
	// Step 5: Manual SSH key installation required for all ESXi versions
	klog.Errorf("Manual SSH key installation required. Please add the following line to /etc/ssh/keys-root/authorized_keys on the ESXi host:")
	klog.Errorf("")
	klog.Errorf("  %s", restrictedPublicKey)
	klog.Errorf("")
	klog.Errorf("Steps to manually configure SSH key:")
	klog.Errorf("1. SSH to the ESXi host: ssh root@%s", hostIP)
	klog.Errorf("2. Edit the authorized_keys file: vi /etc/ssh/keys-root/authorized_keys")
	klog.Errorf("3. Add the above line to the file")
	klog.Errorf("4. Save and exit")
	klog.Errorf("5. Restart the operation")
	return fmt.Errorf("manual SSH key configuration required for ESXi %s - see logs for instructions", version)
}
// getESXiVersion retrieves the ESXi version from "esxcli system version get".
//
// Fix: the previous "Product"-guarded branch was dead code — it re-checked
// the same "Version" key that the first branch had already returned on, so
// it could never yield a result the first branch missed.
func getESXiVersion(vmwareClient Client, host *object.HostSystem, ctx context.Context) (string, error) {
	command := []string{"system", "version", "get"}
	output, err := vmwareClient.RunEsxCommand(ctx, host, command)
	if err != nil {
		return "", fmt.Errorf("failed to get ESXi version: %w", err)
	}
	for _, valueMap := range output {
		if version, exists := valueMap["Version"]; exists && len(version) > 0 {
			return version[0], nil
		}
	}
	return "", fmt.Errorf("could not parse ESXi version from command output")
}
// GetHostIPAddress retrieves the first management IP address reported by an
// ESXi host.
func GetHostIPAddress(ctx context.Context, host *object.HostSystem) (string, error) {
	ips, err := host.ManagementIPs(ctx)
	switch {
	case err != nil:
		return "", fmt.Errorf("failed to get management IPs: %w", err)
	case len(ips) == 0:
		return "", fmt.Errorf("no management IP addresses found")
	default:
		return ips[0].String(), nil
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vmware/client_test.go | cmd/vsphere-xcopy-volume-populator/internal/vmware/client_test.go | package vmware
import (
"context"
"testing"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/simulator"
)
// TestNewClientWithSimulator verifies NewClient can authenticate against a
// vcsim (vCenter simulator) endpoint without error.
func TestNewClientWithSimulator(t *testing.T) {
	model := simulator.VPX()
	defer model.Remove()
	err := model.Create()
	if err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()
	defer s.Close()
	_, err = NewClient(s.URL.String(), "user", "pass")
	if err != nil {
		t.Errorf("NewClient() error = %v, wantErr %v", err, false)
	}
}
// TestVSphereClient_GetEsxByVm exercises GetEsxByVm against the simulator
// with an ID that is not a valid inventory name, expecting an error from the
// moref-fallback property fetch.
func TestVSphereClient_GetEsxByVm(t *testing.T) {
	model := simulator.VPX()
	defer model.Remove()
	err := model.Create()
	if err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()
	defer s.Close()
	client, err := NewClient(s.URL.String(), "user", "pass")
	if err != nil {
		t.Fatal(err)
	}
	// "vm-1" resolves via the raw moref fallback; an error is expected here.
	_, err = client.GetEsxByVm(context.TODO(), "vm-1")
	if err == nil {
		t.Errorf("GetEsxByVm() error = %v, wantErr %v", err, true)
	}
}
// TestVSphereClient_GetDatastore verifies that the simulator's default
// datastore ("LocalDS_0") can be resolved in the default datacenter.
func TestVSphereClient_GetDatastore(t *testing.T) {
	model := simulator.VPX()
	defer model.Remove()
	err := model.Create()
	if err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()
	defer s.Close()
	client, err := NewClient(s.URL.String(), "user", "pass")
	if err != nil {
		t.Fatal(err)
	}
	finder := find.NewFinder(client.(*VSphereClient).Client.Client, false)
	dc, err := finder.DefaultDatacenter(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.GetDatastore(context.TODO(), dc, "LocalDS_0")
	if err != nil {
		t.Errorf("GetDatastore() error = %v, wantErr %v", err, false)
	}
}
// TestVSphereClient_RunEsxCommand is skipped: vcsim does not provide an
// esxcli executor, so the command cannot actually run.
func TestVSphereClient_RunEsxCommand(t *testing.T) {
	t.Skip("Skipping test that requires esxcli executor on simulator")
	model := simulator.VPX()
	defer model.Remove()
	err := model.Create()
	if err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()
	defer s.Close()
	client, err := NewClient(s.URL.String(), "user", "pass")
	if err != nil {
		t.Fatal(err)
	}
	finder := find.NewFinder(client.(*VSphereClient).Client.Client, false)
	dc, err := finder.DefaultDatacenter(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	finder.SetDatacenter(dc)
	host, err := finder.HostSystem(context.TODO(), "host-21")
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.RunEsxCommand(context.TODO(), host, []string{"echo", "hello"})
	if err != nil {
		t.Errorf("RunEsxCommand() error = %v, wantErr %v", err, false)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/vmware/mocks/vmware_mock_client.go | cmd/vsphere-xcopy-volume-populator/internal/vmware/mocks/vmware_mock_client.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware (interfaces: Client)
//
// Generated by this command:
//
// mockgen -destination=mocks/vmware_mock_client.go -package=vmware_mocks . Client
//
// Package vmware_mocks is a generated GoMock package.
package vmware_mocks
import (
context "context"
reflect "reflect"
vmware "github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
esx "github.com/vmware/govmomi/cli/esx"
object "github.com/vmware/govmomi/object"
gomock "go.uber.org/mock/gomock"
)
// MockClient is a mock of Client interface.
// NOTE: this file is generated by mockgen (see the go:generate directive in
// the vmware package); regenerate rather than editing by hand.
type MockClient struct {
	ctrl *gomock.Controller
	recorder *MockClientMockRecorder
	isgomock struct{}
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
	mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}
// GetDatastore mocks base method.
func (m *MockClient) GetDatastore(ctx context.Context, dc *object.Datacenter, datastore string) (*object.Datastore, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetDatastore", ctx, dc, datastore)
	ret0, _ := ret[0].(*object.Datastore)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetDatastore indicates an expected call of GetDatastore.
func (mr *MockClientMockRecorder) GetDatastore(ctx, dc, datastore any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDatastore", reflect.TypeOf((*MockClient)(nil).GetDatastore), ctx, dc, datastore)
}
// GetEsxByVm mocks base method.
func (m *MockClient) GetEsxByVm(ctx context.Context, vmName string) (*object.HostSystem, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetEsxByVm", ctx, vmName)
	ret0, _ := ret[0].(*object.HostSystem)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetEsxByVm indicates an expected call of GetEsxByVm.
func (mr *MockClientMockRecorder) GetEsxByVm(ctx, vmName any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEsxByVm", reflect.TypeOf((*MockClient)(nil).GetEsxByVm), ctx, vmName)
}
// RunEsxCommand mocks base method.
func (m *MockClient) RunEsxCommand(ctx context.Context, host *object.HostSystem, command []string) ([]esx.Values, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RunEsxCommand", ctx, host, command)
	ret0, _ := ret[0].([]esx.Values)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// RunEsxCommand indicates an expected call of RunEsxCommand.
func (mr *MockClientMockRecorder) RunEsxCommand(ctx, host, command any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunEsxCommand", reflect.TypeOf((*MockClient)(nil).RunEsxCommand), ctx, host, command)
}
// GetVMDiskBacking mocks base method.
func (m *MockClient) GetVMDiskBacking(ctx context.Context, vmId string, vmdkPath string) (*vmware.DiskBacking, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetVMDiskBacking", ctx, vmId, vmdkPath)
	ret0, _ := ret[0].(*vmware.DiskBacking)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetVMDiskBacking indicates an expected call of GetVMDiskBacking.
func (mr *MockClientMockRecorder) GetVMDiskBacking(ctx, vmId, vmdkPath any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMDiskBacking", reflect.TypeOf((*MockClient)(nil).GetVMDiskBacking), ctx, vmId, vmdkPath)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/infinibox/infinibox.go | cmd/vsphere-xcopy-volume-populator/internal/infinibox/infinibox.go | package infinibox
import (
"fmt"
"strconv"
"github.com/go-logr/logr"
"github.com/infinidat/infinibox-csi-driver/iboxapi"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
// Keys stored in the populator.MappingContext exchanged between
// EnsureClonnerIgroup, Map and UnMap.
const (
	hostIDContextKey string = "hostID"                         // InfiniBox host ID
	esxLogicalHostNameKey string = "esxLogicalHostName"        // initiator-group name used by the caller
	esxRealHostNameKey string = "esxRealHostName"              // InfiniBox host name of the ESXi host
	ocpRealHostNameKey string = "ocpRealHostName"              // InfiniBox host name of the OCP node
)
// InfiniboxClonner implements the populator storage-clonner contract on top
// of the InfiniBox CSI driver's iboxapi client.
type InfiniboxClonner struct {
	api iboxapi.Client // InfiniBox management API client
}
// Map presents the target LUN to the host resolved from the mapping context
// (the ESXi host when initiatorGroup matches the logical name, otherwise the
// OCP node). Mapping is idempotent: an existing mapping is left in place.
//
// Fix: the strconv.Atoi error was previously ignored, so a non-numeric
// LDeviceID would silently map volume ID 0; it is now checked, matching
// UnMap's behaviour.
func (c *InfiniboxClonner) Map(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) (populator.LUN, error) {
	if mappingContext == nil {
		return targetLUN, fmt.Errorf("mapping context is required")
	}
	hostName := ""
	if initiatorGroup != mappingContext[esxLogicalHostNameKey] {
		hostName = mappingContext[ocpRealHostNameKey].(string)
	} else {
		hostName = mappingContext[esxRealHostNameKey].(string)
	}
	klog.Infof("mapping volume %s to initiator-group %s", targetLUN.Name, hostName)
	host, err := c.api.GetHostByName(hostName)
	if err != nil {
		return targetLUN, fmt.Errorf("failed to find host for host name %s: %w", hostName, err)
	}
	volumeID, err := strconv.Atoi(targetLUN.LDeviceID)
	if err != nil {
		return targetLUN, fmt.Errorf("failed to convert volume ID %s to integer: %w", targetLUN.LDeviceID, err)
	}
	// Idempotency: check if already mapped
	existingMappings, err := c.api.GetLunsByVolume(volumeID)
	if err == nil {
		for _, mapping := range existingMappings {
			if mapping.HostID == host.ID {
				klog.Infof("Volume %s already mapped to initiator group %s", targetLUN.Name, hostName)
				return targetLUN, nil
			}
		}
	}
	_, err = c.api.MapVolumeToHost(host.ID, volumeID, 0)
	if err != nil {
		return targetLUN, fmt.Errorf("failed to map volume %s to host %s: %w", targetLUN.Name, hostName, err)
	}
	klog.Infof("Successfully mapped volume %s to initiator group %s", targetLUN.Name, hostName)
	return targetLUN, nil
}
// UnMap removes the mapping between the target LUN and the host resolved
// from the mapping context — the mirror operation of Map.
func (c *InfiniboxClonner) UnMap(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) error {
	if mappingContext == nil {
		return fmt.Errorf("mapping context is required")
	}
	var hostName string
	if initiatorGroup == mappingContext[esxLogicalHostNameKey] {
		hostName = mappingContext[esxRealHostNameKey].(string)
	} else {
		hostName = mappingContext[ocpRealHostNameKey].(string)
	}
	klog.Infof("unmapping volume %s from initiator-group %s", targetLUN.Name, hostName)
	host, err := c.api.GetHostByName(hostName)
	if err != nil {
		return fmt.Errorf("failed to find host for host name %s: %w", hostName, err)
	}
	volumeID, convErr := strconv.Atoi(targetLUN.LDeviceID)
	if convErr != nil {
		return fmt.Errorf("failed to convert volume ID %s to integer: %w", targetLUN.LDeviceID, convErr)
	}
	if _, unmapErr := c.api.UnMapVolumeFromHost(host.ID, volumeID); unmapErr != nil {
		return fmt.Errorf("failed to unmap volume %s from host %s: %w", targetLUN.Name, hostName, unmapErr)
	}
	klog.Infof("Successfully unmapped volume %s from initiator group %s", targetLUN.Name, hostName)
	return nil
}
// EnsureClonnerIgroup locates the Infinibox host whose ports include one of
// the given adapter IDs and returns a mapping context describing it.
func (c *InfiniboxClonner) EnsureClonnerIgroup(initiatorGroup string, adapterIds []string) (populator.MappingContext, error) {
	hosts, err := c.api.GetAllHosts()
	if err != nil {
		return nil, fmt.Errorf("failed to get all hosts: %w", err)
	}
	// Scan every port of every host for a match against the adapter IDs.
	for i := range hosts {
		candidate := &hosts[i]
		for _, port := range candidate.Ports {
			for _, id := range adapterIds {
				if port.Address != id {
					continue
				}
				klog.Infof("Found host %s with adapter ID %s", candidate.Name, id)
				return createMappingContext(candidate, initiatorGroup), nil
			}
		}
	}
	return nil, fmt.Errorf("no host found with adapter IDs %v", adapterIds)
}
// createMappingContext builds the mapping context recording the matched
// Infinibox host (ID and real name) and the logical ESX host name.
func createMappingContext(host *iboxapi.Host, initiatorGroup string) populator.MappingContext {
	ctx := populator.MappingContext{}
	ctx[hostIDContextKey] = host.ID
	ctx[esxLogicalHostNameKey] = initiatorGroup
	ctx[esxRealHostNameKey] = host.Name
	return ctx
}
// NewInfiniboxClonner builds an InfiniboxClonner backed by an iboxapi client
// authenticated with the given credentials.
//
// NOTE(review): the insecure flag is currently not forwarded to the client —
// confirm whether TLS verification should be configurable here.
func NewInfiniboxClonner(hostname, username, password string, insecure bool) (InfiniboxClonner, error) {
	creds := iboxapi.Credentials{
		URL:      hostname,
		Username: username,
		Password: password,
	}
	// The iboxapi client logs through logr; discard its output here.
	client := iboxapi.NewIboxClient(logr.Discard(), creds)
	return InfiniboxClonner{api: client}, nil
}
// ResolvePVToLUN looks up the Infinibox volume backing the given PV and
// returns its LUN details (serial, IQN and NAA identifiers).
func (c *InfiniboxClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	attrs := pv.VolumeAttributes
	volumeName := attrs["Name"]
	volume, err := c.api.GetVolumeByName(volumeName)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to get volume by name %s: %w", volumeName, err)
	}
	serial := volume.Serial
	// iSCSI volumes are addressed by IQN; everything else falls back to NAA.
	protocolPrefix := "naa"
	if attrs["storage_protocol"] == "iscsi" {
		protocolPrefix = "iqn"
	}
	klog.Infof("Successfully resolved volume %s", volumeName)
	return populator.LUN{
		Name:         volumeName,
		LDeviceID:    strconv.Itoa(volume.ID),
		VolumeHandle: pv.VolumeHandle,
		SerialNumber: serial,
		IQN:          fmt.Sprintf("%s.%s", protocolPrefix, serial),
		NAA:          fmt.Sprintf("naa.6%s", serial),
	}, nil
}
// CurrentMappedGroups returns the names of the hosts the target LUN's
// backing volume is currently mapped to on the Infinibox.
//
// Side effect: the first mapped host discovered is recorded in the mapping
// context under ocpRealHostNameKey (when not already present), and the
// function returns immediately with only that host. Cluster mappings are
// skipped with a warning. Returns an error when the device ID is not
// numeric, an API call fails, or no usable host mapping is found.
func (c *InfiniboxClonner) CurrentMappedGroups(targetLUN populator.LUN, mappingContext populator.MappingContext) ([]string, error) {
	volumeID := targetLUN.LDeviceID
	volumeIDInt, err := strconv.Atoi(volumeID)
	if err != nil {
		return nil, fmt.Errorf("invalid volume ID '%s', expected integer volume ID: %w", volumeID, err)
	}
	klog.Infof("Checking mappings for volume ID %d (LDeviceID: %s)", volumeIDInt, volumeID)
	lunInfos, err := c.api.GetLunsByVolume(volumeIDInt)
	if err != nil {
		return nil, fmt.Errorf("failed to get LUN mappings for volume %s: %w", volumeID, err)
	}
	klog.Infof("Found %d LUN mappings for volume %s", len(lunInfos), volumeID)
	if len(lunInfos) == 0 {
		klog.Infof("Volume %s is not mapped to any hosts", volumeID)
		return []string{}, nil
	}
	allHosts, err := c.api.GetAllHosts()
	if err != nil {
		return nil, fmt.Errorf("failed to get all hosts: %w", err)
	}
	// Index hosts by ID for O(1) lookup while walking the LUN mappings.
	hostByID := make(map[int]*iboxapi.Host)
	for i := range allHosts {
		hostByID[allHosts[i].ID] = &allHosts[i]
	}
	mappedHosts := make([]string, 0, len(lunInfos))
	hostIDsProcessed := make(map[int]bool)
	for _, lunInfo := range lunInfos {
		// A volume may expose several LUNs to the same host; count each host once.
		if hostIDsProcessed[lunInfo.HostID] {
			continue
		}
		if lunInfo.CLustered {
			klog.Warningf("Volume %s is mapped to host cluster %d (cluster mappings not fully supported)",
				volumeID, lunInfo.HostClusterID)
			continue
		}
		host, exists := hostByID[lunInfo.HostID]
		if !exists {
			klog.Warningf("Failed to find host info for host ID %d", lunInfo.HostID)
			continue
		}
		mappedHosts = append(mappedHosts, host.Name)
		hostIDsProcessed[lunInfo.HostID] = true
		// Record the first mapped host in the context and return early; later
		// mappings are only reached when the context already has this entry.
		if _, ok := mappingContext[ocpRealHostNameKey]; !ok {
			mappingContext[ocpRealHostNameKey] = host.Name
			klog.Infof("Volume %s is currently mapped to host: %s", volumeID, host.Name)
			return mappedHosts, nil
		}
		klog.Infof("Volume %s is mapped to host %s (ID: %d) as LUN %d",
			volumeID, host.Name, lunInfo.HostID, lunInfo.Lun)
	}
	if len(mappedHosts) == 0 {
		return nil, fmt.Errorf("volume %s is not mapped to any host", volumeID)
	}
	return mappedHosts, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/fcutil/fcutil_test.go | cmd/vsphere-xcopy-volume-populator/internal/fcutil/fcutil_test.go | package fcutil
import (
"strings"
"testing"
)
// TestParseFCAdapter exercises ParseFCAdapter's happy paths (including case
// normalization to uppercase) and every validation error: missing "fc."
// prefix, bad format, empty parts, odd lengths, and non-hex characters.
func TestParseFCAdapter(t *testing.T) {
	testCases := []struct {
		name          string
		fcID          string
		expectedWWNN  string
		expectedWWPN  string
		expectError   bool
		errorContains string
	}{
		{
			name:         "valid FC adapter ID",
			fcID:         "fc.20000000C0A80ABC:21000000C0A80DEF",
			expectedWWNN: "20000000C0A80ABC",
			expectedWWPN: "21000000C0A80DEF",
			expectError:  false,
		},
		{
			name:         "valid with lowercase hex",
			fcID:         "fc.20000000c0a80abc:2a000000c0a80def",
			expectedWWNN: "20000000C0A80ABC",
			expectedWWPN: "2A000000C0A80DEF",
			expectError:  false,
		},
		{
			name:         "valid with mixed case",
			fcID:         "fc.AbCdEf0123456789:FeDcBa9876543210",
			expectedWWNN: "ABCDEF0123456789",
			expectedWWPN: "FEDCBA9876543210",
			expectError:  false,
		},
		{
			name:          "missing fc. prefix",
			fcID:          "20000000C0A80ABC:21000000C0A80DEF",
			expectError:   true,
			errorContains: "doesn't start with 'fc.'",
		},
		{
			name:          "invalid prefix",
			fcID:          "f.20000000C0A80ABC:21000000C0A80DEF",
			expectError:   true,
			errorContains: "doesn't start with 'fc.'",
		},
		{
			name:          "missing WWPN (no colon)",
			fcID:          "fc.20000000C0A80ABC",
			expectError:   true,
			errorContains: "not in expected fc.WWNN:WWPN format",
		},
		{
			name:          "empty WWPN",
			fcID:          "fc.20000000C0A80ABC:",
			expectError:   true,
			errorContains: "empty WWNN or WWPN",
		},
		{
			name:          "empty WWNN",
			fcID:          "fc.:21000000C0A80DEF",
			expectError:   true,
			errorContains: "empty WWNN or WWPN",
		},
		{
			name:          "odd length WWNN",
			fcID:          "fc.200000000000001:21000000C0A80DEF",
			expectError:   true,
			errorContains: "WWNN",
		},
		{
			name:          "odd length WWPN",
			fcID:          "fc.20000000C0A80ABC:210000000000001",
			expectError:   true,
			errorContains: "WWPN",
		},
		{
			name:          "non-hex characters in WWNN",
			fcID:          "fc.2000000Z00000ABC:21000000C0A80DEF",
			expectError:   true,
			errorContains: "non-hex",
		},
		{
			name:          "non-hex characters in WWPN",
			fcID:          "fc.20000000C0A80ABC:2100000G00000DEF",
			expectError:   true,
			errorContains: "non-hex",
		},
		{
			name:          "empty string",
			fcID:          "",
			expectError:   true,
			errorContains: "doesn't start with 'fc.'",
		},
		{
			name:          "multiple colons",
			fcID:          "fc.20:00:00:00:C0:A8:0A:BC:21:00:00:00:C0:A8:0D:EF",
			expectError:   true,
			errorContains: "not in expected fc.WWNN:WWPN format",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			wwnn, wwpn, err := ParseFCAdapter(tc.fcID)
			if tc.expectError {
				// Error cases only assert on a message substring.
				if err == nil {
					t.Errorf("expected an error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if wwnn != tc.expectedWWNN {
					t.Errorf("expected WWNN %q, but got %q", tc.expectedWWNN, wwnn)
				}
				if wwpn != tc.expectedWWPN {
					t.Errorf("expected WWPN %q, but got %q", tc.expectedWWPN, wwpn)
				}
			}
		})
	}
}
// TestFormatWWNWithColons covers normal 16-char WWNs plus edge cases:
// odd length, single character, two characters, and the empty string.
func TestFormatWWNWithColons(t *testing.T) {
	testCases := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "16 character WWN",
			input:    "21000000C0A80DEF",
			expected: "21:00:00:00:C0:A8:0D:EF", // NOSONAR
		},
		{
			name:     "different WWN",
			input:    "ABCDEF0123456789",
			expected: "AB:CD:EF:01:23:45:67:89", // NOSONAR
		},
		{
			name:     "all zeros",
			input:    "0000000000000000",
			expected: "00:00:00:00:00:00:00:00",
		},
		{
			name:     "odd length (edge case)",
			input:    "123456789",
			expected: "12:34:56:78:9",
		},
		{
			name:     "single character",
			input:    "A",
			expected: "A",
		},
		{
			name:     "empty string",
			input:    "",
			expected: "",
		},
		{
			name:     "two characters",
			input:    "AB",
			expected: "AB",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := FormatWWNWithColons(tc.input)
			if result != tc.expected {
				t.Errorf("expected %q, but got %q", tc.expected, result)
			}
		})
	}
}
// TestNormalizeWWN verifies that colons, dashes and spaces (even mixed) are
// stripped and the result is uppercased; empty input stays empty.
func TestNormalizeWWN(t *testing.T) {
	testCases := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "WWN with colons",
			input:    "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			expected: "21000000C0A80DEF",
		},
		{
			name:     "WWN with dashes",
			input:    "21-00-00-00-C0-A8-0D-EF", // NOSONAR
			expected: "21000000C0A80DEF",
		},
		{
			name:     "WWN with spaces",
			input:    "21 00 00 00 C0 A8 0D EF", // NOSONAR
			expected: "21000000C0A80DEF",
		},
		{
			name:     "WWN with mixed formatting",
			input:    "21:00-00 00:C0-A8 0D:EF", // NOSONAR
			expected: "21000000C0A80DEF",
		},
		{
			name:     "lowercase input",
			input:    "abcdef0123456789",
			expected: "ABCDEF0123456789",
		},
		{
			name:     "already normalized",
			input:    "21000000C0A80DEF",
			expected: "21000000C0A80DEF",
		},
		{
			name:     "empty string",
			input:    "",
			expected: "",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := NormalizeWWN(tc.input)
			if result != tc.expected {
				t.Errorf("expected %q, but got %q", tc.expected, result)
			}
		})
	}
}
// TestExtractAndFormatWWPN checks the combined extract+format helper: valid
// IDs yield an uppercased, colon-separated WWPN; parse failures propagate.
func TestExtractAndFormatWWPN(t *testing.T) {
	testCases := []struct {
		name          string
		fcID          string
		expected      string
		expectError   bool
		errorContains string
	}{
		{
			name:        "valid FC adapter ID",
			fcID:        "fc.20000000C0A80ABC:21000000C0A80DEF",
			expected:    "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			expectError: false,
		},
		{
			name:        "lowercase input",
			fcID:        "fc.20000000c0a80abc:abcdef0123456789",
			expected:    "AB:CD:EF:01:23:45:67:89", // NOSONAR
			expectError: false,
		},
		{
			name:          "invalid format",
			fcID:          "fc.20000000C0A80ABC",
			expectError:   true,
			errorContains: "not in expected",
		},
		{
			name:          "odd length WWPN",
			fcID:          "fc.20000000C0A80ABC:210000000000001",
			expectError:   true,
			errorContains: "odd length",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := ExtractAndFormatWWPN(tc.fcID)
			if tc.expectError {
				if err == nil {
					t.Errorf("expected an error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if result != tc.expected {
					t.Errorf("expected %q, but got %q", tc.expected, result)
				}
			}
		})
	}
}
// TestExtractWWPN checks the unformatted-WWPN helper: the WWPN comes back
// uppercased without separators, and parse errors propagate.
func TestExtractWWPN(t *testing.T) {
	testCases := []struct {
		name          string
		fcID          string
		expected      string
		expectError   bool
		errorContains string
	}{
		{
			name:        "valid FC adapter ID",
			fcID:        "fc.20000000C0A80ABC:21000000C0A80DEF",
			expected:    "21000000C0A80DEF",
			expectError: false,
		},
		{
			name:        "lowercase input becomes uppercase",
			fcID:        "fc.20000000c0a80abc:abcdef0123456789",
			expected:    "ABCDEF0123456789",
			expectError: false,
		},
		{
			name:          "invalid format",
			fcID:          "20000000C0A80ABC:21000000C0A80DEF",
			expectError:   true,
			errorContains: "doesn't start with 'fc.'",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := ExtractWWPN(tc.fcID)
			if tc.expectError {
				if err == nil {
					t.Errorf("expected an error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if result != tc.expected {
					t.Errorf("expected %q, but got %q", tc.expected, result)
				}
			}
		})
	}
}
// TestCompareWWNs verifies formatting- and case-insensitive WWN equality,
// including the degenerate empty-vs-empty and empty-vs-nonempty cases.
func TestCompareWWNs(t *testing.T) {
	testCases := []struct {
		name     string
		wwn1     string
		wwn2     string
		expected bool
	}{
		{
			name:     "identical formatted WWNs",
			wwn1:     "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			wwn2:     "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			expected: true,
		},
		{
			name:     "formatted vs unformatted",
			wwn1:     "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			wwn2:     "21000000C0A80DEF",
			expected: true,
		},
		{
			name:     "colon vs dash formatting",
			wwn1:     "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			wwn2:     "21-00-00-00-C0-A8-0D-EF",
			expected: true,
		},
		{
			name:     "lowercase vs uppercase",
			wwn1:     "abcdef0123456789",
			wwn2:     "AB:CD:EF:01:23:45:67:89", // NOSONAR
			expected: true,
		},
		{
			name:     "different WWNs",
			wwn1:     "21:00:00:00:C0:A8:0D:EF", // NOSONAR
			wwn2:     "21:00:00:00:C0:A8:0D:FF", // NOSONAR
			expected: false,
		},
		{
			name:     "empty strings",
			wwn1:     "",
			wwn2:     "",
			expected: true,
		},
		{
			name:     "one empty",
			wwn1:     "21000000C0A80DEF",
			wwn2:     "",
			expected: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := CompareWWNs(tc.wwn1, tc.wwn2)
			if result != tc.expected {
				t.Errorf("expected %v, but got %v for comparing %q and %q",
					tc.expected, result, tc.wwn1, tc.wwn2)
			}
		})
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/fcutil/fcutil.go | cmd/vsphere-xcopy-volume-populator/internal/fcutil/fcutil.go | package fcutil
import (
"fmt"
"regexp"
"strings"
)
// wwnHexPattern validates that a WWN part contains only hexadecimal
// characters. Compiled once at package scope so ParseFCAdapter does not pay
// a regexp compilation on every call.
var wwnHexPattern = regexp.MustCompile(`^[0-9A-Fa-f]+$`)

// ParseFCAdapter parses an ESX FC adapter ID in format "fc.WWNN:WWPN"
// and returns the WWNN and WWPN separately (unformatted hex strings).
//
// Example:
//
//	input:  "fc.2000000000000001:2100000000000001"
//	output: wwnn="2000000000000001", wwpn="2100000000000001", err=nil
//
// The returned WWNN and WWPN are uppercase hex strings without formatting.
// Errors are returned for a missing "fc." prefix, a missing/extra colon,
// empty parts, odd-length parts, or non-hex characters.
func ParseFCAdapter(fcID string) (wwnn, wwpn string, err error) {
	if !strings.HasPrefix(fcID, "fc.") {
		return "", "", fmt.Errorf("FC adapter ID %q doesn't start with 'fc.'", fcID)
	}
	// Remove "fc." prefix and split by ":" — exactly one colon is expected.
	parts := strings.Split(fcID[3:], ":")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("FC adapter ID %q is not in expected fc.WWNN:WWPN format", fcID)
	}
	wwnn = strings.ToUpper(parts[0])
	wwpn = strings.ToUpper(parts[1])
	if len(wwnn) == 0 || len(wwpn) == 0 {
		return "", "", fmt.Errorf("FC adapter ID %q has empty WWNN or WWPN", fcID)
	}
	// Validate that WWN parts have even length (required for byte-pair formatting)
	if len(wwnn)%2 != 0 {
		return "", "", fmt.Errorf("WWNN %q has odd length", wwnn)
	}
	if len(wwpn)%2 != 0 {
		return "", "", fmt.Errorf("WWPN %q has odd length", wwpn)
	}
	// Validate hex format
	if !wwnHexPattern.MatchString(wwnn) {
		return "", "", fmt.Errorf("WWNN %q contains non-hex characters", wwnn)
	}
	if !wwnHexPattern.MatchString(wwpn) {
		return "", "", fmt.Errorf("WWPN %q contains non-hex characters", wwpn)
	}
	return wwnn, wwpn, nil
}
// FormatWWNWithColons renders a WWN hex string with a colon between every
// pair of characters.
//
// Example:
//
//	input:  "2100000000000001"
//	output: "21:00:00:00:00:00:00:01"
//
// The input is expected to be an uppercase hex string of even length; an
// odd-length input simply ends with a one-character final segment, and an
// empty input yields an empty string.
func FormatWWNWithColons(wwn string) string {
	var b strings.Builder
	for i := 0; i < len(wwn); i += 2 {
		if i > 0 {
			b.WriteByte(':')
		}
		end := i + 2
		if end > len(wwn) {
			end = len(wwn)
		}
		b.WriteString(wwn[i:end])
	}
	return b.String()
}
// wwnSeparatorReplacer strips the separator characters (colon, dash, space)
// commonly used when formatting WWNs. Built once at package scope; a
// Replacer removes all separators in a single pass over the input instead of
// the three full-string passes the previous chained ReplaceAll calls made.
var wwnSeparatorReplacer = strings.NewReplacer(":", "", "-", "", " ", "")

// NormalizeWWN removes all formatting characters (colons, dashes, spaces) and uppercases.
// This is useful for comparing WWNs from different sources that may use different formatting.
//
// Example:
//
//	input:  "21:00:00:00:00:00:00:01"
//	output: "2100000000000001"
//
// Example:
//
//	input:  "21-00-00-00-00-00-00-01"
//	output: "2100000000000001"
func NormalizeWWN(wwn string) string {
	return strings.ToUpper(wwnSeparatorReplacer.Replace(wwn))
}
// ExtractAndFormatWWPN is a convenience function that extracts the WWPN from an
// ESX FC adapter ID and formats it with colons.
//
// This is the most common operation needed by storage backends.
//
// Example:
//
// input: "fc.2000000000000001:2100000000000001"
// output: "21:00:00:00:00:00:00:01"
func ExtractAndFormatWWPN(fcID string) (string, error) {
_, wwpn, err := ParseFCAdapter(fcID)
if err != nil {
return "", err
}
return FormatWWNWithColons(wwpn), nil
}
// ExtractWWPN returns the WWPN part of an ESX FC adapter ID as a plain
// uppercase hex string (no separators).
//
// Example:
//
//	input:  "fc.2000000000000001:2100000000000001"
//	output: "2100000000000001"
func ExtractWWPN(fcID string) (string, error) {
	_, wwpn, err := ParseFCAdapter(fcID)
	if err != nil {
		return "", err
	}
	return wwpn, nil
}
// CompareWWNs reports whether two WWN strings are equivalent once formatting
// (colons, dashes, spaces) and letter case are ignored.
//
// Example:
//
//	CompareWWNs("21:00:00:00:00:00:00:01", "2100000000000001")       // true
//	CompareWWNs("21-00-00-00-00-00-00-01", "21:00:00:00:00:00:00:01") // true
func CompareWWNs(wwn1, wwn2 string) bool {
	a := NormalizeWWN(wwn1)
	b := NormalizeWWN(wwn2)
	return a == b
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/pure/flashArray.go | cmd/vsphere-xcopy-volume-populator/internal/pure/flashArray.go | package pure
import (
"context"
"errors"
"fmt"
"path/filepath"
"slices"
"strings"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/fcutil"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog/v2"
)
// FlashProviderID is the vendor prefix Pure FlashArray uses in NAA device
// identifiers ("naa.624a9370<serial>").
const FlashProviderID = "624a9370"

// Ensure FlashArrayClonner implements required interfaces
var _ populator.RDMCapable = &FlashArrayClonner{}
var _ populator.VVolCapable = &FlashArrayClonner{}
var _ populator.VMDKCapable = &FlashArrayClonner{}

// FlashArrayClonner implements volume cloning and host mapping against a
// Pure FlashArray through its REST API.
type FlashArrayClonner struct {
	restClient    *RestClient // REST client used for all array operations
	clusterPrefix string      // prefix prepended to PV names to form array volume names
}

// ClusterPrefixEnv names the environment variable carrying the cluster prefix.
const ClusterPrefixEnv = "PURE_CLUSTER_PREFIX"

// helpMessage explains how to derive the cluster prefix when it is not configured.
const helpMessage = `clusterPrefix is missing and PURE_CLUSTER_PREFIX is not set.
Use this to extract the value:
printf "px_%s" $(oc get storagecluster -A -o=jsonpath='{.items[0].status.clusterUid}'| head -c 8)
`
// NewFlashArrayClonner creates a new FlashArrayClonner.
//
// Authentication is mutually exclusive:
//   - a non-empty apiToken is used on its own (username/password ignored)
//   - otherwise username and password are used
//
// clusterPrefix is mandatory; an explanatory error is returned when empty.
func NewFlashArrayClonner(hostname, username, password, apiToken string, skipSSLVerification bool, clusterPrefix string) (FlashArrayClonner, error) {
	if clusterPrefix == "" {
		return FlashArrayClonner{}, errors.New(helpMessage)
	}
	// All array operations go through a single REST client.
	restClient, err := NewRestClient(hostname, username, password, apiToken, skipSSLVerification)
	if err != nil {
		return FlashArrayClonner{}, fmt.Errorf("failed to create REST client: %w", err)
	}
	clonner := FlashArrayClonner{
		restClient:    restClient,
		clusterPrefix: clusterPrefix,
	}
	return clonner, nil
}
// EnsureClonnerIgroup resolves which Pure host corresponds to the given ESX
// adapters (FC "fc.WWNN:WWPN" IDs or iSCSI IQNs) and returns it in the
// mapping context under the "hosts" key. No host group ("hgroup" in Pure
// terminology) is created.
func (f *FlashArrayClonner) EnsureClonnerIgroup(initiatorGroup string, esxAdapters []string) (populator.MappingContext, error) {
	// Pure does not allow a single host to connect to two separate groups,
	// so the volume is later mapped to the host directly, not to a group.
	hosts, err := f.restClient.ListHosts()
	if err != nil {
		return nil, err
	}
	for _, h := range hosts {
		klog.Infof("checking host %s, iqns: %v, wwns: %v", h.Name, h.Iqn, h.Wwn)
		// FC match: compare each of the host's WWNs against the WWPN part of
		// every "fc."-prefixed ESX adapter ID.
		for _, wwn := range h.Wwn {
			for _, hostAdapter := range esxAdapters {
				if !strings.HasPrefix(hostAdapter, "fc.") {
					continue
				}
				adapterWWPN, err := fcUIDToWWPN(hostAdapter)
				if err != nil {
					klog.Warningf("failed to extract WWPN from adapter %s: %s", hostAdapter, err)
					continue
				}
				// Compare WWNs using the utility function that normalizes formatting
				klog.Infof("comparing ESX adapter WWPN %s with Pure host WWN %s", adapterWWPN, wwn)
				if fcutil.CompareWWNs(adapterWWPN, wwn) {
					klog.Infof("match found. Adding host %s to mapping context.", h.Name)
					return populator.MappingContext{"hosts": []string{h.Name}}, nil
				}
			}
		}
		// iSCSI match: exact IQN equality against the adapter list.
		for _, iqn := range h.Iqn {
			if slices.Contains(esxAdapters, iqn) {
				klog.Infof("adding host to group %v", h.Name)
				return populator.MappingContext{"hosts": []string{h.Name}}, nil
			}
		}
	}
	return nil, fmt.Errorf("no hosts found matching any of the provided IQNs/FC adapters: %v", esxAdapters)
}
// Map connects the hosts recorded in the mapping context (key "hosts") to
// targetLUN's backing volume. A pre-existing connection is treated as
// success, making the operation idempotent (consistent with the other
// backends' Map implementations).
func (f *FlashArrayClonner) Map(
	initatorGroup string,
	targetLUN populator.LUN,
	context populator.MappingContext) (populator.LUN, error) {
	hosts, ok := context["hosts"]
	if !ok {
		return populator.LUN{}, fmt.Errorf("hosts not found in context")
	}
	hs, ok := hosts.([]string)
	if !ok || len(hs) == 0 {
		return populator.LUN{}, errors.New("invalid or empty hosts list in mapping context")
	}
	for _, host := range hs {
		klog.Infof("connecting host %s to volume %s", host, targetLUN.Name)
		err := f.restClient.ConnectHost(host, targetLUN.Name)
		if err != nil {
			// An already-existing connection means the volume is usable as-is.
			// Previously this case was skipped, so a single already-connected
			// host made Map report an overall failure; treat it as success.
			if strings.Contains(err.Error(), "Connection already exists.") {
				klog.Infof("connecting host %s to volume %s", host, targetLUN.Name)
				return targetLUN, nil
			}
			return populator.LUN{}, fmt.Errorf("connect host %q to volume %q: %w", host, targetLUN.Name, err)
		}
		return targetLUN, nil
	}
	return populator.LUN{}, fmt.Errorf("connection failed for all hosts in context")
}
// UnMap disconnects every host recorded in the mapping context (key "hosts")
// from targetLUN's backing volume. A context without a usable hosts entry is
// treated as "nothing to do" and succeeds.
func (f *FlashArrayClonner) UnMap(initatorGroup string, targetLUN populator.LUN, context populator.MappingContext) error {
	hostsVal, ok := context["hosts"]
	if !ok {
		return nil
	}
	hs, ok := hostsVal.([]string)
	if !ok || len(hs) == 0 {
		return nil
	}
	for _, host := range hs {
		klog.Infof("disconnecting host %s from volume %s", host, targetLUN.Name)
		if err := f.restClient.DisconnectHost(host, targetLUN.Name); err != nil {
			return err
		}
	}
	return nil
}
// CurrentMappedGroups returns the initiator groups the populator.LUN is mapped to.
//
// For Pure FlashArray this always returns (nil, nil): hosts cannot belong to
// two host groups at once and existing membership must not be disturbed, so
// volumes are mapped to individual hosts and no group data is reported.
func (f *FlashArrayClonner) CurrentMappedGroups(targetLUN populator.LUN, context populator.MappingContext) ([]string, error) {
	// We don't use the host group feature: a host in Pure FlashArray cannot belong to two separate groups, and we
	// definitely don't want to break hosts out of their current groups. Instead we just map/unmap the volume to individual hosts.
	return nil, nil
}
// ResolvePVToLUN looks up the Pure FlashArray volume backing the given PV
// (named "<clusterPrefix>-<pv name>") and returns its LUN details.
func (f *FlashArrayClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	klog.Infof("Resolving target volume for PV %s", pv.Name)
	volumeName := fmt.Sprintf("%s-%s", f.clusterPrefix, pv.Name)
	klog.Infof("Target volume name: %s", volumeName)
	v, err := f.restClient.GetVolume(volumeName)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to get target volume from Pure FlashArray: %w", err)
	}
	klog.Infof("volume %+v\n", v)
	// The NAA identifier is the vendor prefix followed by the lowercased serial.
	lun := populator.LUN{
		Name:         v.Name,
		SerialNumber: v.Serial,
		NAA:          fmt.Sprintf("naa.%s%s", FlashProviderID, strings.ToLower(v.Serial)),
	}
	return lun, nil
}
// fcUIDToWWPN extracts the WWPN (port name) from an ESXi fcUid string and
// returns it colon-formatted per byte pair (e.g. "21:00:...").
// The expected input is of the form 'fc.WWNN:WWPN' where the WWNN and WWPN
// are plain hex runs without per-byte colon separators.
func fcUIDToWWPN(fcUid string) (string, error) {
	return fcutil.ExtractAndFormatWWPN(fcUid)
}
// VvolCopy copies a VVol-backed VMDK into the Pure volume backing the given
// PV: it discovers the source array volume through the vSphere API (VVol
// backing object ID) and then performs an array-side volume copy.
// Progress is reported on the supplied channel (100 on completion, via
// performVolumeCopy).
func (f *FlashArrayClonner) VvolCopy(vsphereClient vmware.Client, vmId string, sourceVMDKFile string, persistentVolume populator.PersistentVolume, progress chan<- uint64) error {
	klog.Infof("Starting VVol copy operation for VM %s", vmId)
	// Parse the VMDK path
	vmDisk, err := populator.ParseVmdkPath(sourceVMDKFile)
	if err != nil {
		return fmt.Errorf("failed to parse VMDK path: %w", err)
	}
	// Resolve target volume details
	targetLUN, err := f.ResolvePVToLUN(persistentVolume)
	if err != nil {
		return fmt.Errorf("failed to resolve target volume: %w", err)
	}
	// Try to get source volume from vSphere API
	sourceVolume, err := f.getSourceVolume(vsphereClient, vmId, vmDisk)
	if err != nil {
		return fmt.Errorf("failed to get source volume from vSphere: %w", err)
	}
	klog.Infof("Copying from source volume %s to target volume %s", sourceVolume, targetLUN.Name)
	// Perform the copy operation
	err = f.performVolumeCopy(sourceVolume, targetLUN.Name, progress)
	if err != nil {
		return fmt.Errorf("copy operation failed: %w", err)
	}
	klog.Infof("VVol copy operation completed successfully")
	return nil
}
// getSourceVolume finds the Pure volume name backing a VM's VMDK by walking
// the VM's virtual disk devices and matching a VVol backing object ID to an
// array volume via the REST client. Returns an error when the VM, its
// hardware config, or a matching VVol backing cannot be found.
func (f *FlashArrayClonner) getSourceVolume(vsphereClient vmware.Client, vmId string, vmDisk populator.VMDisk) (string, error) {
	ctx := context.Background()
	// Get VM object from vSphere
	finder := find.NewFinder(vsphereClient.(*vmware.VSphereClient).Client.Client, true)
	vm, err := finder.VirtualMachine(ctx, vmId)
	if err != nil {
		return "", fmt.Errorf("failed to get VM: %w", err)
	}
	// Get VM hardware configuration
	var vmObject mo.VirtualMachine
	pc := property.DefaultCollector(vsphereClient.(*vmware.VSphereClient).Client.Client)
	err = pc.RetrieveOne(ctx, vm.Reference(), []string{"config.hardware.device"}, &vmObject)
	if err != nil {
		return "", fmt.Errorf("failed to get VM hardware config: %w", err)
	}
	// Look through VM's virtual disks to find VVol backing
	if vmObject.Config == nil || vmObject.Config.Hardware.Device == nil {
		return "", fmt.Errorf("VM config or hardware devices not found")
	}
	for _, device := range vmObject.Config.Hardware.Device {
		if disk, ok := device.(*types.VirtualDisk); ok {
			if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
				// Check if this is a VVol backing and matches our target VMDK
				if backing.BackingObjectId != "" && f.matchesVMDKPath(backing.FileName, vmDisk) {
					klog.Infof("Found VVol backing for VMDK %s with ID %s", vmDisk.VmdkFile, backing.BackingObjectId)
					// Use REST client to find the volume by VVol ID
					volumeName, err := f.restClient.FindVolumeByVVolID(backing.BackingObjectId)
					if err != nil {
						// Keep scanning: another disk may carry a resolvable VVol ID.
						klog.Warningf("Failed to find volume by VVol ID %s: %v", backing.BackingObjectId, err)
						continue
					}
					return volumeName, nil
				}
			}
		}
	}
	return "", fmt.Errorf("VVol backing for VMDK %s not found", vmDisk.VmdkFile)
}
// matchesVMDKPath reports whether the VVol backing file and the requested
// VMDK refer to the same file, comparing base names only.
func (f *FlashArrayClonner) matchesVMDKPath(fileName string, vmDisk populator.VMDisk) bool {
	return filepath.Base(fileName) == filepath.Base(vmDisk.VmdkFile)
}
// RDMCopy copies an RDM-backed disk into the Pure volume backing the given
// PV: the RDM's device name (an NAA identifier) is resolved to the source
// array volume, and an array-side volume copy is performed.
// Progress is reported on the supplied channel (10 before the copy, 100 on
// completion). Fails when the disk is not actually RDM-backed.
func (f *FlashArrayClonner) RDMCopy(vsphereClient vmware.Client, vmId string, sourceVMDKFile string, persistentVolume populator.PersistentVolume, progress chan<- uint64) error {
	klog.Infof("Pure RDM Copy: Starting RDM copy operation for VM %s", vmId)
	// Get disk backing info to find the RDM device
	backing, err := vsphereClient.GetVMDiskBacking(context.Background(), vmId, sourceVMDKFile)
	if err != nil {
		return fmt.Errorf("failed to get RDM disk backing info: %w", err)
	}
	if !backing.IsRDM {
		return fmt.Errorf("disk %s is not an RDM disk", sourceVMDKFile)
	}
	klog.Infof("Pure RDM Copy: Found RDM device: %s", backing.DeviceName)
	// Resolve the source LUN from the RDM device name
	sourceLUN, err := f.resolveRDMToLUN(backing.DeviceName)
	if err != nil {
		return fmt.Errorf("failed to resolve RDM device to source LUN: %w", err)
	}
	// Resolve the target PV to LUN
	targetLUN, err := f.ResolvePVToLUN(persistentVolume)
	if err != nil {
		return fmt.Errorf("failed to resolve target volume: %w", err)
	}
	klog.Infof("Pure RDM Copy: Copying from source LUN %s to target LUN %s", sourceLUN.Name, targetLUN.Name)
	// Report progress start
	progress <- 10
	// Perform the copy operation using Pure FlashArray API
	err = f.restClient.CopyVolume(sourceLUN.Name, targetLUN.Name)
	if err != nil {
		return fmt.Errorf("Pure FlashArray CopyVolume failed: %w", err)
	}
	// Report progress complete
	progress <- 100
	klog.Infof("Pure RDM Copy: Copy operation completed successfully")
	return nil
}
// resolveRDMToLUN resolves an RDM device name to a Pure FlashArray LUN.
//
// The device name is expected to be an NAA identifier of the form
// "naa.624a9370<serial>". The serial is extracted and looked up via the REST
// client; when extraction fails, a fallback scan of all array volumes is
// attempted instead.
func (f *FlashArrayClonner) resolveRDMToLUN(deviceName string) (populator.LUN, error) {
	klog.Infof("Pure RDM Copy: Resolving RDM device %s to LUN", deviceName)
	// The device name from RDM typically contains the NAA identifier
	// For Pure FlashArray, format is "naa.624a9370<serial>" where 624a9370 is the FlashProviderID
	// We need to extract the serial number and find the corresponding LUN
	// Extract serial number from NAA identifier
	serial, err := extractSerialFromNAA(deviceName)
	if err != nil {
		// Fallback: find the volume by listing all volumes and matching the device name.
		klog.Warningf("Could not extract serial from NAA %s: %v, trying to find by listing volumes", deviceName, err)
		return f.findVolumeByDeviceName(deviceName)
	}
	// Find volume by serial number
	volume, err := f.restClient.FindVolumeBySerial(serial)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to find volume by serial %s: %w", serial, err)
	}
	klog.Infof("Pure RDM Copy: Found matching volume %s for device %s", volume.Name, deviceName)
	return populator.LUN{
		Name:         volume.Name,
		SerialNumber: volume.Serial,
		NAA:          fmt.Sprintf("naa.%s%s", FlashProviderID, strings.ToLower(volume.Serial)),
	}, nil
}
// extractSerialFromNAA derives the Pure volume serial from a NAA identifier.
// Expected shape: "naa.624a9370<serial>" (the "naa." prefix is optional);
// the serial is returned uppercased. An error is returned when the
// identifier does not carry Pure's provider prefix or has no serial part.
func extractSerialFromNAA(naa string) (string, error) {
	// Lowercase and drop an optional "naa." prefix before matching.
	id := strings.TrimPrefix(strings.ToLower(naa), "naa.")
	providerID := strings.ToLower(FlashProviderID)
	if !strings.HasPrefix(id, providerID) {
		return "", fmt.Errorf("NAA %s does not appear to be a Pure FlashArray device (expected prefix %s)", id, FlashProviderID)
	}
	serial := strings.TrimPrefix(id, providerID)
	if serial == "" {
		return "", fmt.Errorf("could not extract serial from NAA %s", id)
	}
	return strings.ToUpper(serial), nil
}
// findVolumeByDeviceName finds a volume by scanning all array volumes and
// matching the RDM device name against each volume's serial / NAA identifier.
// Used as a fallback when the serial cannot be extracted directly from the
// NAA string.
func (f *FlashArrayClonner) findVolumeByDeviceName(deviceName string) (populator.LUN, error) {
	// List all volumes and find the one matching the device name
	volumes, err := f.restClient.ListVolumes()
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to list volumes: %w", err)
	}
	deviceName = strings.ToLower(deviceName)
	for _, volume := range volumes {
		// Build the expected NAA for this volume
		naa := fmt.Sprintf("naa.%s%s", FlashProviderID, strings.ToLower(volume.Serial))
		// Match on a contained serial, a contained full NAA, or exact NAA equality.
		if strings.Contains(deviceName, strings.ToLower(volume.Serial)) ||
			strings.Contains(deviceName, naa) ||
			deviceName == naa {
			klog.Infof("Pure RDM Copy: Found matching volume %s for device %s", volume.Name, deviceName)
			return populator.LUN{
				Name:         volume.Name,
				SerialNumber: volume.Serial,
				NAA:          fmt.Sprintf("naa.%s%s", FlashProviderID, strings.ToLower(volume.Serial)),
			}, nil
		}
	}
	return populator.LUN{}, fmt.Errorf("could not find volume matching RDM device %s", deviceName)
}
// performVolumeCopy runs an array-side volume copy on the Pure FlashArray
// and reports completion (100) on the progress channel.
func (f *FlashArrayClonner) performVolumeCopy(sourceVolumeName, targetVolumeName string, progress chan<- uint64) error {
	if err := f.restClient.CopyVolume(sourceVolumeName, targetVolumeName); err != nil {
		return fmt.Errorf("Pure FlashArray CopyVolume failed: %w", err)
	}
	progress <- 100
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/pure/flashArray_test.go | cmd/vsphere-xcopy-volume-populator/internal/pure/flashArray_test.go | package pure
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestFcUIDToWWPN verifies conversion of vSphere "fc.WWNN:WWPN" adapter UIDs
// into colon-separated, upper-case WWPN strings, including malformed inputs.
func TestFcUIDToWWPN(t *testing.T) {
	cases := []struct {
		name          string
		fcUid         string
		expectedWwpn  string
		expectError   bool
		errorContains string
	}{
		{name: "valid fcUid", fcUid: "fc.2020202020202020:2121212121212121", expectedWwpn: "21:21:21:21:21:21:21:21"},
		{name: "missing WWPN", fcUid: "fc.2020202020202020", expectError: true, errorContains: "not in expected fc.WWNN:WWPN format"},
		{name: "invalid prefix", fcUid: "f.2020202020202020:2121212121212121", expectError: true, errorContains: "doesn't start with 'fc.'"},
		{name: "invalid format", fcUid: "fc.2020202020202020:", expectError: true, errorContains: "empty WWNN or WWPN"},
		{name: "odd length wwpn", fcUid: "fc.2020202020202020:12345", expectError: true, errorContains: "odd length"},
		{name: "lowercase input", fcUid: "fc.2020202020202020:2a2b2c2d2e2f2021", expectedWwpn: "2A:2B:2C:2D:2E:2F:20:21"}, // NOSONAR
		{name: "empty string", fcUid: "", expectError: true, errorContains: "doesn't start with 'fc.'"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			wwpn, err := fcUIDToWWPN(tc.fcUid)
			if !tc.expectError {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if wwpn != tc.expectedWwpn {
					t.Errorf("expected wwpn %q, but got %q", tc.expectedWwpn, wwpn)
				}
				return
			}
			switch {
			case err == nil:
				t.Errorf("expected an error but got none")
			case !strings.Contains(err.Error(), tc.errorContains):
				t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
			}
		})
	}
}
// TestExtractSerialFromNAA verifies extraction of the Pure volume serial from
// NAA identifiers in several spellings, and rejection of non-Pure IDs.
func TestExtractSerialFromNAA(t *testing.T) {
	cases := []struct {
		name           string
		naa            string
		expectedSerial string
		expectError    bool
		errorContains  string
	}{
		{name: "valid NAA with naa. prefix", naa: "naa.624a9370abcd1234efgh5678", expectedSerial: "ABCD1234EFGH5678"},
		{name: "valid NAA without prefix", naa: "624a9370abcd1234efgh5678", expectedSerial: "ABCD1234EFGH5678"},
		{name: "uppercase NAA", naa: "NAA.624A9370ABCD1234EFGH5678", expectedSerial: "ABCD1234EFGH5678"},
		{name: "wrong provider ID", naa: "naa.600a0980abcd1234efgh5678", expectError: true, errorContains: "does not appear to be a Pure FlashArray device"},
		{name: "empty serial", naa: "naa.624a9370", expectError: true, errorContains: "could not extract serial"},
		{name: "empty string", naa: "", expectError: true, errorContains: "does not appear to be a Pure FlashArray device"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			serial, err := extractSerialFromNAA(tc.naa)
			if tc.expectError {
				switch {
				case err == nil:
					t.Errorf("expected an error but got none")
				case !strings.Contains(err.Error(), tc.errorContains):
					t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
				}
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if serial != tc.expectedSerial {
				t.Errorf("expected serial %q, but got %q", tc.expectedSerial, serial)
			}
		})
	}
}
// TestAuthenticationMethods tests the different authentication methods for
// Pure FlashArray: token-only, username/password, token precedence, and a
// rejected token.
func TestAuthenticationMethods(t *testing.T) {
	// writeVersions answers the /api/api_version probe issued by every client.
	writeVersions := func(w http.ResponseWriter) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`{"version": ["1.19", "2.4"]}`))
	}
	// mockArray builds a TLS server emulating the endpoints touched during
	// client initialization. Per-case behavior is injected via the two
	// handlers; a nil handler falls through to 404 for that endpoint.
	mockArray := func(onAPIToken, onLogin http.HandlerFunc) *httptest.Server {
		return httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			switch {
			case strings.HasSuffix(r.URL.Path, "/api/api_version"):
				writeVersions(w)
			case strings.Contains(r.URL.Path, "/auth/apitoken") && onAPIToken != nil:
				onAPIToken(w, r)
			case strings.Contains(r.URL.Path, "/login") && onLogin != nil:
				onLogin(w, r)
			default:
				w.WriteHeader(http.StatusNotFound)
			}
		}))
	}
	testCases := []struct {
		name            string
		username        string
		password        string
		token           string
		setupMockServer func() *httptest.Server
		expectError     bool
		errorContains   string
	}{
		{
			name:  "token-based authentication should skip username/password",
			token: "test-api-token-12345",
			setupMockServer: func() *httptest.Server {
				return mockArray(nil, func(w http.ResponseWriter, r *http.Request) {
					// The provided api-token must be forwarded verbatim.
					if r.Header.Get("api-token") != "test-api-token-12345" {
						w.WriteHeader(http.StatusUnauthorized)
						return
					}
					w.Header().Set("x-auth-token", "test-auth-token")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(`{}`))
				})
			},
		},
		{
			name:     "username/password authentication should work when token is empty",
			username: "testuser",
			password: "testpass",
			setupMockServer: func() *httptest.Server {
				return mockArray(
					func(w http.ResponseWriter, r *http.Request) {
						w.Header().Set("Content-Type", "application/json")
						w.WriteHeader(http.StatusOK)
						w.Write([]byte(`{"api_token": "obtained-api-token"}`))
					},
					func(w http.ResponseWriter, r *http.Request) {
						w.Header().Set("x-auth-token", "test-auth-token")
						w.WriteHeader(http.StatusOK)
						w.Write([]byte(`{}`))
					})
			},
		},
		{
			name:     "token takes precedence when both token and username/password are provided",
			username: "testuser",
			password: "testpass",
			token:    "test-api-token-precedence",
			setupMockServer: func() *httptest.Server {
				apiTokenCalled := false
				return mockArray(
					func(w http.ResponseWriter, r *http.Request) {
						// Must NOT be reached when a token is supplied.
						apiTokenCalled = true
						w.WriteHeader(http.StatusInternalServerError)
						w.Write([]byte(`{"error": "should not call this endpoint when token is provided"}`))
					},
					func(w http.ResponseWriter, r *http.Request) {
						// Fail the login if a fresh token was obtained anyway.
						if apiTokenCalled {
							w.WriteHeader(http.StatusInternalServerError)
							return
						}
						w.Header().Set("x-auth-token", "test-auth-token")
						w.WriteHeader(http.StatusOK)
						w.Write([]byte(`{}`))
					})
			},
		},
		{
			name:  "invalid token should fail authentication",
			token: "invalid-token",
			setupMockServer: func() *httptest.Server {
				return mockArray(nil, func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(http.StatusUnauthorized)
					w.Write([]byte(`{"error": "invalid token"}`))
				})
			},
			expectError:   true,
			errorContains: "failed to get auth token",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			server := tc.setupMockServer()
			defer server.Close()
			// NewRestClient expects host[:port] without the scheme.
			hostname := strings.TrimPrefix(server.URL, "https://")
			client, err := NewRestClient(hostname, tc.username, tc.password, tc.token, true)
			if tc.expectError {
				if err == nil {
					t.Errorf("expected an error but got none")
				} else if tc.errorContains != "" && !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
				}
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if client == nil {
				t.Errorf("expected client to be created, but got nil")
			}
		})
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/pure/rest_client.go | cmd/vsphere-xcopy-volume-populator/internal/pure/rest_client.go | package pure
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
)
// RestClient provides REST API access to Pure FlashArray. It caches the
// negotiated API versions and the tokens obtained during initialization
// (see NewRestClient for the authentication sequence).
type RestClient struct {
	hostname   string       // array management address (host[:port], no scheme)
	httpClient *http.Client // shared client; TLS verification is configurable
	apiToken   string       // API token used for login (caller-provided or obtained via the 1.x API)
	authToken  string       // session token (x-auth-token header) attached to all 2.x requests
	apiV1      string       // Latest 1.x API version
	apiV2      string       // Latest 2.x API version
}

// APIVersionResponse represents the response from /api/api_version
type APIVersionResponse struct {
	Version []string `json:"version"`
}

// APITokenRequest represents the request for getting an API token
// (1.x /auth/apitoken endpoint).
type APITokenRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// APITokenResponse represents the response containing the API token
type APITokenResponse struct {
	APIToken string `json:"api_token"`
}

// VolumeTagItem represents a volume tag item from the tags API
type VolumeTagItem struct {
	Namespace string `json:"namespace"`
	Value     string `json:"value"`
	// Resource identifies the tagged object (the volume).
	Resource struct {
		Name string `json:"name"`
		ID   string `json:"id"`
	} `json:"resource"`
	Key      string `json:"key"`
	Copyable bool   `json:"copyable"`
}

// VolumeTagsResponse represents the response from the volume tags API
type VolumeTagsResponse struct {
	Items              []VolumeTagItem `json:"items"`
	ContinuationToken  *string         `json:"continuation_token"`
	MoreItemsRemaining bool            `json:"more_items_remaining"`
	TotalItemCount     *int            `json:"total_item_count"`
}

// CopyVolumeRequest represents the request for copying a volume
// (2.x POST /volumes with overwrite=true).
type CopyVolumeRequest struct {
	Source struct {
		Name string `json:"name"`
	} `json:"source"`
	Names string `json:"names"` // Changed from []string to string
}

// Host represents a Pure FlashArray host
type Host struct {
	Name string   `json:"name"`
	Iqn  []string `json:"iqns"` // iSCSI initiator names
	Wwn  []string `json:"wwns"` // Fibre Channel WWNs
}

// Volume represents a Pure FlashArray volume
type Volume struct {
	Name   string `json:"name"`
	Serial string `json:"serial"`
	Size   uint64 `json:"size"`
}

// HostsResponse represents the response from hosts API
type HostsResponse struct {
	Items []Host `json:"items"`
}

// VolumesResponse represents the response from volumes API
type VolumesResponse struct {
	Items []Volume `json:"items"`
}

// HostConnectionRequest represents a host connection request
// (2.x /connections endpoint).
type HostConnectionRequest struct {
	HostNames   string `json:"host_names"`
	VolumeNames string `json:"volume_names"`
}
// NewRestClient creates a new REST client for Pure FlashArray.
//
// If apiToken is non-empty it is used directly and username/password are
// ignored; otherwise username/password are exchanged for an API token via
// the latest 1.x API (only 1.x supports this). In both cases a session
// (auth) token is then obtained via the 2.x login endpoint.
func NewRestClient(hostname, username, password, apiToken string, skipSSLVerify bool) (*RestClient, error) {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: skipSSLVerify,
		},
	}
	c := &RestClient{
		hostname: hostname,
		httpClient: &http.Client{
			Timeout:   30 * time.Second,
			Transport: transport,
		},
	}
	// Negotiate the newest supported 1.x and 2.x API versions first.
	if err := c.detectAPIVersions(); err != nil {
		return nil, fmt.Errorf("failed to detect API versions: %w", err)
	}
	// Establish the API token: caller-supplied, or obtained via 1.x API.
	if apiToken != "" {
		klog.Infof("Pure REST Client: Using provided API token for authentication")
		c.apiToken = apiToken
	} else {
		klog.Infof("Pure REST Client: Using username/password authentication to obtain API token")
		if err := c.getAPIToken(username, password); err != nil {
			return nil, fmt.Errorf("failed to get API token: %w", err)
		}
	}
	// Exchange the API token for a session token via the 2.x login API.
	if err := c.getAuthToken(); err != nil {
		return nil, fmt.Errorf("failed to get auth token: %w", err)
	}
	klog.Infof("Pure REST Client: Successfully initialized with API v%s (token)/v%s (operations)", c.apiV1, c.apiV2)
	return c, nil
}
// detectAPIVersions queries /api/api_version and records the newest
// advertised 1.x and 2.x API versions on the client.
func (c *RestClient) detectAPIVersions() error {
	endpoint := fmt.Sprintf("https://%s/api/api_version", c.hostname)
	resp, err := c.httpClient.Get(endpoint)
	if err != nil {
		return fmt.Errorf("failed to get API versions: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("API version request failed with status: %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read API version response: %w", err)
	}
	var apiResponse APIVersionResponse
	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return fmt.Errorf("failed to parse API version response: %w", err)
	}
	// Bucket the advertised versions by major release.
	var v1s, v2s []string
	for _, v := range apiResponse.Version {
		switch {
		case strings.HasPrefix(v, "1."):
			v1s = append(v1s, v)
		case strings.HasPrefix(v, "2."):
			v2s = append(v2s, v)
		}
	}
	if len(v1s) == 0 {
		return fmt.Errorf("no API v1.x versions found")
	}
	if len(v2s) == 0 {
		return fmt.Errorf("no API v2.x versions found")
	}
	// Sort newest-first and keep the head of each bucket.
	sort.Slice(v1s, func(i, j int) bool { return compareVersions(v1s[i], v1s[j]) > 0 })
	sort.Slice(v2s, func(i, j int) bool { return compareVersions(v2s[i], v2s[j]) > 0 })
	c.apiV1, c.apiV2 = v1s[0], v2s[0]
	klog.Infof("Pure REST Client: Using API versions v%s (token)/v%s (operations)", c.apiV1, c.apiV2)
	return nil
}
// getAPIToken exchanges username/password for an API token using the latest
// 1.x API and stores the token on the client.
func (c *RestClient) getAPIToken(username, password string) error {
	endpoint := fmt.Sprintf("https://%s/api/%s/auth/apitoken", c.hostname, c.apiV1)
	payload, err := json.Marshal(APITokenRequest{Username: username, Password: password})
	if err != nil {
		return fmt.Errorf("failed to marshal API token request: %w", err)
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return fmt.Errorf("failed to create API token request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("Accept", "application/json")
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send API token request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read API token response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("API token request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var tokenResponse APITokenResponse
	if err := json.Unmarshal(body, &tokenResponse); err != nil {
		return fmt.Errorf("failed to parse API token response: %w", err)
	}
	c.apiToken = tokenResponse.APIToken
	return nil
}
// getAuthToken exchanges the API token for a session token via the latest
// 2.x login endpoint and stores it on the client for subsequent requests.
func (c *RestClient) getAuthToken() error {
	endpoint := fmt.Sprintf("https://%s/api/%s/login", c.hostname, c.apiV2)
	req, err := http.NewRequest("POST", endpoint, nil)
	if err != nil {
		return fmt.Errorf("failed to create auth token request: %w", err)
	}
	req.Header.Set("api-token", c.apiToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send auth token request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read auth token response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("auth token request failed with status %d: %s", resp.StatusCode, string(body))
	}
	// The session token is delivered in a response header, not the body.
	token := resp.Header.Get("x-auth-token")
	if token == "" {
		return fmt.Errorf("no x-auth-token header in response")
	}
	c.authToken = token
	return nil
}
// FindVolumeByVVolID finds a volume using its VVol ID via the tags API.
// It searches for a non-destroyed volume carrying the PURE_VVOL_ID tag in
// the VASA integration namespace and returns the tagged volume's name.
//
// NOTE(review): vvolID is interpolated into the filter expression without
// escaping; a value containing a single quote would break the filter —
// confirm VVol IDs are always quote-free.
func (c *RestClient) FindVolumeByVVolID(vvolID string) (string, error) {
	filter := fmt.Sprintf("key='PURE_VVOL_ID' AND value='%s'", vvolID)
	baseURL := fmt.Sprintf("https://%s/api/%s/volumes/tags", c.hostname, c.apiV2)
	// Query parameters are URL-encoded via url.Values.
	params := url.Values{}
	params.Set("resource_destroyed", "False")
	params.Set("namespaces", "vasa-integration.purestorage.com")
	params.Set("filter", filter)
	finalURL := baseURL + "?" + params.Encode()
	req, err := http.NewRequest("GET", finalURL, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create volume search request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to send volume search request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read volume search response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("volume search request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var tagsResponse VolumeTagsResponse
	if err := json.Unmarshal(body, &tagsResponse); err != nil {
		return "", fmt.Errorf("failed to parse volume search response: %w", err)
	}
	if len(tagsResponse.Items) == 0 {
		return "", fmt.Errorf("no volume found with VVol ID: %s", vvolID)
	}
	// Multiple matches are not expected; the first item wins.
	volumeName := tagsResponse.Items[0].Resource.Name
	klog.Infof("Pure REST Client: Found volume %s for VVol ID %s", volumeName, vvolID)
	return volumeName, nil
}
// CopyVolume copies (overwrites) the contents of sourceVolumeName onto
// targetVolumeName using the 2.x volumes API with overwrite=true.
func (c *RestClient) CopyVolume(sourceVolumeName, targetVolumeName string) error {
	endpoint := fmt.Sprintf("https://%s/api/%s/volumes?overwrite=true", c.hostname, c.apiV2)
	payload := CopyVolumeRequest{Names: targetVolumeName}
	payload.Source.Name = sourceVolumeName
	jsonBody, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("failed to marshal copy volume request: %w", err)
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(jsonBody))
	if err != nil {
		return fmt.Errorf("failed to create copy volume request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send copy volume request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read copy volume response: %w", err)
	}
	// Both 200 and 201 indicate the copy was accepted.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("copy volume request failed with status %d: %s", resp.StatusCode, string(body))
	}
	klog.Infof("Pure REST Client: Successfully copied volume from %s to %s", sourceVolumeName, targetVolumeName)
	return nil
}
// ListHosts lists all local hosts on the Pure FlashArray. Remote hosts
// (active-cluster peers) are excluded via the is_local filter.
func (c *RestClient) ListHosts() ([]Host, error) {
	endpoint := fmt.Sprintf("https://%s/api/%s/hosts?filter=is_local", c.hostname, c.apiV2)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create list hosts request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send list hosts request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read list hosts response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("list hosts request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var parsed HostsResponse
	if err := json.Unmarshal(body, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse list hosts response: %w", err)
	}
	return parsed.Items, nil
}
// ConnectHost connects a volume to a host via the 2.x connections API.
func (c *RestClient) ConnectHost(hostName, volumeName string) error {
	endpoint := fmt.Sprintf("https://%s/api/%s/connections", c.hostname, c.apiV2)
	payload, err := json.Marshal(HostConnectionRequest{
		HostNames:   hostName,
		VolumeNames: volumeName,
	})
	if err != nil {
		return fmt.Errorf("failed to marshal connect host request: %w", err)
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return fmt.Errorf("failed to create connect host request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send connect host request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read connect host response: %w", err)
	}
	// Both 200 and 201 indicate the connection was created.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("connect host request failed with status %d: %s", resp.StatusCode, string(body))
	}
	return nil
}
// DisconnectHost disconnects a volume from a host.
//
// Fix: host and volume names are now URL-encoded via url.Values instead of
// being interpolated raw into the query string, so names containing reserved
// characters (spaces, '&', '/', ...) cannot corrupt the request. This matches
// the encoding already used by GetVolume and FindVolumeByVVolID.
func (c *RestClient) DisconnectHost(hostName, volumeName string) error {
	params := url.Values{}
	params.Set("host_names", hostName)
	params.Set("volume_names", volumeName)
	endpoint := fmt.Sprintf("https://%s/api/%s/connections?%s", c.hostname, c.apiV2, params.Encode())
	req, err := http.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return fmt.Errorf("failed to create disconnect host request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send disconnect host request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read disconnect host response: %w", err)
	}
	// 200 and 204 both indicate a successful disconnect.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("disconnect host request failed with status %d: %s", resp.StatusCode, string(body))
	}
	return nil
}
// GetVolume gets information about a specific volume by name.
func (c *RestClient) GetVolume(volumeName string) (*Volume, error) {
	query := url.Values{}
	query.Set("names", volumeName)
	endpoint := fmt.Sprintf("https://%s/api/%s/volumes?%s", c.hostname, c.apiV2, query.Encode())
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create get volume request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send get volume request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read get volume response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("get volume request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var parsed VolumesResponse
	if err := json.Unmarshal(body, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse get volume response: %w", err)
	}
	if len(parsed.Items) == 0 {
		return nil, fmt.Errorf("volume not found: %s", volumeName)
	}
	return &parsed.Items[0], nil
}
// ListVolumes lists all volumes on the Pure FlashArray.
func (c *RestClient) ListVolumes() ([]Volume, error) {
	endpoint := fmt.Sprintf("https://%s/api/%s/volumes", c.hostname, c.apiV2)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create list volumes request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send list volumes request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read list volumes response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("list volumes request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var parsed VolumesResponse
	if err := json.Unmarshal(body, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse list volumes response: %w", err)
	}
	return parsed.Items, nil
}
// FindVolumeBySerial finds a volume by its serial number, using the array's
// server-side filter support. The serial is normalized to uppercase before
// the lookup; the first matching volume is returned.
//
// NOTE(review): the serial is interpolated into the filter expression
// without escaping; a value containing a single quote would break the
// filter — serials produced by extractSerialFromNAA are hex-only, confirm
// other callers keep that invariant.
func (c *RestClient) FindVolumeBySerial(serial string) (*Volume, error) {
	// Pure FlashArray API allows filtering by serial
	baseURL := fmt.Sprintf("https://%s/api/%s/volumes", c.hostname, c.apiV2)
	// Normalize serial to uppercase for comparison
	serial = strings.ToUpper(serial)
	params := url.Values{}
	params.Set("filter", fmt.Sprintf("serial='%s'", serial))
	finalURL := baseURL + "?" + params.Encode()
	req, err := http.NewRequest("GET", finalURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create find volume request: %w", err)
	}
	req.Header.Set("x-auth-token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send find volume request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read find volume response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("find volume request failed with status %d: %s", resp.StatusCode, string(body))
	}
	var volumesResponse VolumesResponse
	if err := json.Unmarshal(body, &volumesResponse); err != nil {
		return nil, fmt.Errorf("failed to parse find volume response: %w", err)
	}
	if len(volumesResponse.Items) == 0 {
		return nil, fmt.Errorf("volume not found with serial: %s", serial)
	}
	klog.Infof("Pure REST Client: Found volume %s for serial %s", volumesResponse.Items[0].Name, serial)
	return &volumesResponse.Items[0], nil
}
// compareVersions compares two dotted version strings (e.g. "1.19" vs "1.2")
// numerically, component by component. Missing components count as 0, and
// non-numeric components parse as 0 (the Atoi error is deliberately ignored).
// Returns > 0 if v1 > v2, 0 if equal, < 0 if v1 < v2.
func compareVersions(v1, v2 string) int {
	parts1 := strings.Split(v1, ".")
	parts2 := strings.Split(v2, ".")
	// component returns the i-th numeric component, or 0 past the end.
	component := func(parts []string, i int) int {
		if i >= len(parts) {
			return 0
		}
		n, _ := strconv.Atoi(parts[i])
		return n
	}
	limit := len(parts1)
	if len(parts2) > limit {
		limit = len(parts2)
	}
	for i := 0; i < limit; i++ {
		if d := component(parts1, i) - component(parts2, i); d != 0 {
			return d
		}
	}
	return 0
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/powermax/powermax.go | cmd/vsphere-xcopy-volume-populator/internal/powermax/powermax.go | package powermax
//go:generate mockgen -destination=mock_powermax_client_test.go -package=powermax github.com/dell/gopowermax/v2 Pmax
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"slices"
"strings"
gopowermax "github.com/dell/gopowermax/v2"
pmxtypes "github.com/dell/gopowermax/v2/types/v100"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
// PowermaxClonner implements populator.StorageApi for Dell PowerMax arrays
// via the gopowermax client.
type PowermaxClonner struct {
	client      gopowermax.Pmax // Unisphere API client
	symmetrixID string          // target array (symmetrix) ID
	portGroup   string          // port group used when building the masking view
	// The fields below are populated as a side effect of
	// EnsureClonnerIgroup and Map during a cloning session.
	initiatorID    string // generated "xcopy-<rand>" name, also used as the masking-view name
	storageGroupID string // "<initiatorID>-SG"
	hostID         string // existing host whose initiators matched the clonner's
	maskingViewID  string // masking view created/looked up by Map
}
// CurrentMappedGroups implements populator.StorageApi.
//
// It returns the names of the storage groups the volume belongs to, plus the
// hosts/host-groups referenced by the array's masking views.
//
// Fixes: the array-wide GetMaskingViewList call was previously made once per
// storage group, which re-added every masking view's host for each group; it
// is now made once, and the result is de-duplicated to match the
// "Unique Host Groups" log statement. The error log also no longer formats a
// storage-group struct with %s.
//
// NOTE(review): masking views are not filtered by the volume's storage
// groups — every masking view on the array contributes its host/host-group,
// as in the original implementation. Confirm whether filtering on the
// masking view's storage group is intended.
func (p *PowermaxClonner) CurrentMappedGroups(targetLUN populator.LUN, mappingContext populator.MappingContext) ([]string, error) {
	ctx := context.TODO()
	volume, err := p.client.GetVolumeByID(ctx, p.symmetrixID, targetLUN.ProviderID)
	if err != nil {
		return nil, fmt.Errorf("Error getting volume %s: %v", targetLUN.ProviderID, err)
	}
	if len(volume.StorageGroups) == 0 {
		return nil, fmt.Errorf("Volume %s is not associated with any Storage Group.\n", targetLUN.ProviderID)
	}
	klog.Infof("Volume %s is in Storage Group(s): %v\n", targetLUN.ProviderID, volume.StorageGroups)
	// Collect results with order-preserving de-duplication.
	seen := map[string]bool{}
	foundHostGroups := []string{}
	add := func(name string) {
		if name != "" && !seen[name] {
			seen[name] = true
			foundHostGroups = append(foundHostGroups, name)
		}
	}
	for _, sg := range volume.StorageGroups {
		add(sg.StorageGroupName)
	}
	// The masking-view listing is array-wide, so fetch it exactly once.
	maskingViewList, err := p.client.GetMaskingViewList(ctx, p.symmetrixID)
	if err != nil {
		klog.Infof("Error getting masking views: %v", err)
	} else {
		for _, mvID := range maskingViewList.MaskingViewIDs {
			maskingView, err := p.client.GetMaskingViewByID(ctx, p.symmetrixID, mvID)
			if err != nil {
				klog.Errorf("Error getting masking view %s: %v", mvID, err)
				continue
			}
			if maskingView.HostID != "" {
				// This masking view is directly mapped to a Host, not a Host Group
				klog.Infof("Volume %s is mapped via Masking View %s to Host: %s\n", targetLUN.ProviderID, mvID, maskingView.HostID)
				add(maskingView.HostID)
			} else if maskingView.HostGroupID != "" {
				// This masking view is mapped to a Host Group
				klog.Infof("Volume %s is mapped via Masking View %s to Host Group: %s\n", targetLUN.ProviderID, mvID, maskingView.HostGroupID)
				add(maskingView.HostGroupID)
			}
		}
	}
	if len(foundHostGroups) > 0 {
		klog.Info("Unique Host Groups found for the volume:")
		for _, hg := range foundHostGroups {
			klog.Infof("- %s", hg)
		}
	} else {
		klog.Info("No host groups found for the volume.")
	}
	return foundHostGroups, nil
}
// EnsureClonnerIgroup implements populator.StorageApi.
//
// It prepares the PowerMax objects needed to expose volumes to the clonner:
// a uniquely-named storage group ("xcopy-<rand>-SG") and a host lookup
// matching the clonner's initiators (filtered to the protocol of the
// configured port group). The masking view itself is created later, in Map.
//
// Fix: the GetHostList error was previously ignored, which could panic on a
// nil hosts list; it is now checked, and the final return no longer
// propagates a stale error value.
func (p *PowermaxClonner) EnsureClonnerIgroup(_ string, clonnerIqn []string) (populator.MappingContext, error) {
	ctx := context.TODO()
	randomString, err := generateRandomString(4)
	if err != nil {
		return nil, err
	}
	p.initiatorID = fmt.Sprintf("xcopy-%s", randomString)
	klog.Infof("Generated unique initiator group name: %s", p.initiatorID)
	// steps:
	// 1. create the storage group
	// 2. create a masking view, add the storage group to it - name it with the same name
	// 3. create InitiatorGroup on the masking view
	// 4. add clonnerIqn to that initiator group
	// 5. add port group with protocol type that match the cloner IQN type, only if they all online
	p.storageGroupID = fmt.Sprintf("%s-SG", p.initiatorID)
	klog.Infof("ensuring storage group %s exists with hosts %v", p.storageGroupID, clonnerIqn)
	_, err = p.client.GetStorageGroup(ctx, p.symmetrixID, p.storageGroupID)
	if err == nil {
		klog.Infof("group %s exists", p.storageGroupID)
	}
	// A 404 means the group does not exist yet - create it.
	if e, ok := err.(*pmxtypes.Error); ok && e.HTTPStatusCode == 404 {
		klog.Infof("group %s doesn't exist - create it", p.storageGroupID)
		if _, err := p.client.CreateStorageGroup(ctx, p.symmetrixID, p.storageGroupID, "none", "", true, nil); err != nil {
			klog.Errorf("failed to create group %v ", err)
			return nil, err
		}
	}
	klog.Infof("storage group %s", p.storageGroupID)
	// Fetch port group to determine protocol type (iSCSI vs FC).
	portGroup, err := p.client.GetPortGroupByID(ctx, p.symmetrixID, p.portGroup)
	if err != nil {
		return nil, fmt.Errorf("failed to get port group %s: %w", p.portGroup, err)
	}
	klog.Infof("port group %s has protocol: %s", p.portGroup, portGroup.PortGroupProtocol)
	// Keep only initiators whose transport matches the port group protocol.
	filteredInitiators := filterInitiatorsByProtocol(clonnerIqn, portGroup.PortGroupProtocol)
	if len(filteredInitiators) == 0 {
		return nil, fmt.Errorf("no initiators matching protocol %s found in %v", portGroup.PortGroupProtocol, clonnerIqn)
	}
	klog.Infof("filtered initiators for protocol %s: %v", portGroup.PortGroupProtocol, filteredInitiators)
	hosts, err := p.client.GetHostList(ctx, p.symmetrixID)
	if err != nil {
		// Previously unchecked; a failure here would dereference nil below.
		return nil, fmt.Errorf("failed to get host list: %w", err)
	}
	// Find the first existing host owning one of the filtered initiators.
h:
	for _, hostId := range hosts.HostIDs {
		host, err := p.client.GetHostByID(ctx, p.symmetrixID, hostId)
		if err != nil {
			return nil, err
		}
		klog.Infof("host ID %s and initiators %v", host.HostID, host.Initiators)
		for _, initiator := range host.Initiators {
			for _, filteredInit := range filteredInitiators {
				if strings.HasSuffix(filteredInit, initiator) {
					p.hostID = hostId
					break h
				}
			}
		}
	}
	if p.hostID != "" {
		klog.Infof("found host ID %s matching protocol %s", p.hostID, portGroup.PortGroupProtocol)
	} else {
		klog.Infof("cannot find host matching filtered initiators %v", filteredInitiators)
	}
	klog.Infof("port group ID %s", p.portGroup)
	return map[string]any{}, nil
}
// Map implements populator.StorageApi.
// It adds targetLUN to the clonner storage group (no-op when the volume is
// already a member) and makes sure a masking view exists so the clonner host
// can see the volume. The masking view ID created/looked up is p.initiatorID.
func (p *PowermaxClonner) Map(_ string, targetLUN populator.LUN, mappingContext populator.MappingContext) (populator.LUN, error) {
	klog.Infof("mapping volume %s to %s", targetLUN.ProviderID, p.storageGroupID)
	ctx := context.TODO()
	volumesMapped, err := p.client.GetVolumeIDListInStorageGroup(ctx, p.symmetrixID, p.storageGroupID)
	if err != nil {
		return targetLUN, err
	}
	if slices.Contains(volumesMapped, targetLUN.ProviderID) {
		klog.Infof("volume %s already mapped to storage-group %s", targetLUN.ProviderID, p.storageGroupID)
		return targetLUN, nil
	}
	if err := p.client.AddVolumesToStorageGroupS(ctx, p.symmetrixID, p.storageGroupID, false, targetLUN.ProviderID); err != nil {
		klog.Infof("failed mapping volume %s to %s: %v", targetLUN.ProviderID, p.storageGroupID, err)
		return targetLUN, err
	}
	// The masking view shares its identifier with the initiator group.
	mv, err := p.client.GetMaskingViewByID(ctx, p.symmetrixID, p.initiatorID)
	if err != nil {
		if e, ok := err.(*pmxtypes.Error); ok && e.HTTPStatusCode == 404 {
			// Not found: it will be created below.
			klog.Infof("masking view not found %s ", e)
		} else {
			return populator.LUN{}, err
		}
	}
	if mv == nil {
		mv, err = p.client.CreateMaskingView(ctx, p.symmetrixID, p.initiatorID, p.storageGroupID, p.hostID, false, p.portGroup)
		if err != nil {
			return populator.LUN{}, err
		}
	}
	klog.Infof("successfully mapped volume %s to %s with masking view %s", targetLUN.ProviderID, p.initiatorID, mv.MaskingViewID)
	p.maskingViewID = mv.MaskingViewID
	// Return nil explicitly: the original returned the leftover `err`
	// variable, which only happened to be nil on all success paths.
	return targetLUN, nil
}
// ResolvePVToLUN implements populator.StorageApi.
// The PV's volume handle ends with the PowerMax device ID after the last
// dash; that ID is resolved against the array and returned as a LUN whose
// NAA identifier is derived from the volume's WWN.
func (p *PowermaxClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	ctx := context.TODO()
	volID := pv.VolumeHandle[strings.LastIndex(pv.VolumeHandle, "-")+1:]
	volume, err := p.client.GetVolumeByID(ctx, p.symmetrixID, volID)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed getting details for volume %s: %w", volID, err)
	}
	// Guard against a nil/empty response: the original dereferenced
	// volume.VolumeID without checking volume for nil first.
	if volume == nil || volume.VolumeID == "" {
		return populator.LUN{}, fmt.Errorf("failed getting details for volume %s: empty response", volID)
	}
	naa := fmt.Sprintf("naa.%s", volume.WWN)
	return populator.LUN{Name: volume.VolumeIdentifier, ProviderID: volume.VolumeID, NAA: naa}, nil
}
// UnMap implements populator.StorageApi.
// It removes targetLUN from the clonner storage group. When the mapping
// context requests cleanup of the xcopy initiator group, the masking view is
// deleted first and the storage group itself is deleted after the volume has
// been removed.
func (p *PowermaxClonner) UnMap(_ string, targetLUN populator.LUN, mappingContext populator.MappingContext) error {
	ctx := context.TODO()
	// Comma-ok type assertion: the original `cleanup.(bool)` panicked if the
	// context held a non-bool value. A missing or non-bool entry now simply
	// means "no cleanup".
	cleanup, _ := mappingContext[populator.CleanupXcopyInitiatorGroup].(bool)
	if cleanup {
		klog.Infof("deleting masking view %s", p.maskingViewID)
		if err := p.client.DeleteMaskingView(ctx, p.symmetrixID, p.maskingViewID); err != nil {
			return fmt.Errorf("failed to delete masking view: %w", err)
		}
	}
	klog.Infof("removing volume ID %s from storage group %s", targetLUN.ProviderID, p.storageGroupID)
	if _, err := p.client.RemoveVolumesFromStorageGroup(ctx, p.symmetrixID, p.storageGroupID, false, targetLUN.ProviderID); err != nil {
		return fmt.Errorf("failed removing volume from storage group: %w", err)
	}
	if cleanup {
		klog.Infof("deleting storage group %s", p.storageGroupID)
		if err := p.client.DeleteStorageGroup(ctx, p.symmetrixID, p.storageGroupID); err != nil {
			return fmt.Errorf("failed to delete storage group: %w", err)
		}
	}
	return nil
}
// newClientWithArgs is a package-level indirection over
// gopowermax.NewClientWithArgs so tests can substitute a fake client
// constructor (see the generated MockGen Pmax mock in this package).
var newClientWithArgs = gopowermax.NewClientWithArgs
// NewPowermaxClonner builds a PowermaxClonner and authenticates it against
// the Unisphere endpoint at hostname with username/password.
// The target array ID and port group name are read from the mandatory
// POWERMAX_SYMMETRIX_ID and POWERMAX_PORT_GROUP_NAME environment variables.
// sslSkipVerify controls TLS certificate verification on the client.
func NewPowermaxClonner(hostname, username, password string, sslSkipVerify bool) (PowermaxClonner, error) {
	symID := os.Getenv("POWERMAX_SYMMETRIX_ID")
	if symID == "" {
		return PowermaxClonner{}, fmt.Errorf("Please set POWERMAX_SYMMETRIX_ID in the pod environment or in the secret" +
			" attached to the relevant storage map")
	}
	portGroup := os.Getenv("POWERMAX_PORT_GROUP_NAME")
	if portGroup == "" {
		return PowermaxClonner{}, fmt.Errorf("Please set POWERMAX_PORT_GROUP_NAME in the pod environment or in the secret" +
			" attached to the relevant storage map")
	}
	// Use the same application name as the CSI driver.
	applicationName := "csi"
	client, err := newClientWithArgs(
		hostname,
		applicationName,
		sslSkipVerify,
		false,
		"")
	if err != nil {
		return PowermaxClonner{}, err
	}
	cfg := gopowermax.ConfigConnect{
		Endpoint: hostname,
		Version:  "",
		Username: username,
		Password: password,
	}
	if err := client.Authenticate(context.TODO(), &cfg); err != nil {
		return PowermaxClonner{}, err
	}
	// Typo fix: the original logged "successfuly".
	klog.Info("successfully logged in to PowerMax")
	return PowermaxClonner{client: client, symmetrixID: symID, portGroup: portGroup}, nil
}
// generateRandomString returns a random, hex-encoded string built from
// `length` random bytes. Note the returned string is therefore 2*length
// characters long. The error, if any, comes from the random source.
func generateRandomString(length int) (string, error) {
	buf := make([]byte, length)
	_, err := rand.Read(buf)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
// filterInitiatorsByProtocol filters the initiator list based on the port group protocol.
// iSCSI protocol requires IQN format initiators (e.g., "iqn.1994-05.com.redhat:...").
// SCSI_FC protocol requires FC WWN format initiators (e.g., "10000000c9a12345:10000000c9a12346").
// For an unknown protocol the input is returned unfiltered (with a warning).
func filterInitiatorsByProtocol(initiators []string, protocol string) []string {
	// Select the predicate once, instead of re-evaluating the protocol
	// switch for every initiator; this also guarantees the unknown-protocol
	// warning fires even when the initiator list is empty (the original
	// only warned inside the loop).
	var keep func(string) bool
	switch protocol {
	case "iSCSI":
		// iSCSI initiators start with "iqn."
		keep = func(s string) bool {
			return strings.HasPrefix(strings.ToLower(s), "iqn.")
		}
	case "SCSI_FC":
		// FC initiators are in WWNN:WWPN format (hex pairs separated by colon).
		// They don't start with "iqn." and typically contain colons.
		keep = func(s string) bool {
			return !strings.HasPrefix(strings.ToLower(s), "iqn.") && strings.Contains(s, ":")
		}
	default:
		klog.Warningf("Unknown protocol %s, skipping initiator filtering", protocol)
		return initiators
	}
	var filtered []string
	for _, initiator := range initiators {
		if keep(initiator) {
			filtered = append(filtered, initiator)
		}
	}
	return filtered
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/powermax/mock_powermax_client_test.go | cmd/vsphere-xcopy-volume-populator/internal/powermax/mock_powermax_client_test.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/dell/gopowermax/v2 (interfaces: Pmax)
//
// Generated by this command:
//
// mockgen -destination=mock_powermax_client_test.go -package=powermax github.com/dell/gopowermax/v2 Pmax
//
// Package powermax is a generated GoMock package.
package powermax
import (
context "context"
http "net/http"
reflect "reflect"
pmax "github.com/dell/gopowermax/v2"
v100 "github.com/dell/gopowermax/v2/types/v100"
gomock "go.uber.org/mock/gomock"
)
// MockPmax is a mock of Pmax interface.
type MockPmax struct {
ctrl *gomock.Controller
recorder *MockPmaxMockRecorder
isgomock struct{}
}
// MockPmaxMockRecorder is the mock recorder for MockPmax.
type MockPmaxMockRecorder struct {
mock *MockPmax
}
// NewMockPmax creates a new mock instance.
func NewMockPmax(ctrl *gomock.Controller) *MockPmax {
mock := &MockPmax{ctrl: ctrl}
mock.recorder = &MockPmaxMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPmax) EXPECT() *MockPmaxMockRecorder {
return m.recorder
}
// AddVolumesToProtectedStorageGroup mocks base method.
func (m *MockPmax) AddVolumesToProtectedStorageGroup(ctx context.Context, symID, storageGroupID, remoteSymID, remoteStorageGroupID string, force bool, volumeIDs ...string) error {
m.ctrl.T.Helper()
varargs := []any{ctx, symID, storageGroupID, remoteSymID, remoteStorageGroupID, force}
for _, a := range volumeIDs {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AddVolumesToProtectedStorageGroup", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// AddVolumesToProtectedStorageGroup indicates an expected call of AddVolumesToProtectedStorageGroup.
func (mr *MockPmaxMockRecorder) AddVolumesToProtectedStorageGroup(ctx, symID, storageGroupID, remoteSymID, remoteStorageGroupID, force any, volumeIDs ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, symID, storageGroupID, remoteSymID, remoteStorageGroupID, force}, volumeIDs...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolumesToProtectedStorageGroup", reflect.TypeOf((*MockPmax)(nil).AddVolumesToProtectedStorageGroup), varargs...)
}
// AddVolumesToStorageGroup mocks base method.
func (m *MockPmax) AddVolumesToStorageGroup(ctx context.Context, symID, storageGroupID string, force bool, volumeIDs ...string) error {
m.ctrl.T.Helper()
varargs := []any{ctx, symID, storageGroupID, force}
for _, a := range volumeIDs {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AddVolumesToStorageGroup", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// AddVolumesToStorageGroup indicates an expected call of AddVolumesToStorageGroup.
func (mr *MockPmaxMockRecorder) AddVolumesToStorageGroup(ctx, symID, storageGroupID, force any, volumeIDs ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, symID, storageGroupID, force}, volumeIDs...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolumesToStorageGroup", reflect.TypeOf((*MockPmax)(nil).AddVolumesToStorageGroup), varargs...)
}
// AddVolumesToStorageGroupS mocks base method.
func (m *MockPmax) AddVolumesToStorageGroupS(ctx context.Context, symID, storageGroupID string, force bool, volumeIDs ...string) error {
m.ctrl.T.Helper()
varargs := []any{ctx, symID, storageGroupID, force}
for _, a := range volumeIDs {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AddVolumesToStorageGroupS", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// AddVolumesToStorageGroupS indicates an expected call of AddVolumesToStorageGroupS.
func (mr *MockPmaxMockRecorder) AddVolumesToStorageGroupS(ctx, symID, storageGroupID, force any, volumeIDs ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, symID, storageGroupID, force}, volumeIDs...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolumesToStorageGroupS", reflect.TypeOf((*MockPmax)(nil).AddVolumesToStorageGroupS), varargs...)
}
// Authenticate mocks base method.
func (m *MockPmax) Authenticate(ctx context.Context, configConnect *pmax.ConfigConnect) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Authenticate", ctx, configConnect)
ret0, _ := ret[0].(error)
return ret0
}
// Authenticate indicates an expected call of Authenticate.
func (mr *MockPmaxMockRecorder) Authenticate(ctx, configConnect any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Authenticate", reflect.TypeOf((*MockPmax)(nil).Authenticate), ctx, configConnect)
}
// CreateFileSystem mocks base method.
func (m *MockPmax) CreateFileSystem(ctx context.Context, symID, name, nasServer, serviceLevel string, sizeInMiB int64) (*v100.FileSystem, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateFileSystem", ctx, symID, name, nasServer, serviceLevel, sizeInMiB)
ret0, _ := ret[0].(*v100.FileSystem)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateFileSystem indicates an expected call of CreateFileSystem.
func (mr *MockPmaxMockRecorder) CreateFileSystem(ctx, symID, name, nasServer, serviceLevel, sizeInMiB any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFileSystem", reflect.TypeOf((*MockPmax)(nil).CreateFileSystem), ctx, symID, name, nasServer, serviceLevel, sizeInMiB)
}
// CreateHost mocks base method.
func (m *MockPmax) CreateHost(ctx context.Context, symID, hostID string, initiatorIDs []string, hostFlags *v100.HostFlags) (*v100.Host, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateHost", ctx, symID, hostID, initiatorIDs, hostFlags)
ret0, _ := ret[0].(*v100.Host)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateHost indicates an expected call of CreateHost.
func (mr *MockPmaxMockRecorder) CreateHost(ctx, symID, hostID, initiatorIDs, hostFlags any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHost", reflect.TypeOf((*MockPmax)(nil).CreateHost), ctx, symID, hostID, initiatorIDs, hostFlags)
}
// CreateHostGroup mocks base method.
func (m *MockPmax) CreateHostGroup(ctx context.Context, symID, hostGroupID string, hostIDs []string, hostFlags *v100.HostFlags) (*v100.HostGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateHostGroup", ctx, symID, hostGroupID, hostIDs, hostFlags)
ret0, _ := ret[0].(*v100.HostGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateHostGroup indicates an expected call of CreateHostGroup.
func (mr *MockPmaxMockRecorder) CreateHostGroup(ctx, symID, hostGroupID, hostIDs, hostFlags any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHostGroup", reflect.TypeOf((*MockPmax)(nil).CreateHostGroup), ctx, symID, hostGroupID, hostIDs, hostFlags)
}
// CreateMaskingView mocks base method.
func (m *MockPmax) CreateMaskingView(ctx context.Context, symID, maskingViewID, storageGroupID, hostOrhostGroupID string, isHost bool, portGroupID string) (*v100.MaskingView, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateMaskingView", ctx, symID, maskingViewID, storageGroupID, hostOrhostGroupID, isHost, portGroupID)
ret0, _ := ret[0].(*v100.MaskingView)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateMaskingView indicates an expected call of CreateMaskingView.
func (mr *MockPmaxMockRecorder) CreateMaskingView(ctx, symID, maskingViewID, storageGroupID, hostOrhostGroupID, isHost, portGroupID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMaskingView", reflect.TypeOf((*MockPmax)(nil).CreateMaskingView), ctx, symID, maskingViewID, storageGroupID, hostOrhostGroupID, isHost, portGroupID)
}
// CreateMigrationEnvironment mocks base method.
func (m *MockPmax) CreateMigrationEnvironment(ctx context.Context, sourceSymID, remoteSymID string) (*v100.MigrationEnv, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateMigrationEnvironment", ctx, sourceSymID, remoteSymID)
ret0, _ := ret[0].(*v100.MigrationEnv)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateMigrationEnvironment indicates an expected call of CreateMigrationEnvironment.
func (mr *MockPmaxMockRecorder) CreateMigrationEnvironment(ctx, sourceSymID, remoteSymID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMigrationEnvironment", reflect.TypeOf((*MockPmax)(nil).CreateMigrationEnvironment), ctx, sourceSymID, remoteSymID)
}
// CreateNFSExport mocks base method.
func (m *MockPmax) CreateNFSExport(ctx context.Context, symID string, createNFSExportPayload v100.CreateNFSExport) (*v100.NFSExport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNFSExport", ctx, symID, createNFSExportPayload)
ret0, _ := ret[0].(*v100.NFSExport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateNFSExport indicates an expected call of CreateNFSExport.
func (mr *MockPmaxMockRecorder) CreateNFSExport(ctx, symID, createNFSExportPayload any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNFSExport", reflect.TypeOf((*MockPmax)(nil).CreateNFSExport), ctx, symID, createNFSExportPayload)
}
// CreatePortGroup mocks base method.
func (m *MockPmax) CreatePortGroup(ctx context.Context, symID, portGroupID string, dirPorts []v100.PortKey, protocol string) (*v100.PortGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreatePortGroup", ctx, symID, portGroupID, dirPorts, protocol)
ret0, _ := ret[0].(*v100.PortGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreatePortGroup indicates an expected call of CreatePortGroup.
func (mr *MockPmaxMockRecorder) CreatePortGroup(ctx, symID, portGroupID, dirPorts, protocol any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePortGroup", reflect.TypeOf((*MockPmax)(nil).CreatePortGroup), ctx, symID, portGroupID, dirPorts, protocol)
}
// CreateRDFPair mocks base method.
func (m *MockPmax) CreateRDFPair(ctx context.Context, symID, rdfGroupNo, deviceID, rdfMode, rdfType string, establish, exemptConsistency bool) (*v100.RDFDevicePairList, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRDFPair", ctx, symID, rdfGroupNo, deviceID, rdfMode, rdfType, establish, exemptConsistency)
ret0, _ := ret[0].(*v100.RDFDevicePairList)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateRDFPair indicates an expected call of CreateRDFPair.
func (mr *MockPmaxMockRecorder) CreateRDFPair(ctx, symID, rdfGroupNo, deviceID, rdfMode, rdfType, establish, exemptConsistency any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRDFPair", reflect.TypeOf((*MockPmax)(nil).CreateRDFPair), ctx, symID, rdfGroupNo, deviceID, rdfMode, rdfType, establish, exemptConsistency)
}
// CreateSGMigration mocks base method.
func (m *MockPmax) CreateSGMigration(ctx context.Context, localSymID, remoteSymID, storageGroup string) (*v100.MigrationSession, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateSGMigration", ctx, localSymID, remoteSymID, storageGroup)
ret0, _ := ret[0].(*v100.MigrationSession)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateSGMigration indicates an expected call of CreateSGMigration.
func (mr *MockPmaxMockRecorder) CreateSGMigration(ctx, localSymID, remoteSymID, storageGroup any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSGMigration", reflect.TypeOf((*MockPmax)(nil).CreateSGMigration), ctx, localSymID, remoteSymID, storageGroup)
}
// CreateSGReplica mocks base method.
func (m *MockPmax) CreateSGReplica(ctx context.Context, symID, remoteSymID, rdfMode, rdfGroupNo, sourceSG, remoteSGName, remoteServiceLevel string, bias bool) (*v100.SGRDFInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateSGReplica", ctx, symID, remoteSymID, rdfMode, rdfGroupNo, sourceSG, remoteSGName, remoteServiceLevel, bias)
ret0, _ := ret[0].(*v100.SGRDFInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateSGReplica indicates an expected call of CreateSGReplica.
func (mr *MockPmaxMockRecorder) CreateSGReplica(ctx, symID, remoteSymID, rdfMode, rdfGroupNo, sourceSG, remoteSGName, remoteServiceLevel, bias any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSGReplica", reflect.TypeOf((*MockPmax)(nil).CreateSGReplica), ctx, symID, remoteSymID, rdfMode, rdfGroupNo, sourceSG, remoteSGName, remoteServiceLevel, bias)
}
// CreateSnapshot mocks base method.
func (m *MockPmax) CreateSnapshot(ctx context.Context, symID, SnapID string, sourceVolumeList []v100.VolumeList, ttl int64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateSnapshot", ctx, symID, SnapID, sourceVolumeList, ttl)
ret0, _ := ret[0].(error)
return ret0
}
// CreateSnapshot indicates an expected call of CreateSnapshot.
func (mr *MockPmaxMockRecorder) CreateSnapshot(ctx, symID, SnapID, sourceVolumeList, ttl any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockPmax)(nil).CreateSnapshot), ctx, symID, SnapID, sourceVolumeList, ttl)
}
// CreateSnapshotPolicy mocks base method.
func (m *MockPmax) CreateSnapshotPolicy(ctx context.Context, symID, snapshotPolicyID, interval string, offsetMins int32, complianceCountWarn, complianceCountCritical int64, optionalPayload map[string]any) (*v100.SnapshotPolicy, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateSnapshotPolicy", ctx, symID, snapshotPolicyID, interval, offsetMins, complianceCountWarn, complianceCountCritical, optionalPayload)
ret0, _ := ret[0].(*v100.SnapshotPolicy)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateSnapshotPolicy indicates an expected call of CreateSnapshotPolicy.
func (mr *MockPmaxMockRecorder) CreateSnapshotPolicy(ctx, symID, snapshotPolicyID, interval, offsetMins, complianceCountWarn, complianceCountCritical, optionalPayload any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshotPolicy", reflect.TypeOf((*MockPmax)(nil).CreateSnapshotPolicy), ctx, symID, snapshotPolicyID, interval, offsetMins, complianceCountWarn, complianceCountCritical, optionalPayload)
}
// CreateStorageGroup mocks base method.
func (m *MockPmax) CreateStorageGroup(ctx context.Context, symID, storageGroupID, srpID, serviceLevel string, thickVolumes bool, optionalPayload map[string]any) (*v100.StorageGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateStorageGroup", ctx, symID, storageGroupID, srpID, serviceLevel, thickVolumes, optionalPayload)
ret0, _ := ret[0].(*v100.StorageGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateStorageGroup indicates an expected call of CreateStorageGroup.
func (mr *MockPmaxMockRecorder) CreateStorageGroup(ctx, symID, storageGroupID, srpID, serviceLevel, thickVolumes, optionalPayload any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStorageGroup", reflect.TypeOf((*MockPmax)(nil).CreateStorageGroup), ctx, symID, storageGroupID, srpID, serviceLevel, thickVolumes, optionalPayload)
}
// CreateStorageGroupSnapshot mocks base method.
func (m *MockPmax) CreateStorageGroupSnapshot(ctx context.Context, symID, storageGroupID string, payload *v100.CreateStorageGroupSnapshot) (*v100.StorageGroupSnap, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateStorageGroupSnapshot", ctx, symID, storageGroupID, payload)
ret0, _ := ret[0].(*v100.StorageGroupSnap)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateStorageGroupSnapshot indicates an expected call of CreateStorageGroupSnapshot.
func (mr *MockPmaxMockRecorder) CreateStorageGroupSnapshot(ctx, symID, storageGroupID, payload any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStorageGroupSnapshot", reflect.TypeOf((*MockPmax)(nil).CreateStorageGroupSnapshot), ctx, symID, storageGroupID, payload)
}
// CreateVolumeInProtectedStorageGroupS mocks base method.
func (m *MockPmax) CreateVolumeInProtectedStorageGroupS(ctx context.Context, symID, remoteSymID, storageGroupID, remoteStorageGroupID, volumeName string, volumeSize any, volOpts map[string]any, opts ...http.Header) (*v100.Volume, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, symID, remoteSymID, storageGroupID, remoteStorageGroupID, volumeName, volumeSize, volOpts}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateVolumeInProtectedStorageGroupS", varargs...)
ret0, _ := ret[0].(*v100.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateVolumeInProtectedStorageGroupS indicates an expected call of CreateVolumeInProtectedStorageGroupS.
func (mr *MockPmaxMockRecorder) CreateVolumeInProtectedStorageGroupS(ctx, symID, remoteSymID, storageGroupID, remoteStorageGroupID, volumeName, volumeSize, volOpts any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, symID, remoteSymID, storageGroupID, remoteStorageGroupID, volumeName, volumeSize, volOpts}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolumeInProtectedStorageGroupS", reflect.TypeOf((*MockPmax)(nil).CreateVolumeInProtectedStorageGroupS), varargs...)
}
// CreateVolumeInStorageGroup mocks base method.
func (m *MockPmax) CreateVolumeInStorageGroup(ctx context.Context, symID, storageGroupID, volumeName string, volumeSize any, volOpts map[string]any) (*v100.Volume, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateVolumeInStorageGroup", ctx, symID, storageGroupID, volumeName, volumeSize, volOpts)
ret0, _ := ret[0].(*v100.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateVolumeInStorageGroup indicates an expected call of CreateVolumeInStorageGroup.
func (mr *MockPmaxMockRecorder) CreateVolumeInStorageGroup(ctx, symID, storageGroupID, volumeName, volumeSize, volOpts any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolumeInStorageGroup", reflect.TypeOf((*MockPmax)(nil).CreateVolumeInStorageGroup), ctx, symID, storageGroupID, volumeName, volumeSize, volOpts)
}
// CreateVolumeInStorageGroupS mocks base method.
func (m *MockPmax) CreateVolumeInStorageGroupS(ctx context.Context, symID, storageGroupID, volumeName string, volumeSize any, volOpts map[string]any, opts ...http.Header) (*v100.Volume, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, symID, storageGroupID, volumeName, volumeSize, volOpts}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateVolumeInStorageGroupS", varargs...)
ret0, _ := ret[0].(*v100.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateVolumeInStorageGroupS indicates an expected call of CreateVolumeInStorageGroupS.
func (mr *MockPmaxMockRecorder) CreateVolumeInStorageGroupS(ctx, symID, storageGroupID, volumeName, volumeSize, volOpts any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, symID, storageGroupID, volumeName, volumeSize, volOpts}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolumeInStorageGroupS", reflect.TypeOf((*MockPmax)(nil).CreateVolumeInStorageGroupS), varargs...)
}
// DeleteFileSystem mocks base method.
func (m *MockPmax) DeleteFileSystem(ctx context.Context, symID, fsID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteFileSystem", ctx, symID, fsID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteFileSystem indicates an expected call of DeleteFileSystem.
func (mr *MockPmaxMockRecorder) DeleteFileSystem(ctx, symID, fsID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSystem", reflect.TypeOf((*MockPmax)(nil).DeleteFileSystem), ctx, symID, fsID)
}
// DeleteHost mocks base method.
func (m *MockPmax) DeleteHost(ctx context.Context, symID, hostID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteHost", ctx, symID, hostID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteHost indicates an expected call of DeleteHost.
func (mr *MockPmaxMockRecorder) DeleteHost(ctx, symID, hostID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHost", reflect.TypeOf((*MockPmax)(nil).DeleteHost), ctx, symID, hostID)
}
// DeleteHostGroup mocks base method.
func (m *MockPmax) DeleteHostGroup(ctx context.Context, symID, hostGroupID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteHostGroup", ctx, symID, hostGroupID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteHostGroup indicates an expected call of DeleteHostGroup.
func (mr *MockPmaxMockRecorder) DeleteHostGroup(ctx, symID, hostGroupID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHostGroup", reflect.TypeOf((*MockPmax)(nil).DeleteHostGroup), ctx, symID, hostGroupID)
}
// DeleteMaskingView mocks base method.
func (m *MockPmax) DeleteMaskingView(ctx context.Context, symID, maskingViewID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteMaskingView", ctx, symID, maskingViewID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteMaskingView indicates an expected call of DeleteMaskingView.
func (mr *MockPmaxMockRecorder) DeleteMaskingView(ctx, symID, maskingViewID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMaskingView", reflect.TypeOf((*MockPmax)(nil).DeleteMaskingView), ctx, symID, maskingViewID)
}
// DeleteMigrationEnvironment mocks base method.
func (m *MockPmax) DeleteMigrationEnvironment(ctx context.Context, localSymID, remoteSymID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteMigrationEnvironment", ctx, localSymID, remoteSymID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteMigrationEnvironment indicates an expected call of DeleteMigrationEnvironment.
func (mr *MockPmaxMockRecorder) DeleteMigrationEnvironment(ctx, localSymID, remoteSymID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMigrationEnvironment", reflect.TypeOf((*MockPmax)(nil).DeleteMigrationEnvironment), ctx, localSymID, remoteSymID)
}
// DeleteNASServer mocks base method.
func (m *MockPmax) DeleteNASServer(ctx context.Context, symID, nasID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteNASServer", ctx, symID, nasID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteNASServer indicates an expected call of DeleteNASServer.
func (mr *MockPmaxMockRecorder) DeleteNASServer(ctx, symID, nasID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNASServer", reflect.TypeOf((*MockPmax)(nil).DeleteNASServer), ctx, symID, nasID)
}
// DeleteNFSExport mocks base method.
func (m *MockPmax) DeleteNFSExport(ctx context.Context, symID, nfsExportID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteNFSExport", ctx, symID, nfsExportID)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteNFSExport indicates an expected call of DeleteNFSExport.
func (mr *MockPmaxMockRecorder) DeleteNFSExport(ctx, symID, nfsExportID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNFSExport", reflect.TypeOf((*MockPmax)(nil).DeleteNFSExport), ctx, symID, nfsExportID)
}
// DeletePortGroup mocks base method.
func (m *MockPmax) DeletePortGroup(ctx context.Context, symID, portGroupID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeletePortGroup", ctx, symID, portGroupID)
ret0, _ := ret[0].(error)
return ret0
}
// DeletePortGroup indicates an expected call of DeletePortGroup.
func (mr *MockPmaxMockRecorder) DeletePortGroup(ctx, symID, portGroupID any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePortGroup", reflect.TypeOf((*MockPmax)(nil).DeletePortGroup), ctx, symID, portGroupID)
}
// NOTE(review): the methods below follow the MockGen (go.uber.org/mock)
// generated-code pattern (ctrl.Call / RecordCallWithMethodType); they are
// vendored mock stubs for the gopowermax Pmax client and should be
// regenerated with mockgen rather than edited by hand.

// DeleteSnapshot mocks base method.
func (m *MockPmax) DeleteSnapshot(ctx context.Context, symID, SnapID string, sourceVolumes []v100.VolumeList, generation int64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteSnapshot", ctx, symID, SnapID, sourceVolumes, generation)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteSnapshot indicates an expected call of DeleteSnapshot.
func (mr *MockPmaxMockRecorder) DeleteSnapshot(ctx, symID, SnapID, sourceVolumes, generation any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockPmax)(nil).DeleteSnapshot), ctx, symID, SnapID, sourceVolumes, generation)
}

// DeleteSnapshotPolicy mocks base method.
func (m *MockPmax) DeleteSnapshotPolicy(ctx context.Context, symID, snapshotPolicyID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteSnapshotPolicy", ctx, symID, snapshotPolicyID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteSnapshotPolicy indicates an expected call of DeleteSnapshotPolicy.
func (mr *MockPmaxMockRecorder) DeleteSnapshotPolicy(ctx, symID, snapshotPolicyID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshotPolicy", reflect.TypeOf((*MockPmax)(nil).DeleteSnapshotPolicy), ctx, symID, snapshotPolicyID)
}

// DeleteSnapshotS mocks base method.
func (m *MockPmax) DeleteSnapshotS(ctx context.Context, symID, SnapID string, sourceVolumes []v100.VolumeList, generation int64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteSnapshotS", ctx, symID, SnapID, sourceVolumes, generation)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteSnapshotS indicates an expected call of DeleteSnapshotS.
func (mr *MockPmaxMockRecorder) DeleteSnapshotS(ctx, symID, SnapID, sourceVolumes, generation any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshotS", reflect.TypeOf((*MockPmax)(nil).DeleteSnapshotS), ctx, symID, SnapID, sourceVolumes, generation)
}

// DeleteStorageGroup mocks base method.
func (m *MockPmax) DeleteStorageGroup(ctx context.Context, symID, storageGroupID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteStorageGroup", ctx, symID, storageGroupID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteStorageGroup indicates an expected call of DeleteStorageGroup.
func (mr *MockPmaxMockRecorder) DeleteStorageGroup(ctx, symID, storageGroupID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStorageGroup", reflect.TypeOf((*MockPmax)(nil).DeleteStorageGroup), ctx, symID, storageGroupID)
}

// DeleteStorageGroupSnapshot mocks base method.
func (m *MockPmax) DeleteStorageGroupSnapshot(ctx context.Context, symID, storageGroupID, snapshotID, snapID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteStorageGroupSnapshot", ctx, symID, storageGroupID, snapshotID, snapID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteStorageGroupSnapshot indicates an expected call of DeleteStorageGroupSnapshot.
func (mr *MockPmaxMockRecorder) DeleteStorageGroupSnapshot(ctx, symID, storageGroupID, snapshotID, snapID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStorageGroupSnapshot", reflect.TypeOf((*MockPmax)(nil).DeleteStorageGroupSnapshot), ctx, symID, storageGroupID, snapshotID, snapID)
}

// DeleteVolume mocks base method.
func (m *MockPmax) DeleteVolume(ctx context.Context, symID, volumeID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteVolume", ctx, symID, volumeID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteVolume indicates an expected call of DeleteVolume.
func (mr *MockPmaxMockRecorder) DeleteVolume(ctx, symID, volumeID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockPmax)(nil).DeleteVolume), ctx, symID, volumeID)
}

// DeleteVolumeIDsIterator mocks base method.
func (m *MockPmax) DeleteVolumeIDsIterator(ctx context.Context, iter *v100.VolumeIterator) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteVolumeIDsIterator", ctx, iter)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteVolumeIDsIterator indicates an expected call of DeleteVolumeIDsIterator.
func (mr *MockPmaxMockRecorder) DeleteVolumeIDsIterator(ctx, iter any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolumeIDsIterator", reflect.TypeOf((*MockPmax)(nil).DeleteVolumeIDsIterator), ctx, iter)
}

// ExecuteCreateRDFGroup mocks base method.
func (m *MockPmax) ExecuteCreateRDFGroup(ctx context.Context, symID string, CreateRDFPayload *v100.RDFGroupCreate) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ExecuteCreateRDFGroup", ctx, symID, CreateRDFPayload)
	ret0, _ := ret[0].(error)
	return ret0
}

// ExecuteCreateRDFGroup indicates an expected call of ExecuteCreateRDFGroup.
func (mr *MockPmaxMockRecorder) ExecuteCreateRDFGroup(ctx, symID, CreateRDFPayload any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCreateRDFGroup", reflect.TypeOf((*MockPmax)(nil).ExecuteCreateRDFGroup), ctx, symID, CreateRDFPayload)
}

// ExecuteReplicationActionOnSG mocks base method.
func (m *MockPmax) ExecuteReplicationActionOnSG(ctx context.Context, symID, action, storageGroup, rdfGroup string, force, exemptConsistency, bias bool) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ExecuteReplicationActionOnSG", ctx, symID, action, storageGroup, rdfGroup, force, exemptConsistency, bias)
	ret0, _ := ret[0].(error)
	return ret0
}

// ExecuteReplicationActionOnSG indicates an expected call of ExecuteReplicationActionOnSG.
func (mr *MockPmaxMockRecorder) ExecuteReplicationActionOnSG(ctx, symID, action, storageGroup, rdfGroup, force, exemptConsistency, bias any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteReplicationActionOnSG", reflect.TypeOf((*MockPmax)(nil).ExecuteReplicationActionOnSG), ctx, symID, action, storageGroup, rdfGroup, force, exemptConsistency, bias)
}

// ExpandVolume mocks base method.
// (variadic capUnits are flattened into the recorded call arguments)
func (m *MockPmax) ExpandVolume(ctx context.Context, symID, volumeID string, rdfGNo int, volumeSize any, capUnits ...string) (*v100.Volume, error) {
	m.ctrl.T.Helper()
	varargs := []any{ctx, symID, volumeID, rdfGNo, volumeSize}
	for _, a := range capUnits {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ExpandVolume", varargs...)
	ret0, _ := ret[0].(*v100.Volume)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ExpandVolume indicates an expected call of ExpandVolume.
func (mr *MockPmaxMockRecorder) ExpandVolume(ctx, symID, volumeID, rdfGNo, volumeSize any, capUnits ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]any{ctx, symID, volumeID, rdfGNo, volumeSize}, capUnits...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpandVolume", reflect.TypeOf((*MockPmax)(nil).ExpandVolume), varargs...)
}

// GetAllowedArrays mocks base method.
func (m *MockPmax) GetAllowedArrays() []string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetAllowedArrays")
	ret0, _ := ret[0].([]string)
	return ret0
}

// GetAllowedArrays indicates an expected call of GetAllowedArrays.
func (mr *MockPmaxMockRecorder) GetAllowedArrays() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllowedArrays", reflect.TypeOf((*MockPmax)(nil).GetAllowedArrays))
}

// GetArrayPerfKeys mocks base method.
func (m *MockPmax) GetArrayPerfKeys(ctx context.Context) (*v100.ArrayKeysResult, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetArrayPerfKeys", ctx)
	ret0, _ := ret[0].(*v100.ArrayKeysResult)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetArrayPerfKeys indicates an expected call of GetArrayPerfKeys.
func (mr *MockPmaxMockRecorder) GetArrayPerfKeys(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetArrayPerfKeys", reflect.TypeOf((*MockPmax)(nil).GetArrayPerfKeys), ctx)
}
// GetCreateVolInSGPayload mocks base method.
func (m *MockPmax) GetCreateVolInSGPayload(volumeSize any, capUnit, volumeName string, isSync, enableMobility bool, remoteSymID, storageGroupID string, opts ...http.Header) any {
m.ctrl.T.Helper()
varargs := []any{volumeSize, capUnit, volumeName, isSync, enableMobility, remoteSymID, storageGroupID}
for _, a := range opts {
varargs = append(varargs, a)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | true |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/powermax/powermax_test.go | cmd/vsphere-xcopy-volume-populator/internal/powermax/powermax_test.go | package powermax
import (
"context"
"os"
"testing"
gopowermax "github.com/dell/gopowermax/v2"
"github.com/dell/gopowermax/v2/types/v100"
"github.com/onsi/gomega"
"go.uber.org/mock/gomock"
)
// TestNewPowermaxClonner covers env-var validation and the happy path of the
// PowermaxClonner constructor.
func TestNewPowermaxClonner(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	t.Run("should return error if POWERMAX_SYMMETRIX_ID is not set", func(t *testing.T) {
		// NOTE(review): the subtests mutate the process environment via
		// os.Setenv/Unsetenv and never restore it, so they are order-dependent
		// and unsafe under t.Parallel; consider t.Setenv where possible.
		os.Unsetenv("POWERMAX_SYMMETRIX_ID")
		_, err := NewPowermaxClonner("host", "user", "pass", true)
		g.Expect(err).To(gomega.HaveOccurred())
	})
	t.Run("should return error if POWERMAX_PORT_GROUP_NAME is not set", func(t *testing.T) {
		os.Setenv("POWERMAX_SYMMETRIX_ID", "123")
		os.Unsetenv("POWERMAX_PORT_GROUP_NAME")
		_, err := NewPowermaxClonner("host", "user", "pass", true)
		g.Expect(err).To(gomega.HaveOccurred())
	})
	t.Run("should return a clonner if all env vars are set", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockClient := NewMockPmax(ctrl)
		os.Setenv("POWERMAX_SYMMETRIX_ID", "123")
		os.Setenv("POWERMAX_PORT_GROUP_NAME", "456")
		mockClient.EXPECT().Authenticate(gomock.Any(), gomock.Any()).Return(nil)
		// not testing the gopowermax constructor: swap the package-level
		// factory for one that returns the mock, and restore it afterwards.
		origNewClientWithArgs := newClientWithArgs
		newClientWithArgs = func(string, string, bool, bool, string) (gopowermax.Pmax, error) {
			return mockClient, nil
		}
		defer func() { newClientWithArgs = origNewClientWithArgs }()
		clonner, err := NewPowermaxClonner("host", "user", "pass", true)
		g.Expect(err).ToNot(gomega.HaveOccurred())
		g.Expect(clonner).ToNot(gomega.BeNil())
		g.Expect(clonner.symmetrixID).To(gomega.Equal("123"))
		g.Expect(clonner.portGroup).To(gomega.Equal("456"))
	})
}
// TestEnsureClonnerIgroup verifies that EnsureClonnerIgroup matches the
// provided clonner initiators against the port group protocol: iSCSI and FC
// initiators succeed against matching port groups, and a protocol mismatch
// returns an error.
func TestEnsureClonnerIgroup(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	t.Run("should return a mapping context with the port group id", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockClient := NewMockPmax(ctrl)
		clonner := PowermaxClonner{
			client:      mockClient,
			symmetrixID: "123",
			portGroup:   "456",
		}
		initiatorGroup := "test-ig"
		clonnerIqn := []string{"iqn.1994-05.com.redhat:rhv-host"}
		mockClient.EXPECT().GetStorageGroup(context.TODO(), "123", gomock.Not(gomock.Nil())).Return(&v100.StorageGroup{}, nil)
		mockClient.EXPECT().GetPortGroupByID(context.TODO(), "123", "456").Return(&v100.PortGroup{PortGroupProtocol: "iSCSI"}, nil)
		mockClient.EXPECT().GetHostList(context.TODO(), "123").Return(&v100.HostList{HostIDs: []string{"host1"}}, nil)
		mockClient.EXPECT().GetHostByID(context.TODO(), "123", "host1").Return(&v100.Host{Initiators: []string{"iqn.1994-05.com.redhat:rhv-host"}}, nil)
		mappingContext, err := clonner.EnsureClonnerIgroup(initiatorGroup, clonnerIqn)
		g.Expect(err).ToNot(gomega.HaveOccurred())
		g.Expect(mappingContext).ToNot(gomega.BeNil())
	})
	t.Run("should fail when port group protocol is SCSI_FC but only iSCSI initiators are provided", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockClient := NewMockPmax(ctrl)
		clonner := PowermaxClonner{
			client:      mockClient,
			symmetrixID: "123",
			portGroup:   "fc-port-group",
		}
		initiatorGroup := "test-ig"
		// Only iSCSI initiators provided
		clonnerIqn := []string{"iqn.1994-05.com.redhat:rhv-host"}
		mockClient.EXPECT().GetStorageGroup(context.TODO(), "123", gomock.Not(gomock.Nil())).Return(&v100.StorageGroup{}, nil)
		// Port group is configured for Fibre Channel
		mockClient.EXPECT().GetPortGroupByID(context.TODO(), "123", "fc-port-group").Return(&v100.PortGroup{PortGroupProtocol: "SCSI_FC"}, nil)
		mappingContext, err := clonner.EnsureClonnerIgroup(initiatorGroup, clonnerIqn)
		g.Expect(err).To(gomega.HaveOccurred())
		g.Expect(err.Error()).To(gomega.ContainSubstring("no initiators matching protocol SCSI_FC"))
		g.Expect(mappingContext).To(gomega.BeNil())
	})
	t.Run("should succeed when port group protocol is SCSI_FC and FC initiators are provided", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		defer ctrl.Finish()
		mockClient := NewMockPmax(ctrl)
		clonner := PowermaxClonner{
			client:      mockClient,
			symmetrixID: "123",
			portGroup:   "fc-port-group",
		}
		initiatorGroup := "test-ig"
		// FC initiators in WWNN:WWPN format
		clonnerIqn := []string{"10000000c9a12345:10000000c9a12346"}
		mockClient.EXPECT().GetStorageGroup(context.TODO(), "123", gomock.Not(gomock.Nil())).Return(&v100.StorageGroup{}, nil)
		mockClient.EXPECT().GetPortGroupByID(context.TODO(), "123", "fc-port-group").Return(&v100.PortGroup{PortGroupProtocol: "SCSI_FC"}, nil)
		mockClient.EXPECT().GetHostList(context.TODO(), "123").Return(&v100.HostList{HostIDs: []string{"host1"}}, nil)
		mockClient.EXPECT().GetHostByID(context.TODO(), "123", "host1").Return(&v100.Host{Initiators: []string{"10000000c9a12345:10000000c9a12346"}}, nil)
		mappingContext, err := clonner.EnsureClonnerIgroup(initiatorGroup, clonnerIqn)
		g.Expect(err).ToNot(gomega.HaveOccurred())
		g.Expect(mappingContext).ToNot(gomega.BeNil())
	})
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/secure_script.go | cmd/vsphere-xcopy-volume-populator/internal/populator/secure_script.go | package populator
import (
"context"
"fmt"
"os"
"time"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/vmkfstools-wrapper"
"github.com/vmware/govmomi/object"
"k8s.io/klog/v2"
)
const (
	secureScriptName = "secure-vmkfstools-wrapper"
)

// writeSecureScriptToTemp writes the embedded vmkfstools wrapper script to a
// temporary file and returns its path. The caller is responsible for removing
// the returned file when done.
//
// Fix: the original deferred tempFile.Close() and never inspected its error,
// so a failed close could leave a partial/stale file whose path was still
// returned to the caller (and later uploaded). Close explicitly and treat a
// close failure as an error.
func writeSecureScriptToTemp() (string, error) {
	tempFile, err := os.CreateTemp("", "secure-vmkfstools-wrapper-*.py")
	if err != nil {
		return "", fmt.Errorf("failed to create temp file: %w", err)
	}
	if _, err := tempFile.Write(vmkfstoolswrapper.Script); err != nil {
		tempFile.Close()
		os.Remove(tempFile.Name())
		return "", fmt.Errorf("failed to write script content: %w", err)
	}
	if err := tempFile.Close(); err != nil {
		os.Remove(tempFile.Name())
		return "", fmt.Errorf("failed to close temp file: %w", err)
	}
	return tempFile.Name(), nil
}
// ensureSecureScript ensures the secure script is uploaded and available on the target ESX.
// It always re-uploads so the host runs the version embedded in this binary,
// and returns the datastore path the script was written to.
func ensureSecureScript(ctx context.Context, client vmware.Client, esx *object.HostSystem, datastore string) (string, error) {
	hostName := esx.Name()
	klog.Infof("ensuring secure script on ESXi %s", hostName)
	// ALWAYS force re-upload to ensure latest version
	klog.Infof("Force uploading secure script to ensure latest version")
	datacenter, lookupErr := getHostDC(esx)
	if lookupErr != nil {
		return "", lookupErr
	}
	uploadedPath, uploadErr := uploadScript(ctx, client, datacenter, datastore)
	if uploadErr != nil {
		return "", fmt.Errorf("failed to upload the secure script to ESXi %s: %w", hostName, uploadErr)
	}
	// Script will execute directly from datastore - no need for shell commands
	klog.Infof("uploaded secure script to ESXi %s at %s - ready for execution", hostName, uploadedPath)
	return uploadedPath, nil
}
// uploadScript materializes the embedded wrapper script on local disk and
// uploads it to the given datastore, returning the /vmfs/volumes path where
// the ESXi host can execute it. Both the datastore lookup and the upload are
// bounded by 30-second timeouts derived from ctx.
func uploadScript(ctx context.Context, client vmware.Client, dc *object.Datacenter, datastore string) (string, error) {
	// Resolve the target datastore with a bounded timeout.
	lookupCtx, cancelLookup := context.WithTimeout(ctx, 30*time.Second)
	defer cancelLookup()
	ds, err := client.GetDatastore(lookupCtx, dc, datastore)
	if err != nil {
		return "", fmt.Errorf("failed to get datastore: %w", err)
	}
	// Write the embedded script to a local temp file so it can be uploaded.
	localPath, err := writeSecureScriptToTemp()
	if err != nil {
		return "", fmt.Errorf("failed to write embedded script to temp file: %w", err)
	}
	defer os.Remove(localPath) // Clean up temp file
	remoteName := fmt.Sprintf("%s.py", secureScriptName)
	klog.Infof("Uploading embedded script to datastore as %s", remoteName)
	// Upload with its own bounded timeout.
	uploadCtx, cancelUpload := context.WithTimeout(ctx, 30*time.Second)
	defer cancelUpload()
	if uploadErr := ds.UploadFile(uploadCtx, localPath, remoteName, nil); uploadErr != nil {
		return "", fmt.Errorf("failed to upload embedded script: %w", uploadErr)
	}
	datastorePath := fmt.Sprintf("/vmfs/volumes/%s/%s", datastore, remoteName)
	klog.Infof("Successfully uploaded embedded script to datastore path: %s", datastorePath)
	return datastorePath, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/storage.go | cmd/vsphere-xcopy-volume-populator/internal/populator/storage.go | package populator
import (
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
)
const (
	// CleanupXcopyInitiatorGroup is the key to signal cleanup of the initiator group.
	// It is written into a MappingContext, not used as a standalone value.
	CleanupXcopyInitiatorGroup = "cleanupXcopyInitiatorGroup"
)

// StorageApi is the aggregate storage contract consumed by the populator;
// currently it only requires VMDK/Xcopy capability (the default clone path).
//
//go:generate go run go.uber.org/mock/mockgen -destination=mocks/storage_mock_client.go -package=storage_mocks . StorageApi
type StorageApi interface {
	VMDKCapable
}

// StorageResolver resolves a PersistentVolume to LUN details
// This interface is embedded by VVolCapable, RDMCapable, and VMDKCapable
type StorageResolver interface {
	// ResolvePVToLUN resolves PersistentVolume to LUN details
	ResolvePVToLUN(persistentVolume PersistentVolume) (LUN, error)
}

// VVolCapable defines storage that can perform VVol operations
type VVolCapable interface {
	StorageResolver
	// VvolCopy performs a direct copy operation using vSphere API to discover source volume
	VvolCopy(vsphereClient vmware.Client, vmId string, sourceVMDKFile string, persistentVolume PersistentVolume, progress chan<- uint64) error
}

// RDMCapable defines storage that can perform RDM operations
type RDMCapable interface {
	StorageResolver
	// RDMCopy performs a copy operation for RDM-backed disks
	RDMCopy(vsphereClient vmware.Client, vmId string, sourceVMDKFile string, persistentVolume PersistentVolume, progress chan<- uint64) error
}

// StorageMapper handles initiator group mapping for VMDK/Xcopy operations
type StorageMapper interface {
	// EnsureClonnerIgroup creates or updates an initiator group with the clonnerIqn
	EnsureClonnerIgroup(initiatorGroup string, clonnerIqn []string) (MappingContext, error)
	// Map is responsible for mapping an initiator group to a LUN
	Map(initatorGroup string, targetLUN LUN, context MappingContext) (LUN, error)
	// UnMap is responsible for unmapping an initiator group from a LUN
	UnMap(initatorGroup string, targetLUN LUN, context MappingContext) error
	// CurrentMappedGroups returns the initiator groups the LUN is mapped to
	CurrentMappedGroups(targetLUN LUN, context MappingContext) ([]string, error)
}

// VMDKCapable defines storage that can perform VMDK/Xcopy operations (DEFAULT fallback)
// This is the required interface - all storage implementations must support this
type VMDKCapable interface {
	StorageMapper
	StorageResolver
}

// MappingContext holds context information for mapping operations
// (free-form key/value pairs threaded through the mapper calls).
type MappingContext map[string]any

// SciniAware indicates that a storage requires scini module (PowerFlex)
type SciniAware interface {
	// SciniRequired reports whether the PowerFlex scini kernel module is needed.
	SciniRequired() bool
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/remote_esxcli.go | cmd/vsphere-xcopy-volume-populator/internal/populator/remote_esxcli.go | package populator
import (
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/vmware/govmomi/object"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
// xcopyInitiatorGroup is the name of the initiator group used for the copy
// offload. NOTE(review): this is a mutable package-level variable — Populate
// reassigns it on PowerFlex (scini) hosts — so concurrent Populate calls in
// one process would race on it; confirm the populator runs one copy per
// process.
var xcopyInitiatorGroup = "xcopy-esxs"

const (
	// taskPollingInterval: cadence for polling remote task status
	// (presumably used by the task executors; confirm at call sites).
	taskPollingInterval = 5 * time.Second
	// rescanSleepInterval is the pause between storage rescan attempts.
	rescanSleepInterval = 5 * time.Second
	// rescanRetries bounds the number of rescan attempts.
	rescanRetries = 5
)

// CloneMethod represents the method used for cloning operations
type CloneMethod string

const (
	// CloneMethodSSH uses SSH to perform cloning operations
	CloneMethodSSH CloneMethod = "ssh"
	// CloneMethodVIB uses VIB to perform cloning operations
	CloneMethodVIB CloneMethod = "vib"
)

// vmkfstoolsTask mirrors the JSON status document produced for a remote
// vmkfstools invocation.
type vmkfstoolsTask struct {
	Pid       int    `json:"pid"`
	ExitCode  string `json:"exitCode"`
	Stderr    string `json:"stdErr"`
	LastLine  string `json:"lastLine"`
	XcopyUsed bool   `json:"xcopyUsed"`
	TaskId    string `json:"taskId"`
}

// EsxCli abstracts the esxcli operations the populator needs.
type EsxCli interface {
	ListVibs() ([]string, error)
	VmkfstoolsClone(sourceVMDKFile, targetLUN string) error
}

// RemoteEsxcliPopulator copies a source VMDK onto a target LUN by executing
// vmkfstools remotely on the ESXi host that owns the source VM, either via
// the forklift VIB or over SSH.
type RemoteEsxcliPopulator struct {
	VSphereClient vmware.Client
	StorageApi    VMDKCapable
	// SSH-related fields (only used when using SSH method)
	SSHPrivateKey []byte
	SSHPublicKey  []byte
	UseSSHMethod  bool
	SSHTimeout    time.Duration
}
// NewWithRemoteEsxcli builds a Populator that runs the clone through the
// forklift VIB on the ESXi host (the non-SSH method).
func NewWithRemoteEsxcli(storageApi VMDKCapable, vmwareClient vmware.Client) (Populator, error) {
	populator := &RemoteEsxcliPopulator{
		VSphereClient: vmwareClient,
		StorageApi:    storageApi,
		UseSSHMethod:  false,            // VIB method
		SSHTimeout:    30 * time.Second, // Default timeout (not used for VIB method)
	}
	return populator, nil
}
// NewWithRemoteEsxcliSSH builds a Populator that runs the clone over SSH,
// using the provided key pair for authentication. sshTimeoutSeconds bounds
// the SSH setup steps (script upload, enabling SSH, host IP lookup).
//
// Fix: validate sshTimeoutSeconds up front. A zero or negative value would
// later produce an already-expired context via context.WithTimeout, making
// every SSH setup step fail with a confusing deadline error.
func NewWithRemoteEsxcliSSH(storageApi VMDKCapable, vmwareClient vmware.Client, sshPrivateKey, sshPublicKey []byte, sshTimeoutSeconds int) (Populator, error) {
	if len(sshPrivateKey) == 0 || len(sshPublicKey) == 0 {
		return nil, fmt.Errorf("ssh key material must be non-empty")
	}
	if sshTimeoutSeconds <= 0 {
		return nil, fmt.Errorf("ssh timeout must be positive, got %d", sshTimeoutSeconds)
	}
	return &RemoteEsxcliPopulator{
		VSphereClient: vmwareClient,
		StorageApi:    storageApi,
		SSHPrivateKey: sshPrivateKey,
		SSHPublicKey:  sshPublicKey,
		UseSSHMethod:  true,
		SSHTimeout:    time.Duration(sshTimeoutSeconds) * time.Second,
	}, nil
}
// Populate copies the source VMDK onto the LUN backing pv by running
// vmkfstools remotely (VIB or SSH method) on the ESXi host that owns vmId.
// The LUN is temporarily mapped to the host's HBA initiators so the array can
// offload the copy; deferred blocks unmap it, restore the original initiator
// groups, and clean dead devices regardless of outcome. The final error (or
// nil) is always delivered on quit.
func (p *RemoteEsxcliPopulator) Populate(vmId string, sourceVMDKFile string, pv PersistentVolume, hostLocker Hostlocker, progress chan<- uint64, xcopyUsed chan<- int, quit chan error) (errFinal error) {
	// isn't it better to not call close the channel from the caller?
	defer func() {
		r := recover()
		if r != nil {
			klog.Infof("recovered %v", r)
			// if we paniced we must return with an error. Otherwise, the pod will exit with 0 and will
			// continue to convertion, and will likely fail, if the copy wasn't completed.
			quit <- fmt.Errorf("recovered failure: %v", r)
			return
		}
		quit <- errFinal
	}()
	vmDisk, err := ParseVmdkPath(sourceVMDKFile)
	if err != nil {
		return err
	}
	var cloneMethod CloneMethod
	klog.Infof("Debug: UseSSHMethod field value: %t", p.UseSSHMethod)
	if p.UseSSHMethod {
		cloneMethod = CloneMethodSSH
		klog.Infof("Debug: Set cloneMethod to SSH")
	} else {
		cloneMethod = CloneMethodVIB
		klog.Infof("Debug: Set cloneMethod to VIB")
	}
	klog.Infof(
		"Starting populate via remote esxcli vmkfstools (%s), source vmdk=%s, pv=%v",
		cloneMethod,
		sourceVMDKFile,
		pv)
	host, err := p.VSphereClient.GetEsxByVm(context.Background(), vmId)
	if err != nil {
		return err
	}
	klog.Infof("Got ESXi host: %s", host)
	// Only ensure VIB if using VIB method
	if !p.UseSSHMethod {
		err = ensureVib(p.VSphereClient, host, vmDisk.Datastore, VibVersion)
		if err != nil {
			return fmt.Errorf("failed to ensure VIB is installed: %w", err)
		}
	}
	// for iSCSI add the host to the group using IQN. Is there something else for FC?
	r, err := p.VSphereClient.RunEsxCommand(context.Background(), host, []string{"storage", "core", "adapter", "list"})
	if err != nil {
		return err
	}
	uniqueUIDs := make(map[string]bool)
	hbaUIDs := []string{}
	hbaUIDsNamesMap := make(map[string]string)
	isSciniRequired := false
	// The storage backend may opt in to PowerFlex (scini) handling.
	if sciniAware, ok := p.StorageApi.(SciniAware); ok {
		if sciniAware.SciniRequired() {
			isSciniRequired = true
		}
	}
	// powerflex handling - scini is the powerflex kernel module and is not
	// using any iqn/wwn to identity the host. Instead extract the SdcGuid
	// as the possible clonner identifier
	if isSciniRequired {
		klog.Infof("scini is required for the storage api")
		sciModule, err := p.VSphereClient.RunEsxCommand(context.Background(), host, []string{"system", "module", "parameters", "list", "-m", "scini"})
		if err != nil {
			klog.Infof("failed to fetch the scini module parameters %s: ", err)
			return err
		}
		for _, moduleFields := range sciModule {
			if slices.Contains(moduleFields["Name"], "IoctlIniGuidStr") {
				klog.Infof("scini guid %v", moduleFields["Value"])
				for _, s := range moduleFields["Value"] {
					hbaUIDs = append(hbaUIDs, strings.ToUpper(s))
				}
				klog.Infof("Scini hbas found: %+v", hbaUIDs)
			}
		}
	}
	if !isSciniRequired {
		klog.Infof("scini is not required for the storage api")
		// Collect the UIDs of all connected FC/iSCSI/NVMe-oF adapters,
		// de-duplicated, remembering each UID's adapter name for cleanup.
		for _, a := range r {
			hbaName, hasHbaName := a["HBAName"]
			if !hasHbaName {
				continue
			}
			driver, hasDriver := a["Driver"]
			if !hasDriver {
				// irrelevant adapter
				continue
			}
			// 'esxcli storage core adapter list' returns LinkState field
			// 'esxcli iscsi adapater list' returns State field
			linkState, hasLink := a["LinkState"]
			uid, hasUID := a["UID"]
			if !hasDriver || !hasLink || !hasUID || len(driver) == 0 || len(linkState) == 0 || len(uid) == 0 {
				continue
			}
			drv := driver[0]
			link := linkState[0]
			id := uid[0]
			id = strings.ToLower(strings.TrimSpace(id))
			// Check if the UID is FC, iSCSI or NVMe-oF
			isTargetUID := strings.HasPrefix(id, "fc.") || strings.HasPrefix(id, "iqn.") || strings.HasPrefix(id, "nqn.")
			if (link == "link-up" || link == "online") && isTargetUID {
				if _, exists := uniqueUIDs[id]; !exists {
					uniqueUIDs[id] = true
					hbaUIDs = append(hbaUIDs, id)
					hbaUIDsNamesMap[id] = hbaName[0]
					klog.Infof("Storage Adapter UID: %s (Driver: %s)", id, drv)
				}
			}
		}
		klog.Infof("HBA UIDs found: %+v", hbaUIDs)
	}
	if len(hbaUIDs) == 0 {
		klog.Infof("no valid HBA UIDs found for host %s", host)
		return fmt.Errorf("no valid HBA UIDs found for host %s", host)
	}
	mappingContext, err := p.StorageApi.EnsureClonnerIgroup(xcopyInitiatorGroup, hbaUIDs)
	if err != nil {
		return fmt.Errorf("failed to add the ESX HBA UID %s to the initiator group %w", hbaUIDs, err)
	}
	lun, err := p.StorageApi.ResolvePVToLUN(pv)
	if err != nil {
		return err
	}
	// Record which groups the LUN is mapped to now, so they can be restored
	// after the copy.
	originalInitiatorGroups, err := p.StorageApi.CurrentMappedGroups(lun, mappingContext)
	if err != nil {
		return fmt.Errorf("failed to fetch the current initiator groups of the lun %s: %w", lun.Name, err)
	}
	klog.Infof("Current initiator groups the LUN %s is mapped to %+v", lun.IQN, originalInitiatorGroups)
	if isSciniRequired {
		sdcId, ok := mappingContext["sdcId"]
		if !ok {
			klog.Infof("sdcId is required but not found in mappingContext")
			return fmt.Errorf("sdcId is required but not found in mappingContext")
		} else {
			// NOTE(review): this reassigns the package-level variable, which
			// is racy if Populate ever runs concurrently in one process.
			xcopyInitiatorGroup = sdcId.(string)
			klog.Infof("sdcId found in mappingContext: %s", sdcId)
		}
	}
	// Partial-cleanup defer: runs only when the full cleanup below was never
	// reached (an error occurred before or during Map/rescan).
	fullCleanUpAttempted := false
	defer func() {
		if fullCleanUpAttempted {
			return
		}
		if !slices.Contains(originalInitiatorGroups, xcopyInitiatorGroup) {
			// Only attempt cleanup if lun was successfully resolved
			if lun.Name != "" {
				if mappingContext != nil {
					mappingContext["UnmapAllSdc"] = false
				}
				errUnmap := p.StorageApi.UnMap(xcopyInitiatorGroup, lun, mappingContext)
				if errUnmap != nil {
					klog.Infof("failed to unmap all initiator groups during partial cleanup: %s", errUnmap)
				}
			} else {
				klog.V(2).Infof("Skipping cleanup unmap as LUN was not successfully resolved")
			}
		}
	}()
	lun, err = p.StorageApi.Map(xcopyInitiatorGroup, lun, mappingContext)
	if err != nil {
		return fmt.Errorf("failed to map lun %s to initiator group %s: %w", lun, xcopyInitiatorGroup, err)
	}
	targetLUN := fmt.Sprintf("/vmfs/devices/disks/%s", lun.NAA)
	klog.Infof("resolved lun with IQN %s to lun %s", lun.IQN, targetLUN)
	// Serialize rescans per host via a lease named after the host.
	leaseHostID := strings.ReplaceAll(strings.ToLower(host.String()), ":", "-")
	err = hostLocker.WithLock(context.Background(), leaseHostID,
		func(ctx context.Context) error {
			return rescan(ctx, p.VSphereClient, host, lun.NAA)
		},
	)
	if err != nil {
		return fmt.Errorf("failed to find the device %s after scanning: %w", targetLUN, err)
	}
	// Full cleanup defer: detach the device, unmap it, restore the original
	// initiator groups and clear dead devices from the host.
	defer func() {
		klog.Infof("cleaning up - unmap and rescan to clean dead devices")
		fullCleanUpAttempted = true
		if mappingContext != nil {
			mappingContext["UnmapAllSdc"] = true
			mappingContext[CleanupXcopyInitiatorGroup] = true
		}
		klog.Errorf("cleaning up lun %s:", lun.NAA)
		// set device state to off and prevents any i/o to it
		_, err = p.VSphereClient.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "set", "--state", "off", "-d", lun.NAA})
		if err != nil {
			klog.Errorf("failed to set state off for device %s: %s", lun.Name, err)
		} else {
			// Wait for the device state to become "off" using exponential backoff
			err = waitForDeviceStateOff(p.VSphereClient, host, lun.NAA)
			if err != nil {
				klog.Errorf("timeout waiting for device %s to reach off state: %s", lun.Name, err)
			}
		}
		_, err = p.VSphereClient.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "detached", "remove", "-d", lun.NAA})
		if err != nil {
			klog.Errorf("failed to remove device from detached list %s: %s", lun.Name, err)
		}
		// finaly after the kernel have it detached and not having any i/o we can unmap
		errUnmap := p.StorageApi.UnMap(xcopyInitiatorGroup, lun, mappingContext)
		if errUnmap != nil {
			klog.Errorf("failed in unmap during cleanup, lun %s: %s", lun.Name, errUnmap)
		}
		// map the LUN back to the original OCP worker
		klog.Infof("about to map the volume back to the originalInitiatorGroups, which are: %s", originalInitiatorGroups)
		for _, group := range originalInitiatorGroups {
			_, errMap := p.StorageApi.Map(group, lun, mappingContext)
			if errMap != nil {
				klog.Warningf("failed to map the volume back the original holder - this may cause problems: %v", errMap)
			}
		}
		// unmap devices appear dead in ESX right after they are unmapped, now
		// clean them
		klog.Infof("about to delete dead devices")
		klog.Infof("taking a short nap to let the ESX settle down")
		time.Sleep(5 * time.Second)
		deleteDeadDevices(p.VSphereClient, host, hbaUIDs, hbaUIDsNamesMap)
	}()
	// Execute the clone using the unified task handling approach
	var executor TaskExecutor
	if p.UseSSHMethod {
		sshSetupCtx, sshCancel := context.WithTimeout(context.Background(), p.SSHTimeout)
		defer sshCancel()
		// Setup secure script
		finalScriptPath, err := ensureSecureScript(sshSetupCtx, p.VSphereClient, host, vmDisk.Datastore)
		if err != nil {
			return fmt.Errorf("failed to ensure secure script: %w", err)
		}
		klog.V(2).Infof("Secure script ready at path: %s", finalScriptPath)
		// Enable SSH access
		err = vmware.EnableSSHAccess(sshSetupCtx, p.VSphereClient, host, p.SSHPrivateKey, p.SSHPublicKey, finalScriptPath)
		if err != nil {
			return fmt.Errorf("failed to enable SSH access: %w", err)
		}
		// Get host IP
		hostIP, err := vmware.GetHostIPAddress(sshSetupCtx, host)
		if err != nil {
			return fmt.Errorf("failed to get host IP address: %w", err)
		}
		// Create SSH client with background context (no timeout for long-running operations)
		sshClient := vmware.NewSSHClient()
		err = sshClient.Connect(context.Background(), hostIP, "root", p.SSHPrivateKey)
		if err != nil {
			return fmt.Errorf("failed to connect via SSH: %w", err)
		}
		defer sshClient.Close()
		klog.V(2).Infof("SSH connection established with restricted commands")
		executor = NewSSHTaskExecutor(sshClient)
	} else {
		executor = NewVIBTaskExecutor(p.VSphereClient)
	}
	// Use unified task execution
	return ExecuteCloneTask(context.Background(), executor, host, vmDisk.Datastore, vmDisk.Path(), targetLUN, progress, xcopyUsed)
}
// waitForDeviceStateOff waits for the device state to become "off" using
// exponential backoff (up to 10 attempts starting at 1s, doubling each time).
// Command failures and missing status fields are treated as "not yet" and
// retried; the backoff's timeout error is returned if the state never
// reaches "off".
func waitForDeviceStateOff(client vmware.Client, host *object.HostSystem, deviceNAA string) error {
	cfg := wait.Backoff{
		Duration: 1 * time.Second,
		Factor:   2.0,
		Jitter:   0.1,
		Steps:    10, // Max retries
	}
	poll := func() (bool, error) {
		listing, cmdErr := client.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "list", "-d", deviceNAA})
		if cmdErr != nil {
			klog.V(2).Infof("failed to check device %s state: %v", deviceNAA, cmdErr)
			return false, nil // retry on command error
		}
		if len(listing) == 0 || listing[0] == nil || len(listing[0]["Status"]) == 0 {
			return false, nil // no status yet; retry
		}
		state := listing[0]["Status"][0]
		klog.V(2).Infof("device %s status: %s", deviceNAA, state)
		if state != "off" {
			return false, nil // still not off; retry
		}
		klog.Infof("device %s state is now off", deviceNAA)
		return true, nil
	}
	return wait.ExponentialBackoff(cfg, poll)
}
// After mapping a volume the ESX needs a rescan to see the device. ESXs can opt-in to do it automatically
func rescan(ctx context.Context, client vmware.Client, host *object.HostSystem, targetLUN string) error {
for i := 1; i <= rescanRetries; i++ {
// Check if we should abort (lease was lost)
if ctx.Err() != nil {
return fmt.Errorf("rescan aborted (lease lost): %w", ctx.Err())
}
result, err := client.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "list", "-d", targetLUN})
if err == nil {
status := ""
if result != nil && result[0] != nil && len(result[0]["Status"]) > 0 {
status = result[0]["Status"][0]
}
klog.Infof("found device %s with status %v", targetLUN, status)
if status == "off" || status == "dead timeout" {
klog.Infof("try to remove the device from the detached list (this can happen if restarting this pod or using the same volume)")
_, err = client.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "detached", "remove", "-d", targetLUN})
continue
}
return nil
} else {
_, err = client.RunEsxCommand(
context.Background(), host, []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"})
if err != nil {
klog.Errorf("failed to rescan for adapters, attempt %d/%d due to: %s", i, rescanRetries, err)
}
// Sleep but respect context cancellation
select {
case <-time.After(rescanSleepInterval):
// Continue to next iteration
case <-ctx.Done():
return fmt.Errorf("rescan aborted during retry sleep (lease lost): %w", ctx.Err())
}
}
}
// Check one more time before final attempt
if ctx.Err() != nil {
return fmt.Errorf("rescan aborted before final attempt (lease lost): %w", ctx.Err())
}
// last check after the last rescan
_, err := client.RunEsxCommand(context.Background(), host, []string{"storage", "core", "device", "list", "-d", targetLUN})
if err == nil {
klog.Infof("found device %s", targetLUN)
return nil
} else {
return fmt.Errorf("failed to find device %s: %w", targetLUN, err)
}
}
// deleteDeadDevices triggers a "delete" rescan on every supplied HBA so the
// ESXi host drops devices that are no longer reachable. Each adapter is
// retried up to rescanRetries times; failures are collected and only logged.
// The function always returns nil (cleanup is best-effort).
func deleteDeadDevices(client vmware.Client, host *object.HostSystem, hbaUIDs []string, hbaUIDsNamesMap map[string]string) error {
	var failed []string
	for _, hbaUID := range hbaUIDs {
		name, known := hbaUIDsNamesMap[hbaUID]
		if !known {
			// Fall back to the raw UID when no friendly name is mapped.
			name = hbaUID
		}
		klog.Infof("deleting dead devices for adapter %s", name)
		cleaned := false
		for attempt := 0; attempt < rescanRetries; attempt++ {
			if _, err := client.RunEsxCommand(
				context.Background(),
				host,
				[]string{"storage", "core", "adapter", "rescan", "-t", "delete", "-A", name}); err == nil {
				klog.Infof("rescan to delete dead devices completed for adapter %s", hbaUID)
				cleaned = true
				break // finished with this adapter, move to the next one
			}
			time.Sleep(rescanSleepInterval)
		}
		if !cleaned {
			failed = append(failed, hbaUID)
		}
	}
	if len(failed) > 0 {
		klog.Warningf("failed to delete dead devices for adapters %s", failed)
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/ssh_executor.go | cmd/vsphere-xcopy-volume-populator/internal/populator/ssh_executor.go | package populator
import (
"context"
"fmt"
"encoding/json"
"encoding/xml"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/vmware/govmomi/object"
"k8s.io/klog/v2"
)
// SSHTaskExecutor implements TaskExecutor for the SSH method
type SSHTaskExecutor struct {
	// sshClient runs the helper script on the ESXi host over SSH.
	sshClient vmware.SSHClient
}

// NewSSHTaskExecutor returns a TaskExecutor that drives vmkfstools clone
// tasks on the ESXi host through the given SSH client.
func NewSSHTaskExecutor(sshClient vmware.SSHClient) TaskExecutor {
	return &SSHTaskExecutor{
		sshClient: sshClient,
	}
}
// StartClone kicks off a vmkfstools clone on the ESXi host over SSH and
// returns the task descriptor parsed from the script's response.
func (e *SSHTaskExecutor) StartClone(_ context.Context, _ *object.HostSystem, datastore, sourcePath, targetLUN string) (*vmkfstoolsTask, error) {
	klog.Infof("Starting vmkfstools clone: datastore=%s, source=%s, target=%s", datastore, sourcePath, targetLUN)
	raw, err := e.sshClient.ExecuteCommand(datastore, "--clone", "-s", sourcePath, "-t", targetLUN)
	if err != nil {
		return nil, fmt.Errorf("failed to start clone: %w", err)
	}
	klog.Infof("Received output from script: %s", raw)
	task, parseErr := parseTaskResponse(raw)
	if parseErr != nil {
		return nil, fmt.Errorf("failed to parse clone response: %w", parseErr)
	}
	klog.Infof("Started vmkfstools clone task %s with PID %d", task.TaskId, task.Pid)
	return task, nil
}
// GetTaskStatus queries the helper script over SSH for the current state of
// the task identified by taskId.
func (e *SSHTaskExecutor) GetTaskStatus(_ context.Context, _ *object.HostSystem, datastore, taskId string) (*vmkfstoolsTask, error) {
	klog.V(2).Infof("Getting task status for %s (datastore=%s)", taskId, datastore)
	raw, err := e.sshClient.ExecuteCommand(datastore, "--task-get", "-i", taskId)
	if err != nil {
		return nil, fmt.Errorf("failed to get task status: %w", err)
	}
	task, parseErr := parseTaskResponse(raw)
	if parseErr != nil {
		return nil, fmt.Errorf("failed to parse status response: %w", parseErr)
	}
	klog.V(2).Infof("Task %s status: PID=%d, ExitCode=%s, LastLine=%s",
		taskId, task.Pid, task.ExitCode, task.LastLine)
	return task, nil
}
// CleanupTask removes the artifacts of a finished task on the ESXi host.
// A response that fails to parse is only logged, since the cleanup itself
// may still have succeeded on the host.
func (e *SSHTaskExecutor) CleanupTask(_ context.Context, _ *object.HostSystem, datastore, taskId string) error {
	klog.Infof("Cleaning up task %s (datastore=%s)", taskId, datastore)
	raw, err := e.sshClient.ExecuteCommand(datastore, "--task-clean", "-i", taskId)
	if err != nil {
		return fmt.Errorf("failed to cleanup task: %w", err)
	}
	if _, parseErr := parseTaskResponse(raw); parseErr != nil {
		klog.Warningf("Cleanup response parsing failed (task may still be cleaned): %v", parseErr)
	}
	klog.Infof("Cleaned up task %s", taskId)
	return nil
}
// XMLResponse represents the XML response structure
// produced by the ESXi helper script (root element <output>).
type XMLResponse struct {
	XMLName   xml.Name  `xml:"output"`
	Structure Structure `xml:"structure"`
}

// Structure represents the structure element in the XML response
type Structure struct {
	TypeName string  `xml:"typeName,attr"`
	Fields   []Field `xml:"field"`
}

// Field represents a field in the XML response:
// a name attribute plus a string payload.
type Field struct {
	Name   string `xml:"name,attr"`
	String string `xml:"string"`
}
// parseTaskResponse decodes the XML envelope emitted by the ESXi helper
// script and extracts the task information carried in its "message" field.
//
// The envelope contains a "status" field (the script returns "0" on success)
// and a "message" field. The message is normally JSON describing the task;
// for some operations (e.g. cleanup) it is plain text, in which case a
// minimal task carrying the text in LastLine is returned and the caller
// should rely on the status for success/failure.
func parseTaskResponse(xmlOutput string) (*vmkfstoolsTask, error) {
	var envelope XMLResponse
	if err := xml.Unmarshal([]byte(xmlOutput), &envelope); err != nil {
		return nil, fmt.Errorf("failed to parse XML response: %w", err)
	}

	// Pull out the status and message fields from the envelope.
	var status, message string
	for _, f := range envelope.Structure.Fields {
		if f.Name == "status" {
			status = f.String
		} else if f.Name == "message" {
			message = f.String
		}
	}

	switch {
	case status == "":
		return nil, fmt.Errorf("status field not found in XML response")
	case message == "":
		return nil, fmt.Errorf("message field not found in XML response")
	case status != "0":
		return nil, fmt.Errorf("operation failed with status %s: %s", status, message)
	}

	// Prefer the JSON form of the message.
	task := &vmkfstoolsTask{}
	if err := json.Unmarshal([]byte(message), task); err == nil {
		return task, nil
	}
	// Non-JSON messages (e.g. cleanup confirmations) are kept as plain text.
	klog.V(2).Infof("Message is not JSON, treating as plain text: %s", message)
	return &vmkfstoolsTask{
		LastLine: message, // Store the text message in LastLine for reference
	}, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/errors.go | cmd/vsphere-xcopy-volume-populator/internal/populator/errors.go | package populator
import (
	"errors"
	"fmt"
)
// MapUnmapError represents a non-fatal error that occurs during map/unmap operations
// These errors should not cause the populate container to restart
type MapUnmapError struct {
Operation string // "map" or "unmap"
Message string
Err error
}
func (e *MapUnmapError) Error() string {
if e.Err != nil {
return fmt.Sprintf("%s operation failed: %s: %v", e.Operation, e.Message, e.Err)
}
return fmt.Sprintf("%s operation failed: %s", e.Operation, e.Message)
}
func (e *MapUnmapError) Unwrap() error {
return e.Err
}
// IsMapUnmapError checks if an error is a MapUnmapError
func IsMapUnmapError(err error) bool {
_, ok := err.(*MapUnmapError)
return ok
}
// NewMapError creates a new MapUnmapError for map operations
func NewMapError(message string, err error) *MapUnmapError {
return &MapUnmapError{
Operation: "map",
Message: message,
Err: err,
}
}
// NewUnmapError creates a new MapUnmapError for unmap operations
func NewUnmapError(message string, err error) *MapUnmapError {
return &MapUnmapError{
Operation: "unmap",
Message: message,
Err: err,
}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/disk_type.go | cmd/vsphere-xcopy-volume-populator/internal/populator/disk_type.go | package populator
import (
"context"
"fmt"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"k8s.io/klog/v2"
)
// DiskType represents the type of disk backing in vSphere.
// The detected type determines which populator implementation is selected
// (see NewPopulator in the factory).
type DiskType string

const (
	// DiskTypeVVol represents a Virtual Volume backed disk
	DiskTypeVVol DiskType = "vvol"
	// DiskTypeRDM represents a Raw Device Mapping disk
	DiskTypeRDM DiskType = "rdm"
	// DiskTypeVMDK represents a traditional VMDK on datastore (default)
	DiskTypeVMDK DiskType = "vmdk"
)
// PopulatorSettings controls which optimized methods are disabled
// All methods are enabled by default unless explicitly disabled
// VMDK/Xcopy cannot be disabled as it's the default fallback.
// Values are populated from the DISABLE_VVOL_METHOD / DISABLE_RDM_METHOD
// environment variables at package init (see the settings var in the factory).
type populatorSettings struct {
	// VVolDisabled disables VVol optimization when disk is VVol-backed
	VVolDisabled bool
	// RDMDisabled disables RDM optimization when disk is RDM-backed
	RDMDisabled bool
	// Note: VMDK cannot be disabled as it's the default fallback
}
// detectDiskType inspects the disk backing reported by vSphere and classifies
// the disk as VVol, RDM, or plain VMDK (the default).
func detectDiskType(ctx context.Context, client vmware.Client, vmId string, vmdkPath string) (DiskType, error) {
	klog.V(2).Infof("Detecting disk type for VM %s, disk %s", vmId, vmdkPath)
	backing, err := client.GetVMDiskBacking(ctx, vmId, vmdkPath)
	if err != nil {
		return "", fmt.Errorf("failed to get disk backing info: %w", err)
	}
	if backing.VVolId != "" {
		klog.Infof("Detected VVol disk (VVolId: %s)", backing.VVolId)
		return DiskTypeVVol, nil
	}
	if backing.IsRDM {
		klog.Infof("Detected RDM disk (DeviceName: %s)", backing.DeviceName)
		return DiskTypeRDM, nil
	}
	klog.Infof("Detected VMDK disk")
	return DiskTypeVMDK, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/populate.go | cmd/vsphere-xcopy-volume-populator/internal/populator/populate.go | package populator
import (
"context"
"fmt"
"strings"
)
// Populator copies a source VMDK's content into a target volume.
type Populator interface {
	// Populate will populate the volume identified by volumeHandle with the content of
	// the sourceVMDKFile.
	// persistentVolume is a slim version of k8s PersistentVolume created by the CSI driver,
	// to help identify its underlying LUN in the storage system.
	// progress receives percentage updates and xcopyUsed receives 0/1;
	// NOTE(review): the semantics of the quit channel are not visible here —
	// confirm against the implementations.
	Populate(vmId string, sourceVMDKFile string, persistentVolume PersistentVolume, hostLocker Hostlocker, progress chan<- uint64, xcopyUsed chan<- int, quit chan error) error
}

//go:generate go run go.uber.org/mock/mockgen -destination=mocks/hostlocker_mock.go -package=mocks . Hostlocker

// Hostlocker serializes heavy per-host (ESXi) operations across populator
// instances (see HostLeaseLocker for the lease-based implementation).
type Hostlocker interface {
	// WithLock acquires a distributed lock and executes work. The work function receives a context
	// that will be cancelled if the lock is lost (e.g., due to lease renewal failure).
	// Work should check ctx.Err() periodically and abort gracefully if cancelled.
	WithLock(ctx context.Context, hostID string, work func(ctx context.Context) error) error
}
// PersistentVolume is a slim view of the k8s PersistentVolume created by the
// CSI driver, carrying just enough to identify the underlying LUN in the
// storage system.
type PersistentVolume struct {
	Name             string
	VolumeHandle     string
	VolumeAttributes map[string]string
}
// LUN describes the object in the storage system
type LUN struct {
	//Name is the volume name or just name in the storage system
	Name string
	// naa
	NAA string
	// SerialNumber is a representation of the disk. With combination of the
	// vendor ID it should be globally unique and can be identified by udev, usually
	// under /dev/disk/by-id/ with some prefix or postfix, depending on the udev rule
	// and can also be found by lsblk -o name,serial
	SerialNumber string
	// target's IQN
	IQN string
	// Storage provider ID in hex
	ProviderID string
	// the volume handle as set by the CSI driver field spec.volumeHandle
	VolumeHandle string
	// Logical device ID of the volume
	LDeviceID string
	// Storage device Serial Number
	StorageSerialNumber string
	// Storage Protocol
	Protocol string
}
// VMDisk is the target VMDisk in vmware, identified by its datastore, the
// VM's home directory on that datastore, and the vmdk file name.
type VMDisk struct {
	Datastore string
	VmHomeDir string
	VmdkFile  string
}

// Path returns the absolute path of the vmdk on the ESXi host, i.e.
// /vmfs/volumes/<datastore>/<vm-home>/<file>.
func (d *VMDisk) Path() string {
	return fmt.Sprintf("/vmfs/volumes/%s/%s/%s", d.Datastore, d.VmHomeDir, d.VmdkFile)
}

// ParseVmdkPath parses a vSphere datastore path of the form
// "[datastore] vmname/xyz.vmdk" into a VMDisk. Exactly one directory level
// under the datastore is accepted; anything else is rejected.
// Error strings start lowercase per Go convention (staticcheck ST1005).
func ParseVmdkPath(vmdkPath string) (VMDisk, error) {
	if vmdkPath == "" {
		return VMDisk{}, fmt.Errorf("vmdkPath cannot be empty")
	}
	parts := strings.SplitN(vmdkPath, "] ", 2)
	if len(parts) != 2 {
		return VMDisk{}, fmt.Errorf("invalid vmdkPath %q, should be '[datastore] vmname/xyz.vmdk'", vmdkPath)
	}
	datastore := strings.TrimPrefix(parts[0], "[")
	pathParts := strings.Split(parts[1], "/")
	if len(pathParts) != 2 {
		return VMDisk{}, fmt.Errorf("invalid vmdkPath %q, should be '[datastore] vmname/xyz.vmdk'", vmdkPath)
	}
	return VMDisk{
		Datastore: datastore,
		VmHomeDir: pathParts[0],
		VmdkFile:  pathParts[1],
	}, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/host_lease.go | cmd/vsphere-xcopy-volume-populator/internal/populator/host_lease.go | package populator
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/google/uuid"
coordinationv1 "k8s.io/api/coordination/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/kubernetes"
coordinationclientv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
"k8s.io/klog/v2"
)
// HostLeaseLocker is a mechanism to use k8s lease object to perform
// critical sections exclusively. This is important to prevent heavy
// operations such as rescans from destabilizing the ESX and the copy process.
// Up to maxConcurrentHolders populators may hold a slot for the same host
// simultaneously (one Lease object per slot).
type HostLeaseLocker struct {
	// the namespace should be constant as we want to lock ESX operations across migration
	// plans. One option is to hardcode openshift-mtv, the other is to consider
	// a new secret value or a flag
	namespace string
	// clientset is used to create/get/update Lease objects.
	clientset kubernetes.Interface
	// leaseDuration is how long the lease is held (in seconds). Default: 10 seconds
	// Can be configured via HOST_LEASE_DURATION_SECONDS env var
	leaseDuration time.Duration
	// retryInterval is how long to wait before retrying to acquire a held lease. Default: 10 seconds
	retryInterval time.Duration
	// renewInterval is how often to renew the lease while work is running. Default: 3 seconds
	renewInterval time.Duration
	// maxConcurrentHolders is the maximum number of concurrent lease holders per host. Default: 2
	maxConcurrentHolders int
}
// NewHostLeaseLocker creates a new HostLeaseLocker with the given clientset.
// Defaults: 10s lease duration, 10s retry interval, 3s renew interval, at
// most 2 concurrent holders, namespace "openshift-mtv".
// HOST_LEASE_NAMESPACE overrides the namespace; HOST_LEASE_DURATION_SECONDS
// (an integer number of seconds) overrides the lease duration — an
// unparsable value silently keeps the default.
func NewHostLeaseLocker(clientset kubernetes.Interface) *HostLeaseLocker {
	locker := &HostLeaseLocker{
		clientset:            clientset,
		leaseDuration:        10 * time.Second,
		retryInterval:        10 * time.Second,
		renewInterval:        3 * time.Second,
		maxConcurrentHolders: 2,
		namespace:            "openshift-mtv",
	}
	if ns := os.Getenv("HOST_LEASE_NAMESPACE"); ns != "" {
		locker.namespace = ns
	}
	if secs := os.Getenv("HOST_LEASE_DURATION_SECONDS"); secs != "" {
		if d, err := time.ParseDuration(secs + "s"); err == nil {
			locker.leaseDuration = d
		}
	}
	return locker
}
// WithLock acquires a distributed lock for a specific ESXi host using direct Lease API.
// It blocks until the lock is acquired or the context is canceled.
// The actual work (the critical section) is performed by the provided `work` function.
// The lease is automatically renewed while work is running and deleted when complete.
//
// Lock layout: up to maxConcurrentHolders Lease objects named
// "esxi-lock-<hostID>-slot-<n>"; acquiring any free (or expired) slot grants
// the lock. API errors fail fast; only "slot is held" is retried.
func (h *HostLeaseLocker) WithLock(ctx context.Context, hostID string, work func(ctx context.Context) error) error {
	if hostID == "" {
		return fmt.Errorf("hostID is empty, can't hold a lease without any identity")
	}
	// hostID becomes part of the Lease object name, so it must be a valid
	// DNS-1123 label.
	if dnsValidationErrors := validation.IsDNS1123Label(hostID); len(dnsValidationErrors) > 0 {
		return fmt.Errorf("the hostID to use for the lease isn't a valid DNS name: %v", dnsValidationErrors)
	}
	// 1. Define a unique identity for this populator instance (the lock holder).
	lockHolderIdentity, err := os.Hostname()
	if err != nil {
		lockHolderIdentity = "populator-" + uuid.New().String()
	}
	klog.Infof("This populator's identity is: %s", lockHolderIdentity)
	// 2. Get the lease client
	leaseClient := h.clientset.CoordinationV1().Leases(h.namespace)
	// 3. Pre-check: Verify we can access the Lease API before entering retry loop.
	// Try to get slot-0 as a test (it may or may not exist)
	testLeaseName := fmt.Sprintf("esxi-lock-%s-slot-0", hostID)
	_, err = leaseClient.Get(ctx, testLeaseName, metav1.GetOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		// API access error (not just "lease doesn't exist") - fail fast
		return fmt.Errorf("failed to access lease API for host %s (failing fast - not retrying): %w", hostID, err)
	}
	// 4. Try to acquire any available lease slot in a retry loop
	leaseDurationSec := int32(h.leaseDuration.Seconds())
	for {
		// Check if context is canceled
		if ctx.Err() != nil {
			return fmt.Errorf("context canceled while waiting for lock: %w", ctx.Err())
		}
		// Try each slot in order
		for slot := 0; slot < h.maxConcurrentHolders; slot++ {
			leaseName := fmt.Sprintf("esxi-lock-%s-slot-%d", hostID, slot)
			// Try to create the lease for this slot
			now := metav1.NewMicroTime(time.Now())
			lease := &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      leaseName,
					Namespace: h.namespace,
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       &lockHolderIdentity,
					LeaseDurationSeconds: &leaseDurationSec,
					AcquireTime:          &now,
					RenewTime:            &now,
				},
			}
			createdLease, err := leaseClient.Create(ctx, lease, metav1.CreateOptions{})
			if err == nil {
				// Successfully created the lease - we have a slot!
				klog.Infof("Acquired lease slot %d for host %s", slot, hostID)
				return h.executeWorkWithLease(ctx, leaseClient, createdLease, hostID, slot, work)
			}
			// If it's not an "already exists" error, it's an API error - fail fast
			if !apierrors.IsAlreadyExists(err) {
				return fmt.Errorf("failed to create lease for host %s slot %d (API error - not retrying): %w", hostID, slot, err)
			}
			// Lease already exists - check if it's expired or still held
			existingLease, getErr := leaseClient.Get(ctx, leaseName, metav1.GetOptions{})
			if getErr != nil {
				if !apierrors.IsNotFound(getErr) {
					// Failed to get the existing lease - this is an API error
					return fmt.Errorf("failed to get existing lease for host %s slot %d (API error - not retrying): %w", hostID, slot, getErr)
				}
				// Lease was deleted between create and get - try this slot again
				klog.V(2).Infof("Lease %s was deleted, trying to acquire it", leaseName)
				// Retry this slot immediately by continuing the loop
				// (slot-- cancels out the loop's slot++).
				slot--
				continue
			}
			// Check if the lease is expired
			if h.isLeaseExpired(existingLease) {
				// Lease is expired - try to take it over
				klog.Infof("Lease %s (slot %d) is expired, attempting to take it over", leaseName, slot)
				existingLease.Spec.HolderIdentity = &lockHolderIdentity
				now := metav1.NewMicroTime(time.Now())
				existingLease.Spec.AcquireTime = &now
				existingLease.Spec.RenewTime = &now
				updatedLease, updateErr := leaseClient.Update(ctx, existingLease, metav1.UpdateOptions{})
				if updateErr == nil {
					// Successfully took over the expired lease
					klog.Infof("Acquired expired lease slot %d for host %s", slot, hostID)
					return h.executeWorkWithLease(ctx, leaseClient, updatedLease, hostID, slot, work)
				}
				// Update failed (likely someone else took it or conflict) - try next slot
				klog.V(2).Infof("Failed to take over expired lease slot %d (conflict), trying next slot: %v", slot, updateErr)
			} else {
				// Lease is held by someone else
				holder := "unknown"
				if existingLease.Spec.HolderIdentity != nil {
					holder = *existingLease.Spec.HolderIdentity
				}
				klog.V(2).Infof("Lease slot %d for host %s is held by %s, trying next slot", slot, hostID, holder)
			}
		}
		// All slots are taken - wait and retry
		klog.Infof("All %d lease slots for host %s are taken, waiting %v before retry", h.maxConcurrentHolders, hostID, h.retryInterval)
		select {
		case <-time.After(h.retryInterval):
			// Retry all slots
		case <-ctx.Done():
			return fmt.Errorf("context canceled while waiting for lock: %w", ctx.Err())
		}
	}
}
// isLeaseExpired reports whether the lease's renew time plus its duration is
// already in the past. A lease missing either field is treated as not expired.
func (h *HostLeaseLocker) isLeaseExpired(lease *coordinationv1.Lease) bool {
	renewTime := lease.Spec.RenewTime
	durationSecs := lease.Spec.LeaseDurationSeconds
	if renewTime == nil || durationSecs == nil {
		return false
	}
	ttl := time.Duration(*durationSecs) * time.Second
	return time.Now().After(renewTime.Add(ttl))
}
// executeWorkWithLease executes the work while holding the lease and renewing it periodically.
// If a renewal fails, the work context is cancelled immediately (the lock is
// considered lost) and the renewal error is surfaced when the work itself
// returned nil. The lease is intentionally left to auto-expire rather than
// being deleted explicitly (see the note at the end).
func (h *HostLeaseLocker) executeWorkWithLease(
	ctx context.Context,
	leaseClient coordinationclientv1.LeaseInterface,
	lease *coordinationv1.Lease,
	hostID string,
	slot int,
	work func(context.Context) error,
) error {
	klog.Infof("Successfully acquired lock slot %d for host %s", slot, hostID)
	// Create a context for the work that we can cancel if renewal fails
	workCtx, workCancel := context.WithCancel(ctx)
	defer workCancel()
	// Create a context for the renewal goroutine
	renewCtx, renewCancel := context.WithCancel(ctx)
	defer renewCancel()
	// Channel to signal work completion
	workDone := make(chan struct{})
	// Buffered (size 1) so the renewal goroutine can report its error and
	// exit even if nobody is receiving yet.
	renewalErrors := make(chan error, 1)
	// Start lease renewal goroutine
	go func() {
		ticker := time.NewTicker(h.renewInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Renew the lease
				now := metav1.NewMicroTime(time.Now())
				lease.Spec.RenewTime = &now
				updatedLease, err := leaseClient.Update(renewCtx, lease, metav1.UpdateOptions{})
				if err != nil {
					klog.Errorf("Failed to renew lease slot %d for host %s: %v", slot, hostID, err)
					// Cancel the work context immediately - we've lost the lock!
					workCancel()
					// Non-blocking send: the buffer may already hold an error.
					select {
					case renewalErrors <- fmt.Errorf("failed to renew lease, work cancelled: %w", err):
					default:
					}
					return
				}
				// Keep the captured lease up to date (only this goroutine
				// touches it after start).
				lease = updatedLease
				klog.V(2).Infof("Renewed lease slot %d for host %s", slot, hostID)
			case <-renewCtx.Done():
				// Work completed or context canceled
				return
			case <-workDone:
				// Work completed
				return
			}
		}
	}()
	// Execute the work
	workErr := work(workCtx)
	close(workDone)
	renewCancel() // Stop the renewal goroutine
	// Check if there was a renewal error
	select {
	case renewErr := <-renewalErrors:
		if workErr == nil {
			workErr = renewErr
		}
		// Add context to help debugging
		if errors.Is(workCtx.Err(), context.Canceled) {
			klog.Warningf("Work for slot %d host %s was cancelled due to lease renewal failure", slot, hostID)
		}
	default:
	}
	klog.Infof("Work complete for slot %d host %s", slot, hostID)
	// Note: We intentionally do NOT delete the lease explicitly.
	// The lease will auto-expire after leaseDuration (10s), at which point
	// other pods can acquire it. This is simpler and more reliable than
	// explicit deletion, which can fail silently. The 10-second delay is
	// acceptable given typical operation durations (30-300s per disk).
	klog.V(2).Infof("Lease for slot %d host %s will auto-expire in %v", slot, hostID, h.leaseDuration)
	return workErr
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/vib_executor.go | cmd/vsphere-xcopy-volume-populator/internal/populator/vib_executor.go | package populator
import (
"context"
"encoding/json"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/vmware/govmomi/object"
"k8s.io/klog/v2"
)
// VIBTaskExecutor implements TaskExecutor for the VIB method
// (vmkfstools driven through the esxcli extension installed on the host).
type VIBTaskExecutor struct {
	// VSphereClient runs esxcli commands against the target host.
	VSphereClient vmware.Client
}

// NewVIBTaskExecutor returns a TaskExecutor backed by the esxcli VIB
// extension on the ESXi host.
func NewVIBTaskExecutor(client vmware.Client) TaskExecutor {
	return &VIBTaskExecutor{
		VSphereClient: client,
	}
}
// StartClone launches a vmkfstools clone via the esxcli VIB extension and
// parses the JSON task descriptor from the command output.
// The datastore parameter is unused by the VIB method; it exists only for
// TaskExecutor interface compatibility.
func (e *VIBTaskExecutor) StartClone(ctx context.Context, host *object.HostSystem, _, sourcePath, targetLUN string) (*vmkfstoolsTask, error) {
	r, err := e.VSphereClient.RunEsxCommand(ctx, host, []string{"vmkfstools", "clone", "-s", sourcePath, "-t", targetLUN})
	if err != nil {
		klog.Infof("error during copy, response from esxcli %+v", r)
		return nil, err
	}
	klog.Info("response from esxcli ", r)
	// Concatenate the "message" value of every response line into one JSON
	// document.
	var payload string
	for _, line := range r {
		payload += line.Value("message")
	}
	task := vmkfstoolsTask{}
	if err := json.Unmarshal([]byte(payload), &task); err != nil {
		return nil, err
	}
	return &task, nil
}
// GetTaskStatus fetches the current state of taskId via the esxcli VIB
// extension. The datastore parameter is unused by the VIB method; it exists
// only for TaskExecutor interface compatibility.
func (e *VIBTaskExecutor) GetTaskStatus(ctx context.Context, host *object.HostSystem, _, taskId string) (*vmkfstoolsTask, error) {
	r, err := e.VSphereClient.RunEsxCommand(ctx, host, []string{"vmkfstools", "taskGet", "-i", taskId})
	if err != nil {
		return nil, err
	}
	klog.Info("response from esxcli ", r)
	// Concatenate the "message" value of every response line into one JSON
	// document.
	var payload string
	for _, line := range r {
		payload += line.Value("message")
	}
	task := vmkfstoolsTask{}
	if err := json.Unmarshal([]byte(payload), &task); err != nil {
		klog.Errorf("failed to unmarshal response from esxcli %+v", r)
		return nil, err
	}
	klog.Infof("response from esxcli %+v", task)
	return &task, nil
}
// CleanupTask removes the artifacts of a finished vmkfstools task via the
// esxcli VIB extension. The datastore parameter is unused by the VIB method;
// it exists only for TaskExecutor interface compatibility.
func (e *VIBTaskExecutor) CleanupTask(ctx context.Context, host *object.HostSystem, datastore, taskId string) error {
	r, err := e.VSphereClient.RunEsxCommand(ctx, host, []string{"vmkfstools", "taskClean", "-i", taskId})
	if err != nil {
		klog.Errorf("failed cleaning up task artifacts %v", r)
		return err
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/factory.go | cmd/vsphere-xcopy-volume-populator/internal/populator/factory.go | package populator
import (
"context"
"fmt"
"os"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"k8s.io/klog/v2"
)
// settings captures which optimized populator methods are disabled via
// environment variables; evaluated once at package initialization.
var settings = populatorSettings{
	VVolDisabled: os.Getenv("DISABLE_VVOL_METHOD") == "true",
	RDMDisabled:  os.Getenv("DISABLE_RDM_METHOD") == "true",
}
// SSHConfig holds SSH configuration for VMDK/Xcopy populator
type SSHConfig struct {
	// UseSSH selects the SSH transport instead of the esxcli VIB transport.
	UseSSH bool
	// PrivateKey / PublicKey are the SSH key pair used to reach the host.
	PrivateKey []byte
	PublicKey  []byte
	// TimeoutSeconds bounds SSH operations; 0 means the default (30s).
	TimeoutSeconds int
}
// NewPopulator creates a Populator matched to the source disk's backing:
// step 1 detects the disk type (VVol/RDM/VMDK) via vSphere; step 2 picks the
// optimized populator when available and enabled, otherwise falls back to the
// default VMDK/Xcopy populator.
func NewPopulator(
	storageApi StorageApi,
	vsphereHostname string,
	vsphereUsername string,
	vspherePassword string,
	vmId string,
	vmdkPath string,
	sshConfig *SSHConfig,
) (Populator, error) {
	// Create vSphere client for type detection
	vsphereClient, err := vmware.NewClient(vsphereHostname, vsphereUsername, vspherePassword)
	if err != nil {
		return nil, fmt.Errorf("failed to create vSphere client: %w", err)
	}
	ctx := context.Background()
	// Step 1: Detect the disk type; on failure fall back to the default.
	diskType, err := detectDiskType(ctx, vsphereClient, vmId, vmdkPath)
	if err != nil {
		klog.Warningf("Failed to detect disk type: %v, using VMDK/Xcopy", err)
		return createVMDKPopulator(storageApi, vsphereClient, sshConfig)
	}
	klog.Infof("Detected disk type: %s", diskType)
	// Step 2: Try to use optimized method for detected disk type
	switch diskType {
	case DiskTypeVVol:
		if canUse(storageApi, DiskTypeVVol) {
			klog.Infof("VVol method is available, using VVol populator")
			return createVVolPopulator(storageApi, vsphereClient)
		}
	case DiskTypeRDM:
		if canUse(storageApi, DiskTypeRDM) {
			klog.Infof("RDM method is available, using RDM populator")
			return createRDMPopulator(storageApi, vsphereClient)
		}
	}
	// Default: Use VMDK/Xcopy (always works)
	klog.Infof("Using VMDK/Xcopy populator")
	return createVMDKPopulator(storageApi, vsphereClient, sshConfig)
}
// createVVolPopulator builds the VVol-optimized populator; it fails when the
// storage API does not implement VVolCapable.
func createVVolPopulator(storageApi StorageApi, vmwareClient vmware.Client) (Populator, error) {
	vvolApi, implements := storageApi.(VVolCapable)
	if !implements {
		return nil, fmt.Errorf("storage API does not implement VVolCapable")
	}
	return NewVvolPopulator(vvolApi, vmwareClient)
}
// createRDMPopulator builds the RDM-optimized populator; it fails when the
// storage API does not implement RDMCapable.
func createRDMPopulator(storageApi StorageApi, vmwareClient vmware.Client) (Populator, error) {
	rdmApi, implements := storageApi.(RDMCapable)
	if !implements {
		return nil, fmt.Errorf("storage API does not implement RDMCapable")
	}
	return NewRDMPopulator(rdmApi, vmwareClient)
}
// createVMDKPopulator builds the default VMDK/Xcopy populator. When sshConfig
// requests SSH it uses the SSH transport (defaulting the timeout to 30s),
// otherwise it uses the esxcli VIB transport.
func createVMDKPopulator(storageApi StorageApi, vmwareClient vmware.Client, sshConfig *SSHConfig) (Populator, error) {
	vmdkApi, implements := storageApi.(VMDKCapable)
	if !implements {
		return nil, fmt.Errorf("storage API does not implement VMDKCapable (required)")
	}
	var (
		pop Populator
		err error
	)
	if sshConfig != nil && sshConfig.UseSSH {
		timeout := sshConfig.TimeoutSeconds
		if timeout == 0 {
			timeout = 30 // default SSH timeout in seconds
		}
		pop, err = NewWithRemoteEsxcliSSH(vmdkApi,
			vmwareClient,
			sshConfig.PrivateKey,
			sshConfig.PublicKey,
			timeout)
	} else {
		pop, err = NewWithRemoteEsxcli(vmdkApi, vmwareClient)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to create VMDK/Xcopy populator: %w", err)
	}
	return pop, nil
}
// canUse reports whether the optimized method for diskType is both enabled
// (not switched off via settings) and implemented by the storage API.
// Any other disk type — including VMDK, which is the unconditional fallback —
// yields false.
func canUse(storageApi StorageApi, diskType DiskType) bool {
	switch diskType {
	case DiskTypeVVol:
		if settings.VVolDisabled {
			return false
		}
		_, implements := storageApi.(VVolCapable)
		return implements
	case DiskTypeRDM:
		if settings.RDMDisabled {
			return false
		}
		_, implements := storageApi.(RDMCapable)
		return implements
	}
	return false
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/task_executor.go | cmd/vsphere-xcopy-volume-populator/internal/populator/task_executor.go | package populator
import (
"context"
"fmt"
"regexp"
"strconv"
"time"
"github.com/vmware/govmomi/object"
"k8s.io/klog/v2"
)
// Unified progress pattern that handles both VIB and SSH output formats:
// captures the integer immediately preceding a '%' sign.
var progressPattern = regexp.MustCompile(`(\d+)\%`)
// TaskExecutor abstracts the transport-specific operations for task execution
// (implemented by the esxcli VIB transport and the SSH transport).
type TaskExecutor interface {
	// StartClone initiates the clone operation and returns task information
	StartClone(ctx context.Context, host *object.HostSystem, datastore, sourcePath, targetLUN string) (*vmkfstoolsTask, error)
	// GetTaskStatus retrieves the current status of the specified task
	GetTaskStatus(ctx context.Context, host *object.HostSystem, datastore, taskId string) (*vmkfstoolsTask, error)
	// CleanupTask cleans up task artifacts
	CleanupTask(ctx context.Context, host *object.HostSystem, datastore, taskId string) error
}
// ParseProgress extracts progress percentage from vmkfstools output.
// Returns -1 with a non-nil error for an empty line, -1 with a nil error when
// the line contains no percentage, otherwise the parsed value (0-100).
// NOTE: callers must check for the -1 "no progress" sentinel even when the
// returned error is nil.
func ParseProgress(lastLine string) (int, error) {
	if lastLine == "" {
		return -1, fmt.Errorf("lastLine is empty")
	}
	// VIB format: "Clone: 15% done."
	match := progressPattern.FindStringSubmatch(lastLine)
	if len(match) > 1 {
		progress, err := strconv.Atoi(match[1])
		if err != nil {
			klog.Warningf("ParseProgress: failed to parse progress number from %q: %v", match[1], err)
			return -1, fmt.Errorf("failed to parse progress number from %q: %v", match[1], err)
		}
		// V(2) rather than Info: this function runs on every poll iteration
		// and logging each successful parse at Info level floods the log.
		klog.V(2).Infof("ParseProgress: extracted progress: %d%%", progress)
		return progress, nil
	}
	return -1, nil
}
// updateTaskStatus fetches the current status of task from the executor,
// forwards a parsed progress percentage on the progress channel, and reports
// on xcopyUsed whether the clone is using XCOPY (1) or not (0).
// NOTE(review): both channel sends block until a receiver is ready — assumes
// the caller drains progress/xcopyUsed continuously; confirm against callers.
func updateTaskStatus(ctx context.Context, task *vmkfstoolsTask, executor TaskExecutor, host *object.HostSystem, datastore string, progress chan<- uint64, xcopyUsed chan<- int) (*vmkfstoolsTask, error) {
	taskStatus, err := executor.GetTaskStatus(ctx, host, datastore, task.TaskId)
	if err != nil {
		return nil, fmt.Errorf("failed to get task status: %w", err)
	}
	klog.V(2).Infof("Task status: %+v", taskStatus)
	// Report progress only when a percentage was actually found.
	// ParseProgress returns (-1, nil) when the line has no percentage;
	// without the >= 0 guard that sentinel would be sent as uint64(-1)
	// (18446744073709551615) on the progress channel.
	if progressValue, err := ParseProgress(taskStatus.LastLine); err == nil && progressValue >= 0 {
		progress <- uint64(progressValue)
	}
	// Report xcopyUsed as 0 or 1
	if taskStatus.XcopyUsed {
		xcopyUsed <- 1
	} else {
		xcopyUsed <- 0
	}
	return taskStatus, nil
}
// ExecuteCloneTask handles the unified task execution logic: it starts the
// clone, polls the task status until an exit code is reported, and cleans up
// task artifacts when done. Progress and xcopy usage are forwarded on the
// provided channels via updateTaskStatus.
// The polling sleeps honor ctx, so a cancelled context (e.g. a lost host
// lease) stops the wait instead of sleeping unconditionally.
func ExecuteCloneTask(ctx context.Context, executor TaskExecutor, host *object.HostSystem, datastore, sourcePath, targetLUN string, progress chan<- uint64, xcopyUsed chan<- int) error {
	// Start the clone task
	task, err := executor.StartClone(ctx, host, datastore, sourcePath, targetLUN)
	if err != nil {
		return fmt.Errorf("failed to start clone task: %w", err)
	}
	klog.Infof("Started clone task %s", task.TaskId)
	// Cleanup task artifacts when done
	if task.TaskId != "" {
		defer func() {
			if cleanupErr := executor.CleanupTask(ctx, host, datastore, task.TaskId); cleanupErr != nil {
				klog.Errorf("Failed cleaning up task artifacts: %v", cleanupErr)
			}
		}()
	}
	// sleep waits one polling interval, aborting early when ctx is cancelled.
	sleep := func() error {
		select {
		case <-time.After(taskPollingInterval):
			return nil
		case <-ctx.Done():
			return fmt.Errorf("clone task polling aborted: %w", ctx.Err())
		}
	}
	// Poll for task completion
	for {
		taskStatus, err := updateTaskStatus(ctx, task, executor, host, datastore, progress, xcopyUsed)
		if err != nil {
			return fmt.Errorf("failed to update task status: %w", err)
		}
		// A non-empty exit code means the process finished; poll once more
		// after a short wait to pick up the final progress/output before
		// deciding the result.
		if taskStatus != nil && taskStatus.ExitCode != "" {
			if err := sleep(); err != nil {
				return err
			}
			taskStatus, err = updateTaskStatus(ctx, task, executor, host, datastore, progress, xcopyUsed)
			if err != nil {
				return fmt.Errorf("failed to update task status: %w", err)
			}
			if taskStatus.ExitCode == "0" {
				klog.Infof("Clone task completed successfully")
				return nil
			}
			return fmt.Errorf("clone task failed with exit code %s, stderr: %s", taskStatus.ExitCode, taskStatus.Stderr)
		}
		if err := sleep(); err != nil {
			return err
		}
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/remote_esxcli_test.go | cmd/vsphere-xcopy-volume-populator/internal/populator/remote_esxcli_test.go | package populator
import (
"context"
"errors"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"go.uber.org/mock/gomock"
vmware_mocks "github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware/mocks"
)
// TestRemoteEsxcli wires Ginkgo into the standard `go test` runner for this
// package's specs.
func TestRemoteEsxcli(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Remote ESXCLI Suite")
}
// These specs verify the retry behavior of rescan(): the device-list command
// is attempted first, and an adapter rescan is issued between attempts until
// the device shows up or rescanRetries is exhausted.
var _ = Describe("rescan", func() {
	var (
		ctrl       *gomock.Controller
		mockClient *vmware_mocks.MockClient
		host       *object.HostSystem
		targetLUN  string
	)
	BeforeEach(func() {
		ctrl = gomock.NewController(GinkgoT())
		mockClient = vmware_mocks.NewMockClient(ctrl)
		host = &object.HostSystem{} // A dummy host system
		targetLUN = "naa.1234567890"
	})
	AfterEach(func() {
		ctrl.Finish()
	})
	Context("when the device is found on the first attempt", func() {
		It("should return nil", func() {
			// No rescan should be issued when the first list succeeds.
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, nil)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when the device is found after a rescan", func() {
		It("should return nil", func() {
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			rescanCmd := []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"}
			// Exact order matters: list (miss) -> rescan -> list (hit).
			gomock.InOrder(
				mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, errors.New("device not found")),
				mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, nil),
				mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, nil),
			)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when the device is never found", func() {
		It("should return an error", func() {
			// One initial list plus one per retry; a rescan between each.
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			rescanCmd := []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"}
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, errors.New("device not found")).Times(rescanRetries + 1)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, nil).Times(rescanRetries)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to find device"))
		})
	})
	Context("when rescan command always fails", func() {
		It("should retry and eventually fail if device not found", func() {
			// Rescan failures must not abort the loop; the final error is
			// still the "device not found" outcome.
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			rescanCmd := []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"}
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, errors.New("device not found")).Times(rescanRetries + 1)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, errors.New("rescan failed")).Times(rescanRetries)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to find device"))
		})
	})
	Context("when rescan command fails", func() {
		It("should retry and eventually succeed if device found", func() {
			// Three list misses and two rescan failures, then both succeed.
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			rescanCmd := []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"}
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, errors.New("device not found")).Times(3)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, nil).Times(1)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, errors.New("rescan failed")).Times(2)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, nil).Times(1)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).ToNot(HaveOccurred())
		})
		It("should retry even when scan fails and eventually succeed if device found", func() {
			// All rescans fail but the device appears on the 4th list anyway.
			listCmd := []string{"storage", "core", "device", "list", "-d", targetLUN}
			rescanCmd := []string{"storage", "core", "adapter", "rescan", "-t", "add", "-a", "1"}
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, errors.New("device not found")).Times(3)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(listCmd)).Return(nil, nil).Times(1)
			mockClient.EXPECT().RunEsxCommand(gomock.Any(), host, gomock.Eq(rescanCmd)).Return(nil, errors.New("rescan failed")).Times(3)
			err := rescan(context.Background(), mockClient, host, targetLUN)
			Expect(err).ToNot(HaveOccurred())
		})
	})
})
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/rdm_populator.go | cmd/vsphere-xcopy-volume-populator/internal/populator/rdm_populator.go | package populator
import (
"fmt"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"k8s.io/klog/v2"
)
// RDMPopulator handles population of RDM-backed disks
type RDMPopulator struct {
	// vSphereClient talks to vCenter/ESXi for disk and host lookups.
	vSphereClient vmware.Client
	// storageApi performs the actual RDM copy on the storage backend.
	storageApi RDMCapable
}
// NewRDMPopulator builds a Populator that copies RDM-backed disks using the
// given storage backend and vSphere client.
func NewRDMPopulator(storageApi RDMCapable, vmwareClient vmware.Client) (Populator, error) {
	p := RDMPopulator{
		storageApi:    storageApi,
		vSphereClient: vmwareClient,
	}
	return &p, nil
}
// Populate performs the RDM copy operation for the given VM disk into the
// persistent volume pv. The final result (nil on success) is always delivered
// on the quit channel, and xcopyUsed receives 0 because RDM copies never use
// XCOPY. hostLocker is accepted to satisfy the Populator interface but is not
// used by this populator.
func (p *RDMPopulator) Populate(vmId string, sourceVMDKFile string, pv PersistentVolume, hostLocker Hostlocker, progress chan<- uint64, xcopyUsed chan<- int, quit chan error) (errFinal error) {
	defer func() {
		if r := recover(); r != nil {
			klog.Errorf("RDM Populator: recovered from panic: %v", r)
			// Surface the panic to the caller: the previous code left
			// errFinal nil after a panic, so quit reported success.
			if errFinal == nil {
				errFinal = fmt.Errorf("RDM populator panicked: %v", r)
			}
		}
		klog.Infof("RDM Populator: exiting with final error: %v", errFinal)
		quit <- errFinal
	}()
	klog.Infof("RDM Populator: Starting copy operation")
	klog.Infof("RDM Populator: VM ID: %s, Source VMDK: %s, Target: %s", vmId, sourceVMDKFile, pv.Name)
	// RDM copy does not use xcopy
	xcopyUsed <- 0
	// Perform the RDM copy operation
	klog.Infof("RDM Populator: Starting RDM copy operation...")
	err := p.storageApi.RDMCopy(p.vSphereClient, vmId, sourceVMDKFile, pv, progress)
	if err != nil {
		klog.Errorf("RDM Populator: RDM copy operation failed: %v", err)
		return fmt.Errorf("failed to copy RDM disk: %w", err)
	}
	klog.Infof("RDM Populator: Copy operation completed successfully")
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/populate_test.go | cmd/vsphere-xcopy-volume-populator/internal/populator/populate_test.go | package populator_test
import (
"testing"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
populator_mocks "github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator/populator_mocks"
"go.uber.org/mock/gomock"
)
// TestVMDisk_Path verifies that VMDisk.Path assembles the canonical
// /vmfs/volumes/<datastore>/<vm home>/<vmdk> path, including the degenerate
// all-empty case.
func TestVMDisk_Path(t *testing.T) {
	cases := []struct {
		name string
		disk populator.VMDisk
		want string
	}{
		{
			name: "Standard VMDisk Path",
			disk: populator.VMDisk{Datastore: "mydatastore", VmHomeDir: "vm-1", VmdkFile: "disk-1.vmdk"},
			want: "/vmfs/volumes/mydatastore/vm-1/disk-1.vmdk",
		},
		{
			name: "Empty VMDisk fields",
			disk: populator.VMDisk{},
			want: "/vmfs/volumes///",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.disk.Path(); got != tc.want {
				t.Errorf("\ngot %q\nwant %q", got, tc.want)
			}
		})
	}
}
// TestParseVmdkPath exercises ParseVmdkPath against well-formed
// "[datastore] dir/file.vmdk" inputs and a range of malformed variants.
func TestParseVmdkPath(t *testing.T) {
	cases := []struct {
		name    string
		path    string
		want    populator.VMDisk
		wantErr bool
	}{
		{
			name: "Valid VMDK Path",
			path: "[mydatastore] vm-1/disk-1.vmdk",
			want: populator.VMDisk{VmHomeDir: "vm-1", Datastore: "mydatastore", VmdkFile: "disk-1.vmdk"},
		},
		{
			name: "Valid VMDK Path with spaces",
			path: "[my datastore] my vm/my vm-disk-1.vmdk",
			want: populator.VMDisk{VmHomeDir: "my vm", Datastore: "my datastore", VmdkFile: "my vm-disk-1.vmdk"},
		},
		{name: "Invalid VMDK Path - missing ']'", path: "[mydatastore myvm/myvm.vmdk", wantErr: true},
		{name: "Invalid VMDK Path - missing '/' ", path: "[mydatastore] myvm_myvm.vmdk", wantErr: true},
		{name: "Empty VMDK Path", path: "", wantErr: true},
		{name: "VMDK Path with only datastore", path: "[mydatastore]", wantErr: true},
		{name: "VMDK Path with multiple slashes in path", path: "[mydatastore] myvm/subdir/myvm.vmdk", wantErr: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := populator.ParseVmdkPath(tc.path)
			if tc.wantErr {
				if err == nil {
					t.Errorf("expected an error, but got none")
				}
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if got != tc.want {
				t.Errorf("\ngot %+v,\nwant %+v", got, tc.want)
			}
		})
	}
}
// TestPopulate drives the generated Populator mock to confirm the expected
// Populate call shape and a nil error result.
func TestPopulate(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := populator_mocks.NewMockPopulator(ctrl)
	pv := populator.PersistentVolume{Name: "test-pv", VolumeHandle: "test-handle"}

	progress := make(chan uint64)
	xcopyUsed := make(chan int)
	quit := make(chan error)

	mock.EXPECT().Populate("vm-1", "source.vmdk", pv, progress, quit, xcopyUsed).Return(nil)

	if err := mock.Populate("vm-1", "source.vmdk", pv, progress, quit, xcopyUsed); err != nil {
		t.Errorf("Populate() error = %v, wantErr %v", err, false)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/vib.go | cmd/vsphere-xcopy-volume-populator/internal/populator/vib.go | package populator
import (
"context"
"fmt"
"strings"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"k8s.io/klog/v2"
)
const (
	// vibName is the identifier of the vmkfstools wrapper VIB on the ESXi host.
	vibName = "vmkfstools-wrapper"
	// vibLocation is the path of the bundled VIB file inside this image.
	vibLocation = "/bin/vmkfstools-wrapper.vib"
)

// VibVersion is the wrapper VIB version; it is set by ldflags at build time.
var VibVersion = "x.x.x"
// ensureVib checks the vmkfstools-wrapper VIB version installed on the target
// ESXi host and, when it differs from desiredVibVersion, uploads the bundled
// VIB to the given datastore and installs it.
func ensureVib(client vmware.Client, esx *object.HostSystem, datastore string, desiredVibVersion string) error {
	// Log the version we are converging to (the parameter), not the global
	// build-time VibVersion, which callers may override.
	klog.Infof("ensuring vib version on ESXi %s: %s", esx.Name(), desiredVibVersion)
	version, err := getViBVersion(client, esx)
	if err != nil {
		return fmt.Errorf("failed to get the VIB version from ESXi %s: %w", esx.Name(), err)
	}
	klog.Infof("current vib version on ESXi %s: %s", esx.Name(), version)
	if version == desiredVibVersion {
		return nil
	}
	dc, err := getHostDC(esx)
	if err != nil {
		return err
	}
	vibPath, err := uploadVib(client, dc, datastore)
	if err != nil {
		return fmt.Errorf("failed to upload the VIB to ESXi %s: %w", esx.Name(), err)
	}
	klog.Infof("uploaded vib to ESXi %s", esx.Name())
	if err := installVib(client, esx, vibPath); err != nil {
		return fmt.Errorf("failed to install the VIB on ESXi %s: %w", esx.Name(), err)
	}
	klog.Infof("installed vib on ESXi %s version %s", esx.Name(), desiredVibVersion)
	return nil
}
// getHostDC walks the vSphere inventory tree upwards from the given host
// until it reaches the owning Datacenter object.
func getHostDC(esx *object.HostSystem) (*object.Datacenter, error) {
	ctx := context.Background()
	pc := property.DefaultCollector(esx.Client())

	var hostMo mo.HostSystem
	if err := pc.RetrieveOne(ctx, esx.Reference(), []string{"parent"}, &hostMo); err != nil {
		// Return the error instead of klog.Fatalf: a library helper with an
		// error return must not terminate the whole process.
		return nil, fmt.Errorf("failed to retrieve host parent: %w", err)
	}

	// Walk the parents of the host up to the datacenter. The nil check also
	// guards against a host with no parent, which previously panicked.
	currentParentRef := hostMo.Parent
	for currentParentRef != nil {
		if currentParentRef.Type == "Datacenter" {
			finder := find.NewFinder(esx.Client(), true)
			datacenter, err := finder.Datacenter(ctx, currentParentRef.String())
			if err != nil {
				return nil, err
			}
			return datacenter, nil
		}
		var genericParentMo mo.ManagedEntity
		if err := pc.RetrieveOne(ctx, *currentParentRef, []string{"parent"}, &genericParentMo); err != nil {
			return nil, fmt.Errorf("failed to retrieve intermediate parent: %w", err)
		}
		currentParentRef = genericParentMo.Parent
	}
	return nil, fmt.Errorf("could not determine datacenter for host %q", esx.Name())
}
// getViBVersion returns the installed vmkfstools-wrapper VIB version on the
// host, or "" (with a nil error) when the VIB is not installed.
func getViBVersion(client vmware.Client, esxi *object.HostSystem) (string, error) {
	r, err := client.RunEsxCommand(context.Background(), esxi, []string{"software", "vib", "get", "-n", vibName})
	if err != nil {
		vFault, conversionErr := vmware.ErrToFault(err)
		if conversionErr != nil {
			return "", err
		}
		if vFault != nil {
			for _, m := range vFault.ErrMsgs {
				if strings.Contains(m, "[NoMatchError]") {
					// The VIB is simply not installed; report empty version.
					return "", nil
				}
			}
		}
		return "", err
	}
	klog.Infof("reply from get vib %v", r)
	// Guard the reply: indexing r[0] unconditionally panicked on an empty
	// response.
	if len(r) == 0 {
		return "", fmt.Errorf("empty reply from 'software vib get' on ESXi %s", esxi.Name())
	}
	return r[0].Value("Version"), nil
}
// uploadVib uploads the bundled VIB file to the given datastore and returns
// the path of the uploaded file as seen from the ESXi host.
func uploadVib(client vmware.Client, dc *object.Datacenter, datastore string) (string, error) {
	ds, err := client.GetDatastore(context.Background(), dc, datastore)
	if err != nil {
		// This failure is locating the datastore, not the upload itself; the
		// previous message ("failed to upload file") was misleading.
		return "", fmt.Errorf("failed to get datastore %s: %w", datastore, err)
	}
	if err = ds.UploadFile(context.Background(), vibLocation, vibName+".vib", nil); err != nil {
		return "", fmt.Errorf("failed to upload %s: %w", vibLocation, err)
	}
	return fmt.Sprintf("/vmfs/volumes/%s/%s", datastore, vibName+".vib"), nil
}
// installVib force-installs the VIB located at vibPath on the given ESXi host.
func installVib(client vmware.Client, esx *object.HostSystem, vibPath string) error {
	r, err := client.RunEsxCommand(context.Background(), esx, []string{"software", "vib", "install", "-f", "1", "-v", vibPath})
	if err != nil {
		return err
	}
	// Copy-paste fix: this is the install reply, not the "get vib" reply.
	klog.Infof("reply from vib install %v", r)
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/vvol_populator.go | cmd/vsphere-xcopy-volume-populator/internal/populator/vvol_populator.go | package populator
import (
"fmt"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/vmware"
"k8s.io/klog/v2"
)
// VvolPopulator copies VVol-backed disks into persistent volumes using a
// VVol-capable storage backend.
type VvolPopulator struct {
	// vSphereClient talks to vCenter/ESXi for disk and host lookups.
	vSphereClient vmware.Client
	// storageApi performs the actual VVol copy on the storage backend.
	storageApi VVolCapable
}
// NewVvolPopulator builds a Populator that copies VVol-backed disks using the
// given storage backend and vSphere client.
func NewVvolPopulator(storageApi VVolCapable, vmwareClient vmware.Client) (Populator, error) {
	p := VvolPopulator{
		storageApi:    storageApi,
		vSphereClient: vmwareClient,
	}
	return &p, nil
}
// Populate performs the VVol copy operation for the given VM disk into the
// persistent volume pv. The final result (nil on success) is always delivered
// on the quit channel, and xcopyUsed receives 0 because VVol copies never use
// XCOPY. hostLocker is accepted to satisfy the Populator interface but is not
// used by this populator.
func (p *VvolPopulator) Populate(vmId string, sourceVMDKFile string, pv PersistentVolume, hostLocker Hostlocker, progress chan<- uint64, xcopyUsed chan<- int, quit chan error) (errFinal error) {
	defer func() {
		if r := recover(); r != nil {
			klog.Errorf("VVol Populator: recovered from panic: %v", r)
			// Surface the panic to the caller: the previous code left
			// errFinal nil after a panic, so quit reported success.
			if errFinal == nil {
				errFinal = fmt.Errorf("VVol populator panicked: %v", r)
			}
		}
		klog.Infof("VVol Populator: exiting with final error: %v", errFinal)
		quit <- errFinal
	}()
	klog.Infof("VVol Populator: Starting copy operation")
	klog.Infof("VVol Populator: VM ID: %s, Source VMDK: %s, Target: %s", vmId, sourceVMDKFile, pv.Name)
	// VVol copy does not use xcopy
	xcopyUsed <- 0
	// Try using vSphere API to discover source volume first (preferred method)
	klog.Infof("VVol Populator: Starting VVol copy operation...")
	err := p.storageApi.VvolCopy(p.vSphereClient, vmId, sourceVMDKFile, pv, progress)
	if err != nil {
		klog.Errorf("VVol Populator: discovery of source volume using vSphere API failed: %v", err)
		return fmt.Errorf("failed to copy VMDK using VVol storage API: %w", err)
	}
	klog.Infof("VVol Populator: Copy operation completed successfully")
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/mocks/storage_mock_client.go | cmd/vsphere-xcopy-volume-populator/internal/populator/mocks/storage_mock_client.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator (interfaces: StorageApi)
//
// Generated by this command:
//
// mockgen -destination=mocks/storage_mock_client.go -package=mocks . StorageApi
//
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
populator "github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
gomock "go.uber.org/mock/gomock"
)
// NOTE(review): this file is generated by mockgen (see header). Do not edit
// by hand — change the StorageApi interface and re-run the mockgen command
// from the header instead.
//
// MockStorageApi is a mock of StorageApi interface.
type MockStorageApi struct {
	ctrl     *gomock.Controller
	recorder *MockStorageApiMockRecorder
	isgomock struct{}
}

// MockStorageApiMockRecorder is the mock recorder for MockStorageApi.
type MockStorageApiMockRecorder struct {
	mock *MockStorageApi
}

// NewMockStorageApi creates a new mock instance.
func NewMockStorageApi(ctrl *gomock.Controller) *MockStorageApi {
	mock := &MockStorageApi{ctrl: ctrl}
	mock.recorder = &MockStorageApiMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStorageApi) EXPECT() *MockStorageApiMockRecorder {
	return m.recorder
}

// CurrentMappedGroups mocks base method.
func (m *MockStorageApi) CurrentMappedGroups(targetLUN populator.LUN, context populator.MappingContext) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CurrentMappedGroups", targetLUN, context)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CurrentMappedGroups indicates an expected call of CurrentMappedGroups.
func (mr *MockStorageApiMockRecorder) CurrentMappedGroups(targetLUN, context any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentMappedGroups", reflect.TypeOf((*MockStorageApi)(nil).CurrentMappedGroups), targetLUN, context)
}

// EnsureClonnerIgroup mocks base method.
func (m *MockStorageApi) EnsureClonnerIgroup(initiatorGroup string, clonnerIqn []string) (populator.MappingContext, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "EnsureClonnerIgroup", initiatorGroup, clonnerIqn)
	ret0, _ := ret[0].(populator.MappingContext)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// EnsureClonnerIgroup indicates an expected call of EnsureClonnerIgroup.
func (mr *MockStorageApiMockRecorder) EnsureClonnerIgroup(initiatorGroup, clonnerIqn any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureClonnerIgroup", reflect.TypeOf((*MockStorageApi)(nil).EnsureClonnerIgroup), initiatorGroup, clonnerIqn)
}

// Map mocks base method.
func (m *MockStorageApi) Map(initatorGroup string, targetLUN populator.LUN, context populator.MappingContext) (populator.LUN, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Map", initatorGroup, targetLUN, context)
	ret0, _ := ret[0].(populator.LUN)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Map indicates an expected call of Map.
func (mr *MockStorageApiMockRecorder) Map(initatorGroup, targetLUN, context any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Map", reflect.TypeOf((*MockStorageApi)(nil).Map), initatorGroup, targetLUN, context)
}

// ResolvePVToLUN mocks base method.
func (m *MockStorageApi) ResolvePVToLUN(persistentVolume populator.PersistentVolume) (populator.LUN, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ResolvePVToLUN", persistentVolume)
	ret0, _ := ret[0].(populator.LUN)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ResolvePVToLUN indicates an expected call of ResolvePVToLUN.
func (mr *MockStorageApiMockRecorder) ResolvePVToLUN(persistentVolume any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolvePVToLUN", reflect.TypeOf((*MockStorageApi)(nil).ResolvePVToLUN), persistentVolume)
}

// UnMap mocks base method.
func (m *MockStorageApi) UnMap(initatorGroup string, targetLUN populator.LUN, context populator.MappingContext) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UnMap", initatorGroup, targetLUN, context)
	ret0, _ := ret[0].(error)
	return ret0
}

// UnMap indicates an expected call of UnMap.
func (mr *MockStorageApiMockRecorder) UnMap(initatorGroup, targetLUN, context any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnMap", reflect.TypeOf((*MockStorageApi)(nil).UnMap), initatorGroup, targetLUN, context)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/mocks/hostlocker_mock.go | cmd/vsphere-xcopy-volume-populator/internal/populator/mocks/hostlocker_mock.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator (interfaces: Hostlocker)
//
// Generated by this command:
//
// mockgen -destination=mocks/hostlocker_mock.go -package=mocks . Hostlocker
//
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "go.uber.org/mock/gomock"
)
// NOTE(review): this file is generated by mockgen (see header). Do not edit
// by hand — change the Hostlocker interface and re-run the mockgen command
// from the header instead.
//
// MockHostlocker is a mock of Hostlocker interface.
type MockHostlocker struct {
	ctrl     *gomock.Controller
	recorder *MockHostlockerMockRecorder
	isgomock struct{}
}

// MockHostlockerMockRecorder is the mock recorder for MockHostlocker.
type MockHostlockerMockRecorder struct {
	mock *MockHostlocker
}

// NewMockHostlocker creates a new mock instance.
func NewMockHostlocker(ctrl *gomock.Controller) *MockHostlocker {
	mock := &MockHostlocker{ctrl: ctrl}
	mock.recorder = &MockHostlockerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHostlocker) EXPECT() *MockHostlockerMockRecorder {
	return m.recorder
}

// WithLock mocks base method.
func (m *MockHostlocker) WithLock(ctx context.Context, hostID string, work func(context.Context) error) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WithLock", ctx, hostID, work)
	ret0, _ := ret[0].(error)
	return ret0
}

// WithLock indicates an expected call of WithLock.
func (mr *MockHostlockerMockRecorder) WithLock(ctx, hostID, work any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithLock", reflect.TypeOf((*MockHostlocker)(nil).WithLock), ctx, hostID, work)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/populator/populator_mocks/populator_mock.go | cmd/vsphere-xcopy-volume-populator/internal/populator/populator_mocks/populator_mock.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator (interfaces: Populator)
//
// Generated by this command:
//
// mockgen -destination=internal/populator/mocks/populator_mock.go -package=mocks github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator Populator
//
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
populator "github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
gomock "go.uber.org/mock/gomock"
)
// NOTE(review): this file is generated by mockgen (see header). Do not edit
// by hand — change the Populator interface and re-run the mockgen command
// from the header instead.
//
// MockPopulator is a mock of Populator interface.
type MockPopulator struct {
	ctrl     *gomock.Controller
	recorder *MockPopulatorMockRecorder
	isgomock struct{}
}

// MockPopulatorMockRecorder is the mock recorder for MockPopulator.
type MockPopulatorMockRecorder struct {
	mock *MockPopulator
}

// NewMockPopulator creates a new mock instance.
func NewMockPopulator(ctrl *gomock.Controller) *MockPopulator {
	mock := &MockPopulator{ctrl: ctrl}
	mock.recorder = &MockPopulatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPopulator) EXPECT() *MockPopulatorMockRecorder {
	return m.recorder
}

// Populate mocks base method.
func (m *MockPopulator) Populate(vmId, sourceVMDKFile string, persistentVolume populator.PersistentVolume, progress chan<- uint64, quit chan error, xcopyUsed chan<- int) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Populate", vmId, sourceVMDKFile, persistentVolume, progress, quit, xcopyUsed)
	ret0, _ := ret[0].(error)
	return ret0
}

// Populate indicates an expected call of Populate.
func (mr *MockPopulatorMockRecorder) Populate(vmId, sourceVMDKFile, persistentVolume, progress, quit, xcopyUsed any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Populate", reflect.TypeOf((*MockPopulator)(nil).Populate), vmId, sourceVMDKFile, persistentVolume, progress, quit, xcopyUsed)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/powerflex/powerflex.go | cmd/vsphere-xcopy-volume-populator/internal/powerflex/powerflex.go | package powerflex
import (
"fmt"
"slices"
"time"
"github.com/dell/goscaleio"
siotypes "github.com/dell/goscaleio/types/v1"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
const (
	// SYSTEM_ID_ENV_KEY names the environment variable carrying the
	// PowerFlex system ID.
	SYSTEM_ID_ENV_KEY = "POWERFLEX_SYSTEM_ID"
	// sdcIDContextKey is the MappingContext key under which the matched ESXi
	// SDC ID is stored by EnsureClonnerIgroup.
	sdcIDContextKey string = "sdcId"
)
// PowerflexClonner implements the storage API against a Dell
// PowerFlex/ScaleIO gateway.
type PowerflexClonner struct {
	// Client is the authenticated goscaleio gateway client.
	Client *goscaleio.Client
	// systemId identifies the PowerFlex system instance to operate on.
	systemId string
}
// CurrentMappedGroups implements populator.StorageApi.
// It returns the SDC IDs currently mapped to the volume named by targetLUN,
// or an error when the volume cannot be uniquely resolved or has no mappings.
func (p *PowerflexClonner) CurrentMappedGroups(targetLUN populator.LUN, mappingContext populator.MappingContext) ([]string, error) {
	klog.Infof("getting current mapping to volume %+v", targetLUN)
	klog.Infof("going to sleep to give csi time")
	// NOTE(review): fixed 20s delay, presumably to let the CSI driver finish
	// attaching before the mapping is inspected — confirm, and consider
	// polling with a timeout instead of a blind sleep.
	time.Sleep(20 * time.Second)
	v, err := p.Client.GetVolume("", "", "", targetLUN.Name, false)
	if err != nil {
		return nil, err
	}
	currentMappedSdcs := []string{}
	if len(v) != 1 {
		return nil, fmt.Errorf("found %d volumes while expecting one. Target volume ID %s", len(v), targetLUN.ProviderID)
	}
	klog.Infof("current mapping %+v", v[0].MappedSdcInfo)
	if len(v[0].MappedSdcInfo) == 0 {
		klog.Errorf("found 0 Mapped SDC Info for target volume %+v", targetLUN)
		return []string{}, fmt.Errorf("found 0 Mapped SDC Info for target volume %+v", targetLUN)
	}
	for _, sdcInfo := range v[0].MappedSdcInfo {
		currentMappedSdcs = append(currentMappedSdcs, sdcInfo.SdcID)
	}
	return currentMappedSdcs, nil
}
// EnsureClonnerIgroup implements populator.StorageApi.
// PowerFlex has no igroup to create; instead this locates the ESXi SDC whose
// GUID appears in clonnerIqn and records its ID in the returned mapping
// context under sdcIDContextKey.
func (p *PowerflexClonner) EnsureClonnerIgroup(initiatorGroup string, clonnerIqn []string) (populator.MappingContext, error) {
	klog.Infof("ensuring initiator group %s for clonners %v", initiatorGroup, clonnerIqn)
	mappingCtx := make(map[string]any)

	system, err := p.Client.FindSystem(p.systemId, "", "")
	if err != nil {
		return nil, err
	}
	sdcs, err := system.GetSdc()
	if err != nil {
		return nil, err
	}

	// Scan all SDCs and pick the first ESXi one whose GUID matches.
	for i := range sdcs {
		sdc := sdcs[i]
		if sdc.OSType != "Esx" {
			continue
		}
		klog.Infof("Comparing with sdc %+v", sdc)
		if !slices.Contains(clonnerIqn, sdc.SdcGUID) {
			continue
		}
		klog.Infof("found compatible SDC: %+v", sdc)
		mappingCtx[sdcIDContextKey] = sdc.ID
		return mappingCtx, nil
	}
	return mappingCtx, fmt.Errorf("could not find the SDC adapter on ESXI")
}
// Map maps the volume named by targetLUN to the SDC identified by
// initiatorGroup and fills in the LUN's NAA (eui) identifier on success.
func (p *PowerflexClonner) Map(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) (populator.LUN, error) {
	klog.Infof("mapping volume %s to initiator group %s with context %v", targetLUN.Name, initiatorGroup, mappingContext)
	sdc, volume, err := p.fetchSdcVolume(initiatorGroup, targetLUN, mappingContext)
	if err != nil {
		return targetLUN, err
	}

	params := siotypes.MapVolumeSdcParam{
		SdcID:                 sdc.Sdc.ID,
		AllowMultipleMappings: "true",
	}
	if err := volume.MapVolumeSdc(&params); err != nil {
		return targetLUN, fmt.Errorf("failed to map the volume id %s to sdc id %s: %w", volume.Volume.ID, sdc.Sdc.ID, err)
	}

	// the serial or the NAA is the {$systemID$volumeID}
	targetLUN.NAA = fmt.Sprintf("eui.%s%s", sdc.Sdc.SystemID, volume.Volume.ID)
	return targetLUN, nil
}
// fetchSdcVolume resolves the SDC identified (by ID) in initatorGroup and the
// single PowerFlex volume matching targetLUN.Name, returning service wrappers
// for both. (A previous copy-pasted comment wrongly labeled this as the
// StorageApi Map implementation.)
func (p *PowerflexClonner) fetchSdcVolume(initatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) (*goscaleio.Sdc, *goscaleio.Volume, error) {
	// TODO rgolan do we need an instanceID as part of the client?
	// probably yes for multiple instances
	system, err := p.Client.FindSystem(p.systemId, "", "")
	if err != nil {
		return nil, nil, err
	}
	sdc, err := system.FindSdc("ID", initatorGroup)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to locate sdc by sdc guid %s", initatorGroup)
	}
	klog.Infof("found sdc name %s id %s", sdc.Sdc.Name, sdc.Sdc.ID)
	v, err := p.Client.GetVolume("", "", "", targetLUN.Name, false)
	if err != nil {
		return nil, nil, err
	}
	if len(v) != 1 {
		return nil, nil, fmt.Errorf("expected a single volume but found %d", len(v))
	}
	volumeService := goscaleio.NewVolume(p.Client)
	volumeService.Volume = v[0]
	return sdc, volumeService, nil
}
// ResolvePVToLUN implements populator.StorageApi.
// It resolves a PersistentVolume to the underlying PowerFlex volume using the
// PV's 'Name' volume attribute and returns a LUN describing it.
func (p *PowerflexClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	name := pv.VolumeAttributes["Name"]
	if name == "" {
		return populator.LUN{},
			fmt.Errorf("the PersistentVolume attribute 'Name' is empty and " +
				"essential to locate the underlying volume in PowerFlex")
	}
	id, err := p.Client.FindVolumeID(name)
	if err != nil {
		return populator.LUN{}, err
	}
	v, err := p.Client.GetVolume("", id, "", "", false)
	if err != nil {
		// Bug fix: the error was previously dropped (`return ..., nil`),
		// letting callers proceed with a zero-valued LUN.
		return populator.LUN{}, err
	}
	if len(v) != 1 {
		return populator.LUN{}, fmt.Errorf("failed to locate a single volume by name %s", name)
	}
	klog.Infof("found volume %s", v[0].Name)
	return populator.LUN{
		Name:         v[0].Name,
		ProviderID:   v[0].ID,
		VolumeHandle: pv.VolumeHandle,
	}, nil
}
// SciniRequired reports whether the scini kernel module (the PowerFlex SDC
// driver) must be present on the host; always true for PowerFlex.
func (p *PowerflexClonner) SciniRequired() bool {
	return true
}
// UnMap implements populator.StorageApi.
// It unmaps the volume from the SDC named by initatorGroup, or from all SDCs
// when the mapping context carries UnmapAllSdc=true.
func (p *PowerflexClonner) UnMap(initatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) error {
	unmapAll, _ := mappingContext["UnmapAllSdc"].(bool)
	if unmapAll {
		klog.Infof("unmapping all from volume %s", targetLUN.Name)
	} else {
		klog.Infof("unmapping volume %s from initiator group %s", targetLUN.Name, initatorGroup)
	}

	sdc, volume, err := p.fetchSdcVolume(initatorGroup, targetLUN, mappingContext)
	if err != nil {
		return err
	}

	var params siotypes.UnmapVolumeSdcParam
	if unmapAll {
		params = siotypes.UnmapVolumeSdcParam{AllSdcs: "true"}
	} else {
		params = siotypes.UnmapVolumeSdcParam{SdcID: sdc.Sdc.ID}
	}
	return volume.UnmapVolumeSdc(&params)
}
// NewPowerflexClonner authenticates against the ScaleIO/PowerFlex gateway at
// hostname and returns a clonner bound to the given systemId. systemId must
// not be empty; its value comes from the vxflexos-config secret of the
// PowerFlex CSI deployment.
func NewPowerflexClonner(hostname, username, password string, sslSkipVerify bool, systemId string) (PowerflexClonner, error) {
	if systemId == "" {
		return PowerflexClonner{}, fmt.Errorf("systemId is empty. Make sure to pass systemId using the env variable %q. The value can be taken from the vxflexos-config secret under the powerflex CSI deployment", SYSTEM_ID_ENV_KEY)
	}
	client, err := goscaleio.NewClientWithArgs(hostname, "", 10000, sslSkipVerify, true)
	if err != nil {
		return PowerflexClonner{}, err
	}
	_, err = client.Authenticate(&goscaleio.ConfigConnect{
		Endpoint: hostname,
		Username: username,
		Password: password,
		Insecure: sslSkipVerify,
	})
	if err != nil {
		return PowerflexClonner{}, fmt.Errorf("error authenticating: %w", err)
	}
	// message fix: "successfuly" -> "successfully"
	klog.Infof("successfully logged in to ScaleIO Gateway at %s version %s", client.GetConfigConnect().Endpoint, client.GetConfigConnect().Version)
	return PowerflexClonner{Client: client, systemId: systemId}, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3client.go | cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3client.go | package primera3par
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/fcutil"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
// Primera3ParClient abstracts the subset of the HPE Primera / 3PAR WSAPI
// used by the clonner: session handling, host and host-set provisioning,
// and VLUN (volume export) lookup and lifecycle.
type Primera3ParClient interface {
	// GetSessionKey returns a valid WSAPI session key.
	GetSessionKey() (string, error)
	// EnsureLunMapped exports targetLUN to the host set derived from
	// initiatorGroup, creating the VLUN when needed.
	EnsureLunMapped(initiatorGroup string, targetLUN populator.LUN) (populator.LUN, error)
	// LunUnmap removes the VLUN exporting lunName to the host set derived
	// from initiatorGroupName.
	LunUnmap(ctx context.Context, initiatorGroupName string, lunName string) error
	// EnsureHostsWithIds finds or creates hosts for the adapter ids and
	// returns their names.
	EnsureHostsWithIds(adapterIds []string) ([]string, error)
	// EnsureHostSetExists creates the named host set when missing.
	EnsureHostSetExists(hostSetName string) error
	// AddHostToHostSet adds hostName to hostSetName.
	AddHostToHostSet(hostSetName string, hostName string) error
	// GetLunDetailsByVolumeName fills lun with details of the volume lunName.
	GetLunDetailsByVolumeName(lunName string, lun populator.LUN) (populator.LUN, error)
	// CurrentMappedGroups returns the hosts the volume is exported to.
	CurrentMappedGroups(volumeName string, mappingContext populator.MappingContext) ([]string, error)
}
// HostsResponse is the WSAPI envelope returned by GET /api/v1/hosts queries.
type HostsResponse struct {
	Total   int    `json:"total"`
	Members []Host `json:"members"`
}

// Host mirrors a WSAPI host object, including its FC and iSCSI paths.
type Host struct {
	ID          int         `json:"id"`
	Name        string      `json:"name"`
	Descriptors Descriptor  `json:"descriptors"`
	FCPaths     []FCPath    `json:"FCPaths"`
	ISCSIPaths  []ISCSIPath `json:"iSCSIPaths"`
	Persona     int         `json:"persona"`
	Links       []Link      `json:"links"`
}

// Descriptor carries host metadata; only the OS string is modeled here.
type Descriptor struct {
	OS string `json:"os"`
}

// FCPath is a single Fibre Channel path (WWPN) of a host.
type FCPath struct {
	WWN string `json:"wwpn"`
}

// ISCSIPath is a single iSCSI path (name plus address) of a host.
type ISCSIPath struct {
	Name      string `json:"name"`
	IPAddr    string `json:"IPAddr"`
	HostSpeed int    `json:"hostSpeed"`
}

// Link is a generic WSAPI hypermedia link.
type Link struct {
	Href string `json:"href"`
	Rel  string `json:"rel"`
}
// Primera3ParClientWsImpl is the WSAPI-backed implementation of
// Primera3ParClient. SessionKey and SessionStartTime cache the login session
// so GetSessionKey can reuse it while it is still fresh.
// NOTE(review): the session fields are mutated without synchronization —
// presumably single-goroutine use; confirm before sharing across goroutines.
type Primera3ParClientWsImpl struct {
	BaseURL          string
	SessionKey       string
	Password         string
	Username         string
	HTTPClient       *http.Client
	SessionStartTime time.Time
}
// NewPrimera3ParClientWsImpl builds a WSAPI client for the given array
// endpoint and credentials. TLS certificate verification is disabled when
// skipSSLVerification is true.
func NewPrimera3ParClientWsImpl(storageHostname, storageUsername, storagePassword string, skipSSLVerification bool) Primera3ParClientWsImpl {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: skipSSLVerification}, // Disable SSL verification
	}
	client := Primera3ParClientWsImpl{
		BaseURL:    storageHostname,
		Username:   storageUsername,
		Password:   storagePassword,
		HTTPClient: &http.Client{Transport: transport},
	}
	return client
}
// EnsureHostsWithIds returns the host names connected to the given adapter
// ids. An existing host is reused; otherwise a new host with a random
// 10-character name is created for the adapter (a newly created host carries
// one path, while an existing host may aggregate several). The result is
// deduplicated and free of empty names.
func (p *Primera3ParClientWsImpl) EnsureHostsWithIds(adapterIds []string) ([]string, error) {
	// bug fix: was make([]string, len(adapterIds)), which pre-filled the
	// slice with empty strings that later appends left in place.
	hostnames := make([]string, 0, len(adapterIds))
	for _, adapterId := range adapterIds {
		hostName, err := p.getHostByAdapterId(adapterId)
		if err != nil {
			return nil, fmt.Errorf("failed to get host by adapterId: %w", err)
		}
		if hostName != "" {
			hostnames = append(hostnames, hostName)
			continue
		}
		// No host for this adapter yet: create one with a short random name.
		hostName = uuid.New().String()[:10]
		if err := p.createHost(hostName, adapterId); err != nil {
			return nil, err
		}
		hostnames = append(hostnames, hostName)
	}
	return cleanHostnames(hostnames), nil
}
// cleanHostnames drops empty strings and duplicates from hosts while
// preserving the order of first appearance.
func cleanHostnames(hosts []string) []string {
	var unique []string
	seen := make(map[string]struct{}, len(hosts))
	for _, name := range hosts {
		if name == "" {
			continue
		}
		if _, dup := seen[name]; dup {
			continue
		}
		seen[name] = struct{}{}
		unique = append(unique, name)
	}
	return unique
}
// getHostByAdapterId looks up an existing host by an FC ("fc.WWNN:WWPN") or
// iSCSI ("iqn....") adapter id. It returns the host name, or "" when no host
// matches or the adapter type is unsupported.
func (p *Primera3ParClientWsImpl) getHostByAdapterId(id string) (string, error) {
	var rawFilter string
	switch {
	case strings.HasPrefix(id, "fc."):
		parts := strings.SplitN(strings.TrimPrefix(id, "fc."), ":", 2)
		if len(parts) != 2 {
			return "", fmt.Errorf("invalid FC adapter id %q", id)
		}
		rawFilter = fmt.Sprintf(`" FCPaths[wwn EQ %s] "`, sanitizeWWN(parts[1]))
	case strings.HasPrefix(id, "iqn."):
		rawFilter = fmt.Sprintf(`" iSCSIPaths[name EQ %s] "`, id)
	default:
		klog.Infof("host with adapterId %s not found since this adapter type is not supported", id)
		return "", nil
	}
	uri := fmt.Sprintf("%s/api/v1/hosts?query=%s", p.BaseURL, url.PathEscape(rawFilter))
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	var hosts HostsResponse
	if err := p.doRequestUnmarshalResponse(req, "getHostByAdapterId", &hosts); err != nil {
		return "", err
	}
	if len(hosts.Members) == 0 {
		return "", nil
	}
	return hosts.Members[0].Name, nil
}
// hostExists reports whether a host object named hostname exists on the array.
func (p *Primera3ParClientWsImpl) hostExists(hostname string) (bool, error) {
	endpoint := fmt.Sprintf("%s/api/v1/hosts/%s", p.BaseURL, hostname)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return false, fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := p.doRequest(req, "hostExists")
	if err != nil {
		return false, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	body, _ := io.ReadAll(resp.Body)
	return false, fmt.Errorf("unexpected response: %d, body: %s", resp.StatusCode, string(body))
}
// createHost registers a new host named hostname with the given adapter id.
// FC ids ("fc.WWNN:WWPN", ':' or '-' separated) are split into their WWNs;
// anything else is treated as an iSCSI name. The host is created with
// persona 11.
func (p *Primera3ParClientWsImpl) createHost(hostname, adapterId string) error {
	payload := map[string]interface{}{
		"name":    hostname,
		"persona": 11,
	}
	if strings.HasPrefix(adapterId, "fc.") {
		isSeparator := func(r rune) bool { return r == ':' || r == '-' }
		var wwns []string
		for _, part := range strings.FieldsFunc(strings.TrimPrefix(adapterId, "fc."), isSeparator) {
			wwns = append(wwns, sanitizeWWN(part))
		}
		payload["FCWWNs"] = wwns
	} else {
		payload["iSCSINames"] = []string{adapterId}
	}
	jsonBody, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("failed to marshal create-host body: %w", err)
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/api/v1/hosts", p.BaseURL), bytes.NewBuffer(jsonBody))
	if err != nil {
		return fmt.Errorf("failed to create POST request: %w", err)
	}
	resp, err := p.doRequest(req, "createHost")
	if err != nil {
		return fmt.Errorf("createHost request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		b, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("createHost returned %d: %s", resp.StatusCode, string(b))
	}
	return nil
}
// sanitizeWWN normalizes a raw WWN string into the canonical form expected
// by the array API (delegates to fcutil.NormalizeWWN).
func sanitizeWWN(raw string) string {
	return fcutil.NormalizeWWN(raw)
}
// GetSessionKey returns a WSAPI session key. A cached key is reused while it
// is younger than three minutes; otherwise a new key is requested from
// POST /api/v1/credentials and cached together with its creation time.
func (p *Primera3ParClientWsImpl) GetSessionKey() (string, error) {
	// Reuse the cached key while it is still considered fresh.
	if time.Since(p.SessionStartTime) < 3*time.Minute && p.SessionKey != "" {
		return p.SessionKey, nil
	}
	url := fmt.Sprintf("%s/api/v1/credentials", p.BaseURL)
	requestBody := map[string]string{
		"user":     p.Username,
		"password": p.Password,
	}
	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		return "", fmt.Errorf("failed to encode JSON: %w", err)
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := p.HTTPClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %w", err)
	}
	// Authentication failures carry a {code, desc} JSON error payload.
	if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden {
		var errorResp struct {
			Code int    `json:"code"`
			Desc string `json:"desc"`
		}
		if err := json.Unmarshal(bodyBytes, &errorResp); err == nil {
			return "", fmt.Errorf("authentication failed: %s (code %d)", errorResp.Desc, errorResp.Code)
		}
		return "", fmt.Errorf("authentication failed with status %d: %s", resp.StatusCode, string(bodyBytes))
	}
	// NOTE(review): non-2xx statuses other than 401/403 fall through to this
	// parse and surface as the generic "failed to retrieve session key"
	// error below — confirm that is intended.
	var response map[string]string
	if err := json.Unmarshal(bodyBytes, &response); err != nil {
		return "", fmt.Errorf("failed to parse session key response: %w", err)
	}
	if sessionKey, ok := response["key"]; ok {
		// Cache the new key and remember when it was obtained.
		p.SessionKey = sessionKey
		p.SessionStartTime = time.Now()
		klog.Info("Successfully obtained new session key")
		return sessionKey, nil
	}
	return "", fmt.Errorf("failed to retrieve session key, response: %s", string(bodyBytes))
}
// EnsureLunMapped exports targetLUN to the host set named "set:<initiatorGroup>",
// creating the VLUN only when no such export exists yet. The returned LUN
// carries the initiator group in its IQN field.
func (p *Primera3ParClientWsImpl) EnsureLunMapped(initiatorGroup string, targetLUN populator.LUN) (populator.LUN, error) {
	targetLUN.IQN = initiatorGroup
	hostSetName := fmt.Sprintf("set:%s", initiatorGroup)
	existing, err := p.GetVLun(targetLUN.Name, hostSetName)
	if err != nil {
		return populator.LUN{}, err
	}
	if existing != nil {
		// Already exported; nothing to do.
		return targetLUN, nil
	}
	lunID, err := p.GetFreeLunID(initiatorGroup)
	if err != nil {
		return populator.LUN{}, err
	}
	// note autoLun is on, and lun is set as well - this combination works for both primera and 3par.
	// "autoLun" alone fails for 3par despite documentation, and setting lun fails for primera.
	payload := map[string]interface{}{
		"volumeName": targetLUN.Name,
		"lun":        lunID,
		"hostname":   hostSetName,
		"autoLun":    true,
	}
	jsonBody, err := json.Marshal(payload)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to encode JSON: %w", err)
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/api/v1/vluns", p.BaseURL), bytes.NewBuffer(jsonBody))
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := p.doRequest(req, "ensureLunMapping")
	if err != nil {
		return populator.LUN{}, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return populator.LUN{}, fmt.Errorf("failed to map LUN: status %d, resp: %v", resp.StatusCode, resp)
	}
	return targetLUN, nil
}
// LunUnmap deletes the VLUN exporting lunName to the host set
// "set:<initiatorGroupName>". The VLUN id is resolved first because the
// DELETE endpoint is keyed by "<volume>,<lunID>,<host>".
func (p *Primera3ParClientWsImpl) LunUnmap(ctx context.Context, initiatorGroupName string, lunName string) error {
	lunID, err := p.GetVLunID(lunName, fmt.Sprintf("set:%s", initiatorGroupName))
	if err != nil {
		return fmt.Errorf("failed to get LUN ID: %w", err)
	}
	fields := map[string]interface{}{
		"LUN":         lunName,
		"igroup":      initiatorGroupName,
		"LUN ID Used": lunID,
	}
	log.Printf("LunUnmap: %v", fields)
	url := fmt.Sprintf("%s/api/v1/vluns/%s,%d,%s", p.BaseURL, lunName, lunID, fmt.Sprintf("set:%s", initiatorGroupName))
	req, err := http.NewRequestWithContext(ctx, "DELETE", url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	// NOTE(review): unlike doRequest, this sends with whatever session key is
	// currently cached (no GetSessionKey refresh first) and relies solely on
	// the 401-retry helper below — confirm this is intended.
	p.setReqHeadersWithSessionKey(req)
	resp, err := p.HTTPClient.Do(req)
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	// Retry once with a refreshed session key on HTTP 401; on retry the resp
	// variable is replaced, so the deferred Close applies to the new response.
	resp, err = p.handleUnauthorizedSessionKey(resp, req, err)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("failed to unmap LUN: status %d", resp.StatusCode)
	}
	log.Printf("LunUnmap: Successfully unmapped LUN %s from %s", lunName, initiatorGroupName)
	return nil
}
// GetFreeLunID returns the lowest LUN id in [1, 255] not already used by a
// VLUN exported to initiatorGroupName.
func (p *Primera3ParClientWsImpl) GetFreeLunID(initiatorGroupName string) (int, error) {
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/v1/vluns", p.BaseURL), nil)
	if err != nil {
		return 0, fmt.Errorf("failed to create request: %w", err)
	}
	var listing struct {
		Members []struct {
			LUN      int    `json:"lun"`
			Hostname string `json:"hostname"`
		} `json:"members"`
	}
	if err := p.doRequestUnmarshalResponse(req, "getFreeLunId", &listing); err != nil {
		return 0, err
	}
	taken := make(map[int]bool)
	for _, member := range listing.Members {
		if member.Hostname == initiatorGroupName {
			taken[member.LUN] = true
		}
	}
	for candidate := 1; candidate <= 255; candidate++ {
		if !taken[candidate] {
			return candidate, nil
		}
	}
	return 0, fmt.Errorf("no available LUN ID found for host %s", initiatorGroupName)
}
// GetVLunSerial returns the serial of the VLUN exporting volumeName to hostName.
func (p *Primera3ParClientWsImpl) GetVLunSerial(volumeName, hostName string) (string, error) {
	vlun, err := p.GetVLun(volumeName, hostName)
	if err != nil {
		return "", err
	}
	if vlun == nil {
		return "", fmt.Errorf("LUN not found for volume %s and host %s at GetVLunSerial", volumeName, hostName)
	}
	return vlun.Serial, nil
}
// VLun is a single WSAPI VLUN (volume-to-host export) record.
type VLun struct {
	VolumeName string `json:"volumeName"`
	LUN        int    `json:"lun"`
	Hostname   string `json:"hostname"`
	Serial     string `json:"serial"`
}
// GetVLun lists all VLUNs and returns the one exporting volumeName to
// hostname, or nil (with a nil error) when no such export exists.
func (p *Primera3ParClientWsImpl) GetVLun(volumeName, hostname string) (*VLun, error) {
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/v1/vluns", p.BaseURL), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	var listing struct {
		Members []VLun `json:"members"`
	}
	if err := p.doRequestUnmarshalResponse(req, "getVLun", &listing); err != nil {
		return nil, err
	}
	for i := range listing.Members {
		candidate := listing.Members[i]
		if candidate.VolumeName == volumeName && candidate.Hostname == hostname {
			return &candidate, nil
		}
	}
	return nil, nil
}
// GetVLunID returns the LUN id of the VLUN exporting lunName to initiatorGroupName.
func (p *Primera3ParClientWsImpl) GetVLunID(lunName, initiatorGroupName string) (int, error) {
	vlun, err := p.GetVLun(lunName, initiatorGroupName)
	if err != nil {
		return 0, err
	}
	if vlun == nil {
		return 0, fmt.Errorf("LUN not found for volume %s and host %s, at GetVLunID", lunName, initiatorGroupName)
	}
	return vlun.LUN, nil
}
// GetLunDetailsByVolumeName resolves a volume (its name truncated to the
// array's 31-character limit) and fills the name plus the lowercase "naa."
// WWN into lun.
func (p *Primera3ParClientWsImpl) GetLunDetailsByVolumeName(volumeName string, lun populator.LUN) (populator.LUN, error) {
	cutVolName := prefixOfString(volumeName, 31)
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/v1/volumes/%s", p.BaseURL, cutVolName), nil)
	if err != nil {
		return populator.LUN{}, fmt.Errorf("failed to create request: %w", err)
	}
	var volume struct {
		Id   int    `json:"id"`
		Name string `json:"name"`
		WWN  string `json:"wwn"`
	}
	if err := p.doRequestUnmarshalResponse(req, "getVolume", &volume); err != nil {
		return populator.LUN{}, err
	}
	if volume.Name == "" {
		return populator.LUN{}, fmt.Errorf("volume not found for volume: %s", cutVolName)
	}
	lun.Name = cutVolName
	lun.NAA = fmt.Sprintf("naa.%s", strings.ToLower(volume.WWN))
	return lun, nil
}
// CurrentMappedGroups returns the distinct host names of every VLUN that
// currently exports volumeName. mappingContext is unused.
func (p *Primera3ParClientWsImpl) CurrentMappedGroups(volumeName string, mappingContext populator.MappingContext) ([]string, error) {
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/v1/vluns", p.BaseURL), nil)
	if err != nil {
		return []string{}, fmt.Errorf("failed to create request: %w", err)
	}
	var listing struct {
		Members []struct {
			LUN        int    `json:"lun"`
			VolumeName string `json:"volumeName"`
			Hostname   string `json:"hostname"`
		} `json:"members"`
	}
	if err := p.doRequestUnmarshalResponse(req, "GET", &listing); err != nil {
		return nil, fmt.Errorf("failed to fetch VLUNs: %w", err)
	}
	unique := make(map[string]struct{})
	for _, vlun := range listing.Members {
		if vlun.VolumeName == volumeName {
			unique[vlun.Hostname] = struct{}{}
		}
	}
	hostnames := make([]string, 0, len(unique))
	for hostname := range unique {
		hostnames = append(hostnames, hostname)
	}
	return hostnames, nil
}
// doRequest refreshes the session key, sends req with the session headers,
// and retries once on HTTP 401. On success the caller owns the returned
// response and must close its body. reqDescription only feeds error messages.
func (p *Primera3ParClientWsImpl) doRequest(req *http.Request, reqDescription string) (*http.Response, error) {
	_, err := p.GetSessionKey()
	if err != nil {
		return nil, err
	}
	p.setReqHeadersWithSessionKey(req)
	resp, err := p.HTTPClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed for %s: %w", reqDescription, err)
	}
	// On 401 the helper re-authenticates and re-sends, replacing resp.
	if resp, err = p.handleUnauthorizedSessionKey(resp, req, err); err != nil {
		return nil, fmt.Errorf("failed for %s: %w", reqDescription, err)
	}
	return resp, nil
}
// doRequestUnmarshalResponse performs req like doRequest, expects HTTP 200,
// and JSON-decodes the response body into response.
// NOTE(review): the deferred Close binds to the resp variable at function
// exit; when the 401-retry helper replaces resp, the pre-retry response body
// may be leaked — review together with handleUnauthorizedSessionKey.
func (p *Primera3ParClientWsImpl) doRequestUnmarshalResponse(req *http.Request, reqDescription string, response interface{}) error {
	_, err := p.GetSessionKey()
	if err != nil {
		return err
	}
	p.setReqHeadersWithSessionKey(req)
	resp, err := p.HTTPClient.Do(req)
	if err != nil {
		return fmt.Errorf("request failed for %s: %w", reqDescription, err)
	}
	defer resp.Body.Close()
	// Retry once with a refreshed session key on HTTP 401.
	if resp, err = p.handleUnauthorizedSessionKey(resp, req, err); err != nil {
		return fmt.Errorf("failed for %s: %w", reqDescription, err)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("failed for %s: status %d, body: %s", reqDescription, resp.StatusCode, string(body))
	}
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read response for %s: %w", reqDescription, err)
	}
	if err := json.Unmarshal(bodyBytes, response); err != nil {
		return fmt.Errorf("failed to parse JSON for %s: %w", reqDescription, err)
	}
	return nil
}
// handleUnauthorizedSessionKey retries req once with a freshly obtained
// session key when resp carries HTTP 401. The 401 response's body is closed
// here before the retry; the returned response's body is left open for the
// caller to read and close.
func (p *Primera3ParClientWsImpl) handleUnauthorizedSessionKey(resp *http.Response, req *http.Request, err error) (*http.Response, error) {
	if resp.StatusCode == http.StatusUnauthorized {
		// bug fix: the stale 401 body was previously leaked, while a
		// `defer resp.Body.Close()` here closed the *retried* response's
		// body before the caller could read it.
		resp.Body.Close()
		if _, err := p.GetSessionKey(); err != nil {
			return nil, fmt.Errorf("failed to refresh session key: %w", err)
		}
		p.setReqHeadersWithSessionKey(req)
		resp, err = p.HTTPClient.Do(req)
		if err != nil {
			return nil, fmt.Errorf("retry request failed: %w", err)
		}
	}
	return resp, nil
}
// EnsureHostSetExists checks for the host set by name and creates it when
// the lookup does not return HTTP 200.
func (p *Primera3ParClientWsImpl) EnsureHostSetExists(hostSetName string) error {
	url := fmt.Sprintf("%s/api/v1/hostsets/%s", p.BaseURL, hostSetName)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := p.doRequest(req, "ensureHostSetExists, find set")
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil // Host set already exists
	}
	// Not found (or any non-200): create the host set.
	createURL := fmt.Sprintf("%s/api/v1/hostsets", p.BaseURL)
	requestBody := map[string]interface{}{
		"name": hostSetName,
	}
	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		return fmt.Errorf("failed to encode JSON: %w", err)
	}
	req, err = http.NewRequest("POST", createURL, bytes.NewBuffer(jsonBody))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	respCreate, err := p.doRequest(req, "EnsuresHostSetExists")
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer respCreate.Body.Close()
	if respCreate.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(respCreate.Body)
		// bug fix: the error previously reported the lookup response's
		// status (resp) instead of the create response's status.
		return fmt.Errorf("failed to create host set: status %d, body: %s", respCreate.StatusCode, string(body))
	}
	return nil
}
// setReqHeadersWithSessionKey stamps the JSON content type and the cached
// WSAPI session key header onto req.
func (p *Primera3ParClientWsImpl) setReqHeadersWithSessionKey(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-HP3PAR-WSAPI-SessionKey", p.SessionKey)
}
// AddHostToHostSet adds hostName to the host set hostSetName via the WSAPI
// "add member" action (action 1). A duplicate-member response (code 104) is
// treated as success.
func (p *Primera3ParClientWsImpl) AddHostToHostSet(hostSetName string, hostName string) error {
	url := fmt.Sprintf("%s/api/v1/hostsets/%s", p.BaseURL, hostSetName)
	requestBody := map[string]interface{}{
		"action": 1,
		"setmembers": []string{
			hostName,
		},
	}
	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		return fmt.Errorf("failed to encode JSON: %w", err)
	}
	req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonBody))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := p.doRequest(req, "AddHostToHostSet")
	// bug fix: err was previously ignored and resp dereferenced, causing a
	// nil-pointer panic on transport failure; the body was also never closed.
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	body, _ := io.ReadAll(resp.Body)
	var apiError struct {
		Code int    `json:"code"`
		Desc string `json:"desc"`
	}
	if err := json.Unmarshal(body, &apiError); err == nil {
		if apiError.Code == 104 {
			// Duplicate membership is benign.
			klog.Errorf("Host %s is already in host set %s. Ignoring duplicate.\n", hostName, hostSetName)
			return nil
		}
	}
	return fmt.Errorf("failed to add host to host set: status %d, body: %s", resp.StatusCode, string(body))
}
// prefixOfString returns at most the first length runes of s, leaving s
// unchanged when it is already short enough.
func prefixOfString(s string, length int) string {
	runes := []rune(s)
	if len(runes) <= length {
		return s
	}
	return string(runes[:length])
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3_test.go | cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3_test.go | package primera3par
import (
"testing"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"github.com/stretchr/testify/assert"
)
// TestPrimera3ParClonner exercises the clonner end to end against the
// in-memory mock client: igroup creation, map, mapped-group listing,
// PV-to-LUN resolution, and unmap.
func TestPrimera3ParClonner(t *testing.T) {
	mockClient := NewMockPrimera3ParClient()
	clonner := Primera3ParClonner{client: mockClient}
	targetLUN := populator.LUN{
		Name: "TestVolume",
		IQN:  "iqn.1993-08.org.debian:01:test1234",
	}
	initiatorGroup := "TestInitiatorGroup"
	// Seed the mock: register the volume and pre-create a host entry under
	// the key the assertions below look up.
	mockClient.Volumes[targetLUN.Name] = targetLUN
	mockClient.Hosts["mock-host-"+targetLUN.IQN] = "esxi-1000"
	t.Run("Ensure Clonner Igroup", func(t *testing.T) {
		_, err := clonner.EnsureClonnerIgroup(initiatorGroup, []string{targetLUN.IQN})
		assert.NoError(t, err, "Expected no error when ensuring Clonner Igroup")
		_, hostExists := mockClient.Hosts["mock-host-"+targetLUN.IQN]
		assert.True(t, hostExists, "Expected host to exist")
		_, hostSetExists := mockClient.HostSets[initiatorGroup]
		assert.True(t, hostSetExists, "Expected host set to exist")
	})
	t.Run("Map LUN", func(t *testing.T) {
		_, err := clonner.Map(initiatorGroup, targetLUN, nil)
		assert.NoError(t, err, "Expected no error when mapping LUN")
	})
	t.Run("Current Mapped Groups", func(t *testing.T) {
		groups, err := clonner.CurrentMappedGroups(targetLUN, nil)
		assert.NoError(t, err, "Expected no error when fetching mapped groups")
		assert.Contains(t, groups, initiatorGroup, "Expected initiator group to be mapped")
	})
	t.Run("Resolve Volume Handle to LUN", func(t *testing.T) {
		_, err := clonner.ResolvePVToLUN(populator.PersistentVolume{VolumeHandle: targetLUN.Name})
		assert.NoError(t, err, "Expected no error when resolving LUN details")
	})
	t.Run("Unmap LUN", func(t *testing.T) {
		err := clonner.UnMap(initiatorGroup, targetLUN, nil)
		assert.NoError(t, err, "Expected no error when unmapping LUN")
	})
}
// TestNewPrimera3ParClonner checks the constructor succeeds with arbitrary
// credentials (no network I/O happens at construction time).
func TestNewPrimera3ParClonner(t *testing.T) {
	_, err := NewPrimera3ParClonner("hostname", "username", "password", false)
	assert.NoError(t, err, "Expected no error when creating a new clonner")
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/primera3par/clonner.go | cmd/vsphere-xcopy-volume-populator/internal/primera3par/clonner.go | package primera3par
import (
"context"
"fmt"
"k8s.io/klog/v2"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
)
// PROVIDER_ID is presumably the WWN/NAA prefix of Primera/3PAR volumes —
// TODO confirm against callers.
const PROVIDER_ID = "60002ac"

// Primera3ParClonner implements the populator storage API on top of a
// Primera3ParClient.
type Primera3ParClonner struct {
	client Primera3ParClient
}
// NewPrimera3ParClonner wires a Primera3ParClonner to a WSAPI client for the
// given array endpoint and credentials.
func NewPrimera3ParClonner(storageHostname, storageUsername, storagePassword string, sslSkipVerify bool) (Primera3ParClonner, error) {
	wsClient := NewPrimera3ParClientWsImpl(storageHostname, storageUsername, storagePassword, sslSkipVerify)
	clonner := Primera3ParClonner{client: &wsClient}
	return clonner, nil
}
// EnsureClonnerIgroup creates or updates the initiator group: hosts are
// found or created for the adapter ids, the host set is ensured, and every
// host is added to it.
func (c *Primera3ParClonner) EnsureClonnerIgroup(initiatorGroup string, adapterIds []string) (populator.MappingContext, error) {
	hostNames, err := c.client.EnsureHostsWithIds(adapterIds)
	if err != nil {
		return nil, fmt.Errorf("failed to ensure host with IQN: %w", err)
	}
	if err := c.client.EnsureHostSetExists(initiatorGroup); err != nil {
		return nil, fmt.Errorf("failed to ensure host set: %w", err)
	}
	for _, hostName := range hostNames {
		klog.Infof("adding host %s, to initiatorGroup: %s", hostName, initiatorGroup)
		if err := c.client.AddHostToHostSet(initiatorGroup, hostName); err != nil {
			return nil, fmt.Errorf("failed to add host to host set: %w", err)
		}
	}
	return nil, nil
}
// GetNaaID returns lun unchanged; for this backend the NAA id is already
// filled in by GetLunDetailsByVolumeName during PV resolution.
func (p *Primera3ParClonner) GetNaaID(lun populator.LUN) populator.LUN {
	return lun
}
// Map maps an initiator group to a LUN by ensuring the VLUN export exists.
// mappingContext is unused here.
func (c *Primera3ParClonner) Map(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) (populator.LUN, error) {
	return c.client.EnsureLunMapped(initiatorGroup, targetLUN)
}
// UnMap unmaps an initiator group from a LUN.
// context.TODO(): no caller context flows through this interface, so
// cancellation is not propagated to the WSAPI call.
func (c *Primera3ParClonner) UnMap(initiatorGroup string, targetLUN populator.LUN, mappingContext populator.MappingContext) error {
	return c.client.LunUnmap(context.TODO(), initiatorGroup, targetLUN.Name)
}
// CurrentMappedGroups returns the initiator groups the LUN is mapped to.
func (p *Primera3ParClonner) CurrentMappedGroups(targetLUN populator.LUN, mappingContext populator.MappingContext) ([]string, error) {
	groups, err := p.client.CurrentMappedGroups(targetLUN.Name, nil)
	if err != nil {
		return []string{}, fmt.Errorf("failed to get current mapped groups: %w", err)
	}
	return groups, nil
}
// ResolvePVToLUN resolves a PersistentVolume to its backing array LUN by
// looking up the volume named after the PV's volume handle.
func (c *Primera3ParClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	seed := populator.LUN{VolumeHandle: pv.VolumeHandle}
	resolved, err := c.client.GetLunDetailsByVolumeName(pv.VolumeHandle, seed)
	if err != nil {
		return populator.LUN{}, err
	}
	return resolved, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3clientmock.go | cmd/vsphere-xcopy-volume-populator/internal/primera3par/par3clientmock.go | package primera3par
import (
"context"
"fmt"
"log"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
)
// MockPrimera3ParClient is an in-memory Primera3ParClient used by tests.
type MockPrimera3ParClient struct {
	SessionKey string
	Volumes    map[string]populator.LUN // volume name -> LUN
	VLUNs      map[string][]VLun        // initiator group -> VLUN exports
	Hosts      map[string]string        // host name -> IQN
	HostSets   map[string][]string      // host set name -> member host names
}
// NewMockPrimera3ParClient returns a mock client with empty in-memory state
// and a fixed session key.
func NewMockPrimera3ParClient() *MockPrimera3ParClient {
	mock := &MockPrimera3ParClient{SessionKey: "mock-session-key"}
	mock.Volumes = map[string]populator.LUN{}
	mock.VLUNs = map[string][]VLun{}
	mock.Hosts = map[string]string{}
	mock.HostSets = map[string][]string{}
	return mock
}
// GetSessionKey returns the fixed mock session key.
func (m *MockPrimera3ParClient) GetSessionKey() (string, error) {
	log.Println("Mock: GetSessionKey called")
	return m.SessionKey, nil
}
// EnsureHostsWithIds returns the name of an existing host registered for the
// first adapter id, creating and recording one when absent.
func (m *MockPrimera3ParClient) EnsureHostsWithIds(iqn []string) ([]string, error) {
	// robustness: the original indexed iqn[0] unconditionally and would
	// panic on empty input.
	if len(iqn) == 0 {
		return nil, fmt.Errorf("mock: no adapter ids provided")
	}
	for hostName, existingIQN := range m.Hosts {
		if existingIQN == iqn[0] {
			return []string{hostName}, nil
		}
	}
	// bug fix: the name previously formatted the whole slice
	// ("mock-host-[<iqn>]"); use the first id like the lookup above does.
	hostName := fmt.Sprintf("mock-host-%s", iqn[0])
	m.Hosts[hostName] = iqn[0]
	log.Printf("Mock: Created host %s with IQN %s", hostName, iqn[0])
	return []string{hostName}, nil
}
// EnsureHostSetExists creates the named host set in memory when absent.
func (m *MockPrimera3ParClient) EnsureHostSetExists(hostSetName string) error {
	_, exists := m.HostSets[hostSetName]
	if exists {
		return nil
	}
	m.HostSets[hostSetName] = []string{}
	log.Printf("Mock: Created host set %s", hostSetName)
	return nil
}
// AddHostToHostSet appends hostName to an existing host set, ignoring
// duplicates. It fails when the host set does not exist.
func (m *MockPrimera3ParClient) AddHostToHostSet(hostSetName string, hostName string) error {
	members, exists := m.HostSets[hostSetName]
	if !exists {
		return fmt.Errorf("mock: host set %s does not exist", hostSetName)
	}
	for _, member := range members {
		if member == hostName {
			return nil
		}
	}
	m.HostSets[hostSetName] = append(members, hostName)
	log.Printf("Mock: Added host %s to host set %s", hostName, hostSetName)
	return nil
}
// EnsureLunMapped records a VLUN mapping targetLUN to initiatorGroup; the
// volume must already be registered in Volumes. The LUN id is one past the
// group's current export count.
func (m *MockPrimera3ParClient) EnsureLunMapped(initiatorGroup string, targetLUN populator.LUN) (populator.LUN, error) {
	_, known := m.Volumes[targetLUN.Name]
	if !known {
		return populator.LUN{}, fmt.Errorf("mock: volume %s does not exist", targetLUN.Name)
	}
	mapping := VLun{
		VolumeName: targetLUN.Name,
		Hostname:   initiatorGroup,
		LUN:        len(m.VLUNs[initiatorGroup]) + 1,
	}
	m.VLUNs[initiatorGroup] = append(m.VLUNs[initiatorGroup], mapping)
	log.Printf("Mock: EnsureLunMapped -> Volume %s mapped to initiator group %s with LUN ID %d", targetLUN.Name, initiatorGroup, mapping.LUN)
	return targetLUN, nil
}
// LunUnmap removes the first VLUN mapping lunName to initiatorGroupName.
func (m *MockPrimera3ParClient) LunUnmap(ctx context.Context, initiatorGroupName string, lunName string) error {
	vluns, found := m.VLUNs[initiatorGroupName]
	if !found {
		return fmt.Errorf("mock: no VLUNs found for initiator group %s", initiatorGroupName)
	}
	for idx := range vluns {
		if vluns[idx].VolumeName != lunName {
			continue
		}
		m.VLUNs[initiatorGroupName] = append(vluns[:idx], vluns[idx+1:]...)
		log.Printf("Mock: LunUnmap -> Volume %s unmapped from initiator group %s", lunName, initiatorGroupName)
		return nil
	}
	return fmt.Errorf("mock: LUN %s not found for initiator group %s", lunName, initiatorGroupName)
}
// GetLunDetailsByVolumeName returns the registered LUN for lunName; the
// passed-in lun template is ignored by the mock.
func (m *MockPrimera3ParClient) GetLunDetailsByVolumeName(lunName string, lun populator.LUN) (populator.LUN, error) {
	if volume, exists := m.Volumes[lunName]; exists {
		log.Printf("Mock: GetLunDetailsByVolumeName -> Found volume %s", lunName)
		return volume, nil
	}
	return populator.LUN{}, fmt.Errorf("mock: volume %s not found", lunName)
}
// CurrentMappedGroups lists every initiator group holding a VLUN for
// volumeName (once per matching VLUN, matching the real client's shape).
func (m *MockPrimera3ParClient) CurrentMappedGroups(volumeName string, mappingContext populator.MappingContext) ([]string, error) {
	var groups []string
	for group, vluns := range m.VLUNs {
		for _, mapping := range vluns {
			if mapping.VolumeName == volumeName {
				groups = append(groups, group)
			}
		}
	}
	if len(groups) == 0 {
		return nil, fmt.Errorf("mock: no mapped groups found for volume %s", volumeName)
	}
	log.Printf("Mock: CurrentMappedGroups -> Volume %s is mapped to groups: %v", volumeName, groups)
	return groups, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/internal/flashsystem/flashsystem.go | cmd/vsphere-xcopy-volume-populator/internal/flashsystem/flashsystem.go | package flashsystem
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/fcutil"
"github.com/kubev2v/forklift/cmd/vsphere-xcopy-volume-populator/internal/populator"
"k8s.io/klog/v2"
)
// FlashSystemProviderIDPrefix is the standard NAA prefix for IBM LUNs.
const FlashSystemProviderIDPrefix = "naa.6005076"

// Mapping-context keys mirroring ExtractedMapping's JSON field names.
const HostIdKey = "hostId"
const HostNameKey = "hostName"
const HostCreatedKey = "hostCreated"
// ExtractedMapping bundles host identity captured during mapping.
// IsSet flags whether the struct carries real data.
type ExtractedMapping struct {
	HostId      string `json:"hostId"`
	HostName    string `json:"hostName"`
	HostCreated bool   `json:"hostCreated"`
	IsSet       bool   `json:"isSet"`
}
// extractWWPNsFromFCFormat extracts individual WWPNs from "fc.WWNN:WWPN"
// strings, using the part after the colon as the real WWPN. Entries without
// the "fc." prefix and entries fcutil cannot parse are skipped (parse
// failures are logged).
func extractWWPNsFromFCFormat(fcStrings []string) []string {
	var wwpns []string
	for _, candidate := range fcStrings {
		if !strings.HasPrefix(candidate, "fc.") {
			continue
		}
		wwpn, err := fcutil.ExtractWWPN(candidate)
		if err != nil {
			klog.Warningf("Failed to extract WWPN from %s: %v", candidate, err)
			continue
		}
		klog.Infof("Extracted WWPN: %s from %s", wwpn, candidate)
		wwpns = append(wwpns, wwpn)
	}
	return wwpns
}
// AuthResponse models the JSON response from the /auth endpoint.
type AuthResponse struct {
	Token string `json:"token"`
}

// FlashSystemHost is a host object as returned by the FlashSystem REST API.
type FlashSystemHost struct {
	ID     string `json:"id"`
	Name   string `json:"name"`
	Status string `json:"status"`
}

// FlashSystemVolume is a volume (VDisk) object; VdiskUID feeds the NAA id.
type FlashSystemVolume struct {
	ID       string `json:"id"`
	Name     string `json:"name"`
	VdiskUID string `json:"vdisk_UID"` // Unique Identification Number, used for NAA.
}

// FlashSystemVolumeHostMapping is one row of a VDisk-to-host mapping listing.
type FlashSystemVolumeHostMapping struct {
	VDiskID   string `json:"id"`        // This is the VDisk ID
	VDiskName string `json:"name"`      // This is the VDisk name
	HostID    string `json:"host_id"`   // This is the Host ID
	HostName  string `json:"host_name"` // This is the Host name
}

// HostPort is a single FC (WWPN) or iSCSI (IQN) port of a host.
type HostPort struct {
	HostID   string `json:"host_id"`
	HostName string `json:"host_name"`
	WWPN     string `json:"WWPN"` // API returns uppercase WWPN
	IQN      string `json:"iscsi_name"`
}
// FlashSystemAPIClient handles communication with the FlashSystem REST API.
type FlashSystemAPIClient struct {
ManagementIP string
httpClient *http.Client
authToken string // Session token from /auth
username string // Store for re-authentication
password string // Store for re-authentication
}
// NewFlashSystemAPIClient creates and authenticates a new API client.
// sslSkipVerify disables TLS certificate verification on the HTTPS transport.
func NewFlashSystemAPIClient(managementIP, username, password string, sslSkipVerify bool) (*FlashSystemAPIClient, error) {
	c := &FlashSystemAPIClient{
		ManagementIP: managementIP,
		username:     username,
		password:     password,
		httpClient: &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: sslSkipVerify},
			},
			Timeout: time.Second * 60,
		},
	}
	// Obtain an initial session token before handing the client out.
	if err := c.authenticate(); err != nil {
		return nil, fmt.Errorf("initial authentication failed: %w", err)
	}
	return c, nil
}
// authenticate handles the authentication process using v1 API best practices.
// On success it stores the session token in c.authToken.
func (c *FlashSystemAPIClient) authenticate() error {
	endpoint := fmt.Sprintf("https://%s:7443/rest/v1/auth", c.ManagementIP)
	// FlashSystem expects username and password via HTTP headers, not JSON body
	request, err := http.NewRequest("POST", endpoint, bytes.NewBuffer([]byte{}))
	if err != nil {
		return fmt.Errorf("failed to create auth request: %w", err)
	}
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Accept", "application/json")
	request.Header.Set("X-Auth-Username", c.username)
	request.Header.Set("X-Auth-Password", c.password)
	klog.Infof("Attempting to authenticate with FlashSystem at %s for user %s", c.ManagementIP, c.username)
	response, err := c.httpClient.Do(request)
	if err != nil {
		return fmt.Errorf("failed to send auth request to FlashSystem: %w", err)
	}
	defer response.Body.Close()
	body, readErr := io.ReadAll(response.Body)
	if readErr != nil {
		return fmt.Errorf("failed to read auth response body: %w", readErr)
	}
	if response.StatusCode != http.StatusOK {
		return fmt.Errorf("FlashSystem authentication failed. Status: %s, Body: %s", response.Status, string(body))
	}
	var parsed AuthResponse
	if unmarshalErr := json.Unmarshal(body, &parsed); unmarshalErr != nil {
		return fmt.Errorf("failed to unmarshal auth token response: %w. Body: %s", unmarshalErr, string(body))
	}
	if parsed.Token == "" {
		return fmt.Errorf("FlashSystem authentication successful but no token found in response")
	}
	c.authToken = parsed.Token
	klog.Infof("Successfully authenticated with FlashSystem and obtained session token.")
	return nil
}
// makeRequest is a helper to make authenticated HTTP requests with automatic
// token refresh: a 403 response triggers one re-authentication and one retry.
func (c *FlashSystemAPIClient) makeRequest(method, path string, payload interface{}) ([]byte, int, error) {
	body, status, err := c.doRequest(method, path, payload)
	if status != http.StatusForbidden {
		return body, status, err
	}
	// 403 means the session token expired; obtain a fresh one and retry once.
	klog.Infof("Received 403 Forbidden, token likely expired. Re-authenticating...")
	if authErr := c.authenticate(); authErr != nil {
		return nil, status, fmt.Errorf("re-authentication failed: %w", authErr)
	}
	return c.doRequest(method, path, payload)
}
// doRequest performs the actual HTTP request against the v1 REST API and
// returns the raw response body, the HTTP status code, and an error. Status
// codes >= 400 are converted into descriptive errors by handleAPIError; the
// body is still returned so callers can inspect it (e.g. for CMMVC codes).
func (c *FlashSystemAPIClient) doRequest(method, path string, payload interface{}) ([]byte, int, error) {
	fullURL := fmt.Sprintf("https://%s:7443/rest/v1%s", c.ManagementIP, path)
	klog.Infof("FlashSystem API Request: %s %s", method, fullURL)
	var reqBody *bytes.Buffer
	if payload != nil {
		jsonPayload, err := json.Marshal(payload)
		if err != nil {
			return nil, 0, fmt.Errorf("failed to marshal payload for %s: %w", fullURL, err)
		}
		reqBody = bytes.NewBuffer(jsonPayload)
		klog.Infof("Request Payload JSON: %s", string(jsonPayload))
	} else {
		// No payload: send an empty buffer rather than a nil body.
		reqBody = bytes.NewBuffer([]byte{})
	}
	req, err := http.NewRequest(method, fullURL, reqBody)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to create request for %s: %w", fullURL, err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	// Session token obtained by authenticate(); refreshed by makeRequest on 403.
	req.Header.Set("X-Auth-Token", c.authToken)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, 0, fmt.Errorf("request to %s failed: %w", fullURL, err)
	}
	defer resp.Body.Close()
	respBodyBytes, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, resp.StatusCode, fmt.Errorf("failed to read response body from %s: %w", fullURL, readErr)
	}
	klog.Infof("Response Status: %s, Body: %s", resp.Status, string(respBodyBytes))
	// Enhanced error handling based on IBM Storage Virtualize API status codes
	if resp.StatusCode >= 400 {
		return respBodyBytes, resp.StatusCode, c.handleAPIError(resp.StatusCode, string(respBodyBytes), fullURL)
	}
	return respBodyBytes, resp.StatusCode, nil
}
// handleAPIError provides enhanced error handling for different HTTP status
// codes; unknown codes fall back to a generic message that includes the code.
func (c *FlashSystemAPIClient) handleAPIError(statusCode int, body, url string) error {
	var msg string
	switch statusCode {
	case http.StatusBadRequest: // 400
		msg = fmt.Sprintf("bad request to %s: %s", url, body)
	case http.StatusUnauthorized: // 401
		msg = fmt.Sprintf("unauthorized request to %s - check credentials: %s", url, body)
	case http.StatusForbidden: // 403
		msg = fmt.Sprintf("forbidden request to %s - token expired or insufficient permissions: %s", url, body)
	case http.StatusNotFound: // 404
		msg = fmt.Sprintf("resource not found at %s: %s", url, body)
	case http.StatusConflict: // 409
		msg = fmt.Sprintf("conflict at %s - resource may already exist: %s", url, body)
	case http.StatusTooManyRequests: // 429
		msg = fmt.Sprintf("too many requests to %s - rate limited: %s", url, body)
	case http.StatusInternalServerError: // 500
		msg = fmt.Sprintf("internal server error at %s: %s", url, body)
	case http.StatusBadGateway: // 502
		msg = fmt.Sprintf("bad gateway error at %s: %s", url, body)
	default:
		msg = fmt.Sprintf("HTTP %d error at %s: %s", statusCode, url, body)
	}
	return fmt.Errorf("%s", msg)
}
// FlashSystemClonner implements the populator.StorageApi interface.
type FlashSystemClonner struct {
	// api is the authenticated FlashSystem REST client.
	api *FlashSystemAPIClient
}
// NewFlashSystemClonner creates a new FlashSystemClonner backed by an
// authenticated FlashSystem REST API client.
func NewFlashSystemClonner(managementIP, username, password string, sslSkipVerify bool) (FlashSystemClonner, error) {
	apiClient, err := NewFlashSystemAPIClient(managementIP, username, password, sslSkipVerify)
	if err != nil {
		return FlashSystemClonner{}, fmt.Errorf("failed to create FlashSystem API client: %w", err)
	}
	return FlashSystemClonner{api: apiClient}, nil
}
// EnsureClonnerIgroup creates or finds a single host with the given identifiers.
//
// clonnerIdentifiers may mix FC WWPNs (in "fc.WWNN:WWPN" form) and iSCSI IQNs.
// If an existing host already owns one of the identifiers it is reused;
// otherwise a new host is created, preferring FC over iSCSI. The returned
// MappingContext carries the host name/ID, and HostCreatedKey=true only when
// a new host was created (so UnMap may clean it up later).
func (c *FlashSystemClonner) EnsureClonnerIgroup(hostName string, clonnerIdentifiers []string) (populator.MappingContext, error) {
	klog.Infof("Ensuring single host '%s' exists with identifiers: %v", hostName, clonnerIdentifiers)
	ctx := make(populator.MappingContext)
	if len(clonnerIdentifiers) == 0 {
		return nil, fmt.Errorf("no identifiers provided")
	}
	// Step 1: Categorize identifiers - separate FC WWPNs from iSCSI IQNs
	var fcWWPNs []string
	var iscsiIQNs []string
	for _, identifier := range clonnerIdentifiers {
		if strings.HasPrefix(identifier, "fc.") {
			// It's a FC WWPN - extract and get the first half (non-virtual part)
			wwpns := extractWWPNsFromFCFormat([]string{identifier})
			fcWWPNs = append(fcWWPNs, wwpns...)
		} else {
			// Assume it's an iSCSI IQN
			iscsiIQNs = append(iscsiIQNs, identifier)
		}
	}
	klog.Infof("Categorized identifiers - FC WWPNs: %v, iSCSI IQNs: %v", fcWWPNs, iscsiIQNs)
	// Step 2: Check for existing hosts with any of these identifiers
	var existingHostName string
	var err error
	// Check FC WWPNs first (call once with all WWPNs)
	if len(fcWWPNs) > 0 {
		// NOTE: err here shadows the outer err on purpose; a lookup failure is
		// non-fatal and we fall through to host creation.
		existingFCHosts, err := c.findAllHostsByWWPNs(fcWWPNs)
		if err != nil {
			klog.Warningf("Error searching for existing FC hosts: %v", err)
		} else if len(existingFCHosts) > 0 {
			existingHostName = existingFCHosts[0]
			klog.Infof("Found existing host '%s' with FC WWPNs %v", existingHostName, fcWWPNs)
		}
	}
	// Check iSCSI IQNs if no FC host found
	if existingHostName == "" && len(iscsiIQNs) > 0 {
		existingISCSIHosts, err := c.findAllHostsByIQNs(iscsiIQNs)
		if err != nil {
			klog.Warningf("Error searching for existing iSCSI hosts: %v", err)
		} else if len(existingISCSIHosts) > 0 {
			existingHostName = existingISCSIHosts[0]
			klog.Infof("Found existing host '%s' with iSCSI IQNs %v", existingHostName, iscsiIQNs)
		}
	}
	// Step 3: If existing host found, use it
	if existingHostName != "" {
		// Get the host details to find its ID
		hostDetails, err := c.getHostDetailsByName(existingHostName)
		if err != nil {
			return nil, fmt.Errorf("failed to get details for existing host '%s': %w", existingHostName, err)
		}
		// Store the actual FlashSystem host name for use in Map/UnMap operations
		// Note: created_host is not set (defaults to false) since we're using an existing host
		ctx[HostNameKey] = hostDetails.Name
		ctx[HostIdKey] = hostDetails.ID
		klog.Infof("Using existing host '%s' with ID '%s'", hostDetails.Name, hostDetails.ID)
		return ctx, nil
	}
	// Step 4: No existing host found, create new host
	// Prioritize FC over iSCSI as per user requirements
	var newHostName string
	if len(fcWWPNs) > 0 {
		// Create FC host
		klog.Infof("Creating new FC host '%s' with WWPNs: %v", hostName, fcWWPNs)
		newHostName, err = c.createNewHost(hostName, fcWWPNs, true)
		if err != nil {
			return nil, fmt.Errorf("failed to create FC host: %w", err)
		}
	} else if len(iscsiIQNs) > 0 {
		// Create iSCSI host (only if no FC WWPNs exist)
		klog.Infof("Creating new iSCSI host '%s' with IQNs: %v", hostName, iscsiIQNs)
		newHostName, err = c.createNewHost(hostName, iscsiIQNs, false)
		if err != nil {
			return nil, fmt.Errorf("failed to create iSCSI host: %w", err)
		}
	} else {
		return nil, fmt.Errorf("no valid FC WWPNs or iSCSI IQNs found in identifiers: %v", clonnerIdentifiers)
	}
	// Get the details of the newly created host to verify creation and for logging
	hostDetails, err := c.getHostDetailsByName(newHostName)
	if err != nil {
		return nil, fmt.Errorf("failed to get details for newly created host '%s': %w", newHostName, err)
	}
	// Store the actual FlashSystem host name for use in Map/UnMap operations
	// Mark that we created this host so it can be cleaned up in UnMap if it becomes empty
	ctx[HostNameKey] = hostDetails.Name
	ctx[HostIdKey] = hostDetails.ID
	ctx[HostCreatedKey] = true
	klog.Infof("Successfully created new host '%s' with ID '%s'", hostDetails.Name, hostDetails.ID)
	return ctx, nil
}
// createNewHost creates a new host without checking for existing ones (used when we've already determined none exist)
//
// identifiers are WWPNs when isFibreChannel is true, otherwise iSCSI IQNs.
// If the requested hostName is already taken, a unix-timestamp suffix is
// appended to make it unique. Returns the (possibly adjusted) host name.
func (c *FlashSystemClonner) createNewHost(hostName string, identifiers []string, isFibreChannel bool) (string, error) {
	// Check if our desired host name exists and adjust if needed
	filterPayload := map[string]string{
		"filtervalue": fmt.Sprintf("name=%s", hostName),
	}
	hostBytes, hostStatus, hostErr := c.api.makeRequest("POST", "/lshost", filterPayload)
	if hostErr != nil {
		return "", fmt.Errorf("failed to query host by name: %w", hostErr)
	}
	if hostStatus == http.StatusOK {
		var existingHosts []FlashSystemHost
		// Unmarshal errors are deliberately ignored here: a bad body simply
		// means no rename is attempted.
		if err := json.Unmarshal(hostBytes, &existingHosts); err == nil && len(existingHosts) > 0 {
			// Generate a unique name by appending a suffix
			hostName = fmt.Sprintf("%s-%d", hostName, time.Now().Unix())
			klog.Infof("Host name conflict, using alternative name: %s", hostName)
		}
	}
	// Create new host with unique WWPNs and name
	klog.Infof("Creating NEW host '%s' with identifiers: %v (FC: %t)", hostName, identifiers, isFibreChannel)
	createPayload := map[string]interface{}{
		"name": hostName,
	}
	if isFibreChannel {
		// Fibre Channel host
		wwpnString := strings.Join(identifiers, ":")
		createPayload["fcwwpn"] = wwpnString
		createPayload["force"] = true
		createPayload["protocol"] = "fcscsi"
		createPayload["type"] = "generic"
		klog.Infof("Creating FC host '%s' with WWPNs: %s", hostName, wwpnString)
	} else {
		// iSCSI host
		iqnString := strings.Join(identifiers, ",")
		createPayload["iscsiname"] = iqnString
		createPayload["protocol"] = "iscsi"
		createPayload["type"] = "generic"
		klog.Infof("Creating iSCSI host '%s' with IQNs: %s", hostName, iqnString)
	}
	// Log the exact payload for debugging
	if payloadJSON, err := json.MarshalIndent(createPayload, "", "  "); err == nil {
		klog.Infof("FlashSystem mkhost API request payload: %s", string(payloadJSON))
	}
	// Make the mkhost API call
	respBytes, respStatus, err := c.api.makeRequest("POST", "/mkhost", createPayload)
	if err != nil {
		return "", fmt.Errorf("failed to create host: %w", err)
	}
	if respStatus != http.StatusOK && respStatus != http.StatusCreated {
		return "", fmt.Errorf("failed to create host: status %d, body: %s", respStatus, string(respBytes))
	}
	klog.Infof("Successfully created host '%s'", hostName)
	return hostName, nil
}
// Map maps a VDisk to a Host using the mkvdiskhostmap API.
//
// targetLUN.Name must hold the FlashSystem VDisk ID; the host comes from the
// mapping context produced by EnsureClonnerIgroup. An empty context makes
// this a no-op (the volume is assumed to be mapped already). Two CMMVC error
// codes are handled specially:
//   - CMMVC5878E: the VDisk is already mapped to this host (treated as success)
//   - CMMVC9375E: the VDisk is mapped to another host; retry with an explicit
//     next-free SCSI ID and force=true to allow multi-host mapping.
func (c *FlashSystemClonner) Map(initiatorGroup string, targetLUN populator.LUN, context populator.MappingContext) (populator.LUN, error) {
	mapping := extractFromContext(context)
	if !mapping.IsSet {
		klog.Infof("No mapping context provided, skipping map operation for LUN '%s' to '%s' (assuming already correctly mapped)", targetLUN.Name, initiatorGroup)
		return targetLUN, nil
	}
	hostID := mapping.HostId
	vdiskID := targetLUN.Name // The LUN.Name field should hold the VDisk ID for this provider.
	klog.Infof("Mapping LUN (VDisk ID '%s') to Host '%s' (Host ID '%s')", vdiskID, mapping.HostName, hostID)
	// Create the mapping using mkvdiskhostmap API endpoint
	mapPayload := map[string]interface{}{
		"host": hostID,
	}
	endpoint := fmt.Sprintf("/mkvdiskhostmap/%s", vdiskID)
	mapBody, mapStatus, mapErr := c.api.makeRequest("POST", endpoint, mapPayload)
	// Handle the specific case where mapping already exists to same host
	if mapErr != nil && mapStatus == http.StatusConflict && strings.Contains(mapErr.Error(), "CMMVC5878E") {
		klog.Infof("VDisk '%s' is already mapped to Host '%s', continuing...", vdiskID, hostID)
	} else if mapErr != nil && mapStatus == http.StatusConflict && strings.Contains(mapErr.Error(), "CMMVC9375E") {
		// Volume is already mapped to a different host (CMMVC9375E)
		// Use lshostvdiskmap to find the next available SCSI ID for the target host
		// Then map with explicit SCSI ID, which allows multi-host mapping
		klog.Warningf("VDisk '%s' is already mapped to another host. Finding next available SCSI ID for multi-host mapping.", vdiskID)
		// Query lshostvdiskmap for the target host to get all current SCSI IDs
		hostMapEndpoint := fmt.Sprintf("/lshostvdiskmap/%s", hostID)
		hostMapBody, hostMapStatus, hostMapErr := c.api.makeRequest("POST", hostMapEndpoint, map[string]string{})
		if hostMapErr != nil || hostMapStatus != http.StatusOK {
			return populator.LUN{}, fmt.Errorf("failed to query host volume mappings: %w", hostMapErr)
		}
		var hostMappings []struct {
			SCSIID string `json:"SCSI_id"`
		}
		if unmarshalErr := json.Unmarshal(hostMapBody, &hostMappings); unmarshalErr != nil {
			return populator.LUN{}, fmt.Errorf("failed to parse host volume mappings: %w", unmarshalErr)
		}
		// Find the maximum SCSI ID currently in use
		// (unparseable SCSI_id values are skipped)
		maxSCSIID := -1
		for _, mapping := range hostMappings {
			scsiID := 0
			if _, err := fmt.Sscanf(mapping.SCSIID, "%d", &scsiID); err == nil {
				if scsiID > maxSCSIID {
					maxSCSIID = scsiID
				}
			}
		}
		// Use the next available SCSI ID
		nextSCSIID := maxSCSIID + 1
		klog.Infof("Found max SCSI ID %d on host '%s', using next available: %d", maxSCSIID, initiatorGroup, nextSCSIID)
		// Retry mapping with explicit SCSI ID for multi-host assignment
		// Note: REST API may allow multi-host mapping when explicit SCSI ID is provided
		scsiPayload := map[string]interface{}{
			"host":  hostID,
			"scsi":  nextSCSIID,
			"force": true,
		}
		mapBody, mapStatus, mapErr = c.api.makeRequest("POST", endpoint, scsiPayload)
		if mapErr != nil {
			return populator.LUN{}, fmt.Errorf("failed to create host mapping with SCSI ID %d: %w", nextSCSIID, mapErr)
		} else if mapStatus != http.StatusOK && mapStatus != http.StatusCreated {
			return populator.LUN{}, fmt.Errorf("failed to map VDisk '%s' to Host '%s' with SCSI ID %d, status: %d, body: %s", vdiskID, hostID, nextSCSIID, mapStatus, string(mapBody))
		}
		klog.Infof("Successfully mapped VDisk '%s' to Host '%s' with SCSI ID %d for multi-host access", vdiskID, hostID, nextSCSIID)
	} else if mapErr != nil {
		return populator.LUN{}, fmt.Errorf("failed to create host mapping: %w", mapErr)
	} else if mapStatus != http.StatusOK && mapStatus != http.StatusCreated {
		return populator.LUN{}, fmt.Errorf("failed to map VDisk '%s' to Host '%s', status: %d, body: %s", vdiskID, hostID, mapStatus, string(mapBody))
	}
	klog.Infof("Successfully created mapping of VDisk '%s' to Host '%s'.", vdiskID, hostID)
	return targetLUN, nil
}
// extractFromContext converts the loosely-typed MappingContext into an
// ExtractedMapping. IsSet is true when a host ID or host name was found;
// missing or wrongly-typed entries simply leave their field at the zero value.
func extractFromContext(context populator.MappingContext) ExtractedMapping {
	if context == nil {
		return ExtractedMapping{}
	}
	var extracted ExtractedMapping
	if id, ok := context[HostIdKey].(string); ok {
		extracted.HostId = id
	}
	if name, ok := context[HostNameKey].(string); ok {
		extracted.HostName = name
	}
	if created, ok := context[HostCreatedKey].(bool); ok {
		extracted.HostCreated = created
	}
	extracted.IsSet = extracted.HostId != "" || extracted.HostName != ""
	return extracted
}
// UnMap removes a VDisk mapping from a Host.
//
// A missing mapping (404) is treated as success (idempotent). When the
// context indicates the host was created by this populator, the host is
// removed too if it has no remaining mappings. On success the context is
// cleared to signal that no remapping is needed.
func (c *FlashSystemClonner) UnMap(initiatorGroup string, targetLUN populator.LUN, context populator.MappingContext) error {
	mapping := extractFromContext(context)
	if !mapping.IsSet {
		klog.Infof("mapping context is empty, skipping unmap")
		return nil
	}
	hostID := mapping.HostId
	vdiskID := targetLUN.Name // VDisk ID from the LUN object.
	klog.Infof("Unmapping LUN (VDisk ID '%s') from Host '%s' (Host ID '%s')", vdiskID, mapping.HostName, hostID)
	// Use v1 API endpoint for removing host mapping
	payload := map[string]string{
		"host": hostID,
	}
	endpoint := fmt.Sprintf("/rmvdiskhostmap/%s", vdiskID)
	unmapBody, unmapStatus, unmapErr := c.api.makeRequest("POST", endpoint, payload)
	if unmapErr != nil {
		return fmt.Errorf("failed to unmap VDisk from host: %w", unmapErr)
	}
	// 200 OK or 204 No Content are typical for success.
	if unmapStatus != http.StatusOK && unmapStatus != http.StatusNoContent {
		// It's possible the API returns 404 if mapping doesn't exist, which is idempotent.
		if unmapStatus == http.StatusNotFound {
			klog.Infof("Mapping for VDisk '%s' to Host '%s' did not exist.", vdiskID, hostID)
			return nil
		}
		return fmt.Errorf("failed to unmap VDisk '%s' from Host '%s', status: %d, body: %s", vdiskID, hostID, unmapStatus, string(unmapBody))
	}
	klog.Infof("Successfully unmapped LUN (VDisk ID '%s') from Host '%s'", vdiskID, hostID)
	// Clean up the host if we created it and if it has no more mappings
	if mapping.HostCreated {
		c.cleanupEmptyHost(hostID)
	}
	// Clear the context to signal that no remapping is needed
	for key := range context {
		delete(context, key)
	}
	klog.Infof("Cleared mapping context after successful unmap")
	return nil
}
// cleanupEmptyHost removes a host if it has no mappings (safe cleanup).
//
// It is best-effort: every failure is logged as a warning and swallowed, so
// an unmap never fails just because the host could not be removed.
func (c *FlashSystemClonner) cleanupEmptyHost(hostID string) {
	// Check if host has any remaining mappings
	filterPayload := map[string]string{
		"filtervalue": fmt.Sprintf("host_id=%s", hostID),
	}
	mappingsBytes, status, err := c.api.makeRequest("POST", "/lshostvdiskmap", filterPayload)
	if err != nil {
		klog.Warningf("Failed to check host mappings before cleanup: %v", err)
		return
	}
	if status == http.StatusOK {
		var mappings []FlashSystemVolumeHostMapping
		if err := json.Unmarshal(mappingsBytes, &mappings); err == nil && len(mappings) > 0 {
			klog.Infof("Host id '%s' still has %d mappings, not cleaning up", hostID, len(mappings))
			return
		}
	}
	// Host has no mappings, safe to remove it
	// (fixed: log message had a stray trailing parenthesis)
	klog.Infof("Cleaning up empty host ID: %s", hostID)
	rmPayload := map[string]string{
		"host": hostID,
	}
	rmBody, rmStatus, rmErr := c.api.makeRequest("POST", "/rmhost", rmPayload)
	if rmErr != nil {
		klog.Warningf("Failed to cleanup host: %v", rmErr)
		return
	}
	if rmStatus != http.StatusOK && rmStatus != http.StatusNoContent {
		klog.Warningf("Failed to cleanup host id '%s', status: %d, body: %s", hostID, rmStatus, string(rmBody))
		return
	}
	klog.Infof("Successfully cleaned up empty host id '%s'", hostID)
}
// CurrentMappedGroups returns the host names a VDisk is mapped to.
// The result is de-duplicated and preserves first-seen order; it is an empty
// (non-nil) slice when the VDisk has no host mappings.
func (c *FlashSystemClonner) CurrentMappedGroups(targetLUN populator.LUN, context populator.MappingContext) ([]string, error) {
	vdiskID := targetLUN.Name // VDisk ID is stored in the Name field.
	klog.Infof("Getting current mapped groups for LUN (VDisk ID '%s')", vdiskID)
	// Check host mappings using lsvdiskhostmap with vdisk_id in the URL path
	endpoint := fmt.Sprintf("/lsvdiskhostmap/%s", vdiskID)
	body, status, err := c.api.makeRequest("POST", endpoint, map[string]string{})
	if err != nil {
		return nil, fmt.Errorf("failed to get host mappings: %w", err)
	}
	if status != http.StatusOK {
		return nil, fmt.Errorf("failed to get host mappings, status: %d, body: %s", status, string(body))
	}
	var mappings []FlashSystemVolumeHostMapping
	if unmarshalErr := json.Unmarshal(body, &mappings); unmarshalErr != nil {
		return nil, fmt.Errorf("failed to unmarshal host mappings: %w. Body: %s", unmarshalErr, string(body))
	}
	seen := make(map[string]bool)
	hosts := []string{}
	for _, entry := range mappings {
		if seen[entry.HostName] {
			continue
		}
		seen[entry.HostName] = true
		hosts = append(hosts, entry.HostName)
		klog.Infof("Found host mapping: %s", entry.HostName)
	}
	klog.Infof("LUN (VDisk ID '%s') is mapped to host groups: %v", vdiskID, hosts)
	return hosts, nil
}
// createLUNFromVDisk creates a LUN object from a FlashSystemVolume.
// volumeHandle is used for logging only; the NAA is derived from the
// lowercased vdisk_UID, which fails if the UID is empty.
func (c *FlashSystemClonner) createLUNFromVDisk(vdiskDetails FlashSystemVolume, volumeHandle string) (populator.LUN, error) {
	uid := strings.ToLower(vdiskDetails.VdiskUID)
	if uid == "" {
		return populator.LUN{}, fmt.Errorf("resolved volume '%s' has an empty UID", vdiskDetails.Name)
	}
	// FlashSystem vdiskUID already contains the full NAA identifier including the IBM vendor prefix
	result := populator.LUN{
		Name:         vdiskDetails.ID,
		VolumeHandle: vdiskDetails.Name,
		SerialNumber: vdiskDetails.VdiskUID,
		NAA:          "naa." + uid,
	}
	klog.Infof("Resolved volume handle '%s' to LUN: Name(ID)=%s, SN(UID)=%s, NAA=%s, VDisk Name=%s",
		volumeHandle, result.Name, result.SerialNumber, result.NAA, vdiskDetails.Name)
	return result, nil
}
// ResolvePVToLUN resolves a PersistentVolume to a LUN by finding a volume with matching vdisk_UID.
//
// The PV VolumeHandle is expected in the form 'SVC:5;600507681088804CB800000000001074';
// the part after ';' is the vdisk_UID used to filter lsvdisk. Exactly one
// matching volume must exist.
func (c *FlashSystemClonner) ResolvePVToLUN(pv populator.PersistentVolume) (populator.LUN, error) {
	klog.Infof("Resolving PersistentVolume '%s' to LUN details", pv.Name)
	// Parse PV VolumeHandle to extract the vdisk_UID
	// Expected format: 'SVC:5;600507681088804CB800000000001074'
	pvHandleSplit := strings.Split(pv.VolumeHandle, ";")
	if len(pvHandleSplit) != 2 {
		return populator.LUN{}, fmt.Errorf("failed to parse vdisk handle '%s', it is not of the expected format: 'SVC:5;600507681088804CB800000000001074'", pv.VolumeHandle)
	}
	pvUID := pvHandleSplit[1] // Keep original case for the filter
	// Use lsvdisk with filter on vdisk_UID attribute to get the specific volume
	filterPayload := map[string]string{
		"filtervalue": fmt.Sprintf("vdisk_UID=%s", pvUID),
	}
	klog.Infof("Querying vdisk with vdisk_UID filter: %s", pvUID)
	vdisksBytes, vdisksStatus, vdisksErr := c.api.makeRequest("POST", "/lsvdisk", filterPayload)
	if vdisksErr != nil {
		return populator.LUN{}, fmt.Errorf("failed to get vdisk with UID %s: %w", pvUID, vdisksErr)
	}
	if vdisksStatus != http.StatusOK {
		return populator.LUN{}, fmt.Errorf("failed to get vdisk with UID %s, status: %d, body: %s", pvUID, vdisksStatus, string(vdisksBytes))
	}
	var vdisks []FlashSystemVolume
	if err := json.Unmarshal(vdisksBytes, &vdisks); err != nil {
		return populator.LUN{}, fmt.Errorf("failed to unmarshal vdisks response: %w. Body: %s", err, string(vdisksBytes))
	}
	if len(vdisks) == 0 {
		return populator.LUN{}, fmt.Errorf("volume with vdisk_UID '%s' not found", pvUID)
	}
	if len(vdisks) > 1 {
		return populator.LUN{}, fmt.Errorf("found %d volumes with vdisk_UID '%s', expected exactly one (UIDs must be unique)", len(vdisks), pvUID)
	}
	vdisk := vdisks[0]
	klog.Infof("Found matching volume: '%s' (ID: %s) for PV '%s'", vdisk.Name, vdisk.ID, pv.Name)
	return c.createLUNFromVDisk(vdisk, pv.VolumeHandle)
}
// getHostPorts gets host ports directly from the API via lshostports.
func (c *FlashSystemClonner) getHostPorts() ([]HostPort, error) {
	klog.Infof("Fetching host ports using lshostports")
	body, status, err := c.api.makeRequest("POST", "/lshostports", map[string]string{})
	if err != nil || status != http.StatusOK {
		return nil, fmt.Errorf("failed to list host ports: %w, status: %d", err, status)
	}
	// Parse response as host ports
	var ports []HostPort
	if unmarshalErr := json.Unmarshal(body, &ports); unmarshalErr != nil {
		return nil, fmt.Errorf("failed to unmarshal host ports: %w", unmarshalErr)
	}
	klog.Infof("Retrieved %d host ports from lshostports", len(ports))
	return ports, nil
}
// findAllHostsByIdentifiers searches for hosts using host port discovery - unified function for both WWPNs and IQNs.
//
// identifierType is used only for logging ("WWPNs" / "IQNs"). Matching is
// case-insensitive against both the WWPN and IQN fields of every host port;
// the returned host names are de-duplicated in first-seen order.
func (c *FlashSystemClonner) findAllHostsByIdentifiers(identifiers []string, identifierType string) ([]string, error) {
	if len(identifiers) == 0 {
		return nil, nil
	}
	klog.Infof("Searching for hosts with %s using host port discovery: %v", identifierType, identifiers)
	// Get host ports
	hostPorts, err := c.getHostPorts()
	if err != nil {
		return nil, err
	}
	foundHosts := make(map[string]bool) // Use map to avoid duplicates
	var hostNames []string
	// Normalize identifiers for comparison (just lowercase)
	normalizedIdentifiers := make(map[string]string) // normalized -> original mapping
	for _, identifier := range identifiers {
		normalized := strings.ToLower(identifier)
		normalizedIdentifiers[normalized] = identifier
		klog.V(4).Infof("Normalized %s: %s -> %s", identifierType, identifier, normalized)
	}
	// Search through host ports for matching identifiers
	for _, port := range hostPorts {
		// Both WWPNs and IQNs can be in the WWPN field according to user, but also check IQN field for completeness
		fieldsToCheck := []string{port.WWPN, port.IQN}
		for _, fieldValue := range fieldsToCheck {
			if fieldValue != "" {
				normalizedFieldValue := strings.ToLower(fieldValue)
				klog.V(4).Infof("Checking field value: %s (normalized: %s) for host: %s", fieldValue, normalizedFieldValue, port.HostName)
				// Check if normalized value matches any of our target identifiers
				if originalIdentifier, exists := normalizedIdentifiers[normalizedFieldValue]; exists {
					if !foundHosts[port.HostName] {
						klog.Infof("Found host '%s' for %s %s (port field value: %s) via host port", port.HostName, identifierType, originalIdentifier, fieldValue)
						foundHosts[port.HostName] = true
						hostNames = append(hostNames, port.HostName)
					}
				}
			}
		}
	}
	klog.Infof("Found %d existing hosts via host port discovery: %v", len(hostNames), hostNames)
	return hostNames, nil
}
// findAllHostsByWWPNs searches for hosts with WWPNs using the unified function.
func (c *FlashSystemClonner) findAllHostsByWWPNs(wwpns []string) ([]string, error) {
	return c.findAllHostsByIdentifiers(wwpns, "WWPNs")
}

// findAllHostsByIQNs searches for hosts with IQNs using the unified function.
func (c *FlashSystemClonner) findAllHostsByIQNs(iqns []string) ([]string, error) {
	return c.findAllHostsByIdentifiers(iqns, "IQNs")
}
// getHostDetailsByName gets detailed information about a specific host and
// returns the first FlashSystemHost matching the exact name filter.
func (c *FlashSystemClonner) getHostDetailsByName(hostName string) (*FlashSystemHost, error) {
	payload := map[string]string{
		"filtervalue": fmt.Sprintf("name=%s", hostName),
	}
	body, status, err := c.api.makeRequest("POST", "/lshost", payload)
	if err != nil || status != http.StatusOK {
		return nil, fmt.Errorf("failed to get host details for %s: %w, status: %d", hostName, err, status)
	}
	var matches []FlashSystemHost
	if unmarshalErr := json.Unmarshal(body, &matches); unmarshalErr != nil {
		return nil, fmt.Errorf("failed to unmarshal host details: %w", unmarshalErr)
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("host %s not found", hostName)
	}
	return &matches[0], nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/vsphere-xcopy-volume-populator/vmkfstools-wrapper/script.go | cmd/vsphere-xcopy-volume-populator/vmkfstools-wrapper/script.go | package vmkfstoolswrapper
import _ "embed"
// Script holds the contents of vmkfstools_wrapper.py, embedded at build time.
//
//go:embed vmkfstools_wrapper.py
var Script []byte
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/main.go | cmd/ova-provider-server/main.go | package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/kubev2v/forklift/cmd/ova-provider-server/api"
"github.com/kubev2v/forklift/cmd/ova-provider-server/auth"
"github.com/kubev2v/forklift/cmd/ova-provider-server/settings"
"github.com/kubev2v/forklift/pkg/lib/logging"
)
// Settings is a package-level alias for the shared server settings.
var Settings = &settings.Settings

// log is the named logger for this entry point.
var log = logging.WithName("ova|main")
// main loads the settings, wires up the gin router with the inventory routes
// (and, when enabled, the authenticated appliance routes), and serves on the
// configured port. Any error returned by the router is logged on exit.
func main() {
	var err error
	// Log the router error (if any) once main unwinds.
	defer func() {
		if err != nil {
			log.Error(err, "router returned error")
		}
	}()
	err = Settings.Load()
	if err != nil {
		log.Error(err, "failed to load settings")
		panic(err)
	}
	log.Info("Started", "settings", Settings)
	router := gin.Default()
	router.Use(api.ErrorHandler())
	inventory := api.InventoryHandler{}
	inventory.AddRoutes(router)
	// Appliance endpoints are optional and guarded by Provider-CR-based auth.
	if Settings.ApplianceEndpoints {
		appliances := api.ApplianceHandler{
			OVAStoragePath: Settings.CatalogPath,
			AuthRequired:   Settings.Auth.Required,
			Auth: auth.NewProviderAuth(
				Settings.Provider.Namespace,
				Settings.Provider.Name,
				Settings.Provider.Verb,
				Settings.Auth.TTL),
		}
		appliances.AddRoutes(router)
	}
	// Blocks until the server stops; err is reported by the deferred logger.
	err = router.Run(fmt.Sprintf(":%s", Settings.Port))
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/auth/auth.go | cmd/ova-provider-server/auth/auth.go | package auth
import (
"context"
"strings"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/kubev2v/forklift/pkg/lib/logging"
authz "k8s.io/api/authorization/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
const (
	// ForkliftGroup is the API group of the Provider resource checked by the
	// access review.
	ForkliftGroup = "forklift.konveyor.io"
	// ProviderResource is the resource plural used in the access review.
	ProviderResource = "providers"
)

// log is the named logger for this package.
var log = logging.WithName("auth")
// NewProviderAuth builds a ProviderAuth that authorizes bearer tokens against
// the named Provider CR, caching successful checks for ttl seconds.
func NewProviderAuth(namespace string, name string, verb string, ttl int) *ProviderAuth {
	return &ProviderAuth{
		TTL:       time.Duration(ttl) * time.Second,
		Namespace: namespace,
		Name:      name,
		Verb:      verb,
		cache:     make(map[string]time.Time),
	}
}
// ProviderAuth uses a SelfSubjectAccessReview
// to perform user auth related to one specific Provider CR.
type ProviderAuth struct {
	// TTL is how long a successful authorization is cached per token.
	TTL time.Duration
	// mutex guards cache.
	mutex sync.Mutex
	// cache maps bearer token -> time the token was last authorized.
	cache map[string]time.Time
	// Verb is the RBAC verb checked against the Provider CR.
	Verb string
	// Namespace of the Provider CR.
	Namespace string
	// Name of the Provider CR.
	Name string
}
// Permit determines if the request should be permitted by
// checking that the user (identified by bearer token) has
// permissions on the specified Provider CR.
//
// Successful checks are cached per token for TTL; expired entries are pruned
// on every call. Returns false when the target Provider is unset, no bearer
// token is present, or the access review errors or denies.
func (r *ProviderAuth) Permit(ctx *gin.Context) (allowed bool) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.prune()
	// Without a target Provider there is nothing to authorize against.
	if r.Name == "" || r.Namespace == "" {
		return
	}
	token, ok := r.token(ctx)
	if !ok {
		return
	}
	// Fresh cache hit: allow without an API round-trip.
	if t, found := r.cache[token]; found {
		if time.Since(t) <= r.TTL {
			allowed = true
			return
		}
	}
	allowed, err := r.permit(token)
	if err != nil {
		log.Error(err, "Authorization failed.")
		return
	}
	if allowed {
		r.cache[token] = time.Now()
	} else {
		// Drop any stale cache entry for a token that is now denied.
		delete(r.cache, token)
	}
	return
}
// Perform an SSAR to determine if the user has access to this provider.
// The review runs through a client authenticated with the user's own
// bearer token, so the answer reflects the user's RBAC, not the server's.
func (r *ProviderAuth) permit(token string) (allowed bool, err error) {
	client, err := r.client(token)
	if err != nil {
		return
	}
	review := &authz.SelfSubjectAccessReview{
		Spec: authz.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authz.ResourceAttributes{
				Group:     ForkliftGroup,
				Resource:  ProviderResource,
				Verb:      r.Verb,
				Namespace: r.Namespace,
				Name:      r.Name,
			},
		},
	}
	// The API server fills in review.Status on create.
	err = client.Create(context.TODO(), review)
	if err != nil {
		return
	}
	allowed = review.Status.Allowed
	return
}
// token extracts the bearer token from the Authorization header.
// ok is false when the header is absent or not in "Bearer <token>" form.
func (r *ProviderAuth) token(ctx *gin.Context) (token string, ok bool) {
	parts := strings.Fields(ctx.GetHeader("Authorization"))
	if len(parts) != 2 || parts[0] != "Bearer" {
		return "", false
	}
	return parts[1], true
}
// prune evicts cache entries older than the configured TTL.
// The caller must hold r.mutex.
func (r *ProviderAuth) prune() {
	for cachedToken, stamp := range r.cache {
		if time.Since(stamp) > r.TTL {
			delete(r.cache, cachedToken)
		}
	}
}
// Build API client with user token.
// The in-cluster/kubeconfig rest config is reused for connection details,
// but the identity is swapped: the service-account token file is cleared
// and the caller's bearer token installed instead.
func (r *ProviderAuth) client(token string) (client k8sclient.Client, err error) {
	var cfg *rest.Config
	cfg, err = config.GetConfig()
	if err != nil {
		return
	}
	// Use the caller's credentials, not the pod's service account.
	cfg.BearerTokenFile = ""
	cfg.BearerToken = token
	client, err = k8sclient.New(
		cfg,
		k8sclient.Options{
			Scheme: scheme.Scheme,
		})
	return
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/settings/settings.go | cmd/ova-provider-server/settings/settings.go | package settings
import (
"os"
"strconv"
)
// Environment variables.
const (
	// EnvApplianceEndpoints toggles the appliance upload/list endpoints.
	EnvApplianceEndpoints = "APPLIANCE_ENDPOINTS"
	// EnvAuthRequired toggles k8s token authorization.
	EnvAuthRequired = "AUTH_REQUIRED"
	// EnvCatalogPath overrides the OVA catalog directory (default /ova).
	EnvCatalogPath = "CATALOG_PATH"
	// EnvPort overrides the listen port (default 8080).
	EnvPort = "PORT"
	// EnvProviderNamespace and EnvProviderName identify the Provider CR
	// that authorization checks are performed against.
	EnvProviderNamespace = "PROVIDER_NAMESPACE"
	EnvProviderName = "PROVIDER_NAME"
	// EnvProviderVerb is the RBAC verb checked (default "get").
	EnvProviderVerb = "PROVIDER_VERB"
	// EnvTokenCacheTTL is the token review cache TTL in seconds.
	EnvTokenCacheTTL = "TOKEN_CACHE_TTL"
)
// Settings is the process-wide configuration instance,
// populated by Load() at startup.
var Settings OVASettings

// OVASettings is the ova-provider-server runtime configuration,
// sourced entirely from environment variables (see Load).
type OVASettings struct {
	// Whether the appliance management endpoints are enabled
	ApplianceEndpoints bool
	Auth struct {
		// Whether (k8s) auth is required. If true,
		// the user's token must have access to the related
		// provider CR.
		Required bool
		// How long to cache a valid token review (seconds)
		TTL int
	}
	// Path to OVA appliance directory
	CatalogPath string
	// Port to serve on
	Port string
	// Provider details
	Provider struct {
		Name string
		Namespace string
		Verb string
	}
}
// Load populates the settings from environment variables, applying
// documented defaults for anything unset. It never fails; err is kept
// in the signature for interface stability and is always nil.
func (r *OVASettings) Load() (err error) {
	// getEnvStr returns the named variable's value, or def when unset.
	// Replaces the repeated LookupEnv/if-else boilerplate.
	getEnvStr := func(name, def string) string {
		if s, found := os.LookupEnv(name); found {
			return s
		}
		return def
	}
	r.ApplianceEndpoints = getEnvBool(EnvApplianceEndpoints, false)
	r.Auth.Required = getEnvBool(EnvAuthRequired, true)
	r.Auth.TTL = getEnvInt(EnvTokenCacheTTL, 10)
	r.CatalogPath = getEnvStr(EnvCatalogPath, "/ova")
	r.Port = getEnvStr(EnvPort, "8080")
	// Name and Namespace default to "" (unset), matching the zero value.
	r.Provider.Name = getEnvStr(EnvProviderName, "")
	r.Provider.Namespace = getEnvStr(EnvProviderNamespace, "")
	r.Provider.Verb = getEnvStr(EnvProviderVerb, "get")
	return
}
// getEnvBool reads the named environment variable as a boolean.
// def is returned when the variable is unset or not parsable.
func getEnvBool(name string, def bool) bool {
	s, found := os.LookupEnv(name)
	if !found {
		return def
	}
	parsed, err := strconv.ParseBool(s)
	if err != nil {
		return def
	}
	return parsed
}
// getEnvInt reads the named environment variable as an integer.
// def is returned when the variable is unset or not a valid int.
func getEnvInt(name string, def int) int {
	s, found := os.LookupEnv(name)
	if !found {
		return def
	}
	if parsed, err := strconv.Atoi(s); err == nil {
		return parsed
	}
	return def
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/inventory/uuid.go | cmd/ova-provider-server/inventory/uuid.go | package inventory
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"log"
"github.com/kubev2v/forklift/pkg/lib/gob"
"github.com/google/uuid"
)
// UUIDMap memoizes generated IDs by key, so repeated lookups for the
// same key always return the same identifier within a process lifetime.
type UUIDMap struct {
	// m maps caller-supplied key -> generated ID.
	m map[string]string
}

// NewUUIDMap returns an empty, ready-to-use UUIDMap.
func NewUUIDMap() *UUIDMap {
	return &UUIDMap{
		m: make(map[string]string),
	}
}
// GetUUID returns a stable identifier for key, generating one on first
// use by gob-encoding object, hashing the bytes with SHA-256 and
// hex-encoding the digest truncated to 36 characters.
// NOTE(review): the result is 36 hex characters, not an RFC 4122 UUID —
// it will not pass isValidUUID; confirm consumers only need a stable
// unique string, not a syntactically valid UUID.
func (um *UUIDMap) GetUUID(object interface{}, key string) string {
	var id string
	id, ok := um.m[key]
	if !ok {
		// First sight of key: fingerprint the object deterministically.
		var buf bytes.Buffer
		enc := gob.NewEncoder(&buf)
		if err := enc.Encode(object); err != nil {
			// NOTE(review): a gob encode failure terminates the process.
			log.Fatal(err)
		}
		hash := sha256.Sum256(buf.Bytes())
		id = hex.EncodeToString(hash[:])
		// Trim to a UUID-like length of 36 characters.
		if len(id) > 36 {
			id = id[:36]
		}
		um.m[key] = id
	}
	return id
}
// isValidUUID reports whether id parses as a UUID according to
// github.com/google/uuid.
func isValidUUID(id string) bool {
	_, err := uuid.Parse(id)
	return err == nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/inventory/convert.go | cmd/ova-provider-server/inventory/convert.go | package inventory
import (
"strconv"
"strings"
"unicode"
"github.com/kubev2v/forklift/cmd/ova-provider-server/ova"
)
var vmIDmap *UUIDMap
var diskIDMap *UUIDMap
var networkIDMap *UUIDMap
func init() {
vmIDmap = NewUUIDMap()
diskIDMap = NewUUIDMap()
networkIDMap = NewUUIDMap()
}
// ResourceTypes
// Numeric hardware item types from the CIM
// ResourceAllocationSettingData.ResourceType enumeration used by OVF.
const (
	ResourceTypeProcessor = 3
	ResourceTypeMemory = 4
	ResourceTypeEthernetAdapter = 10
	ResourceTypeHardDiskDrive = 17
)
// ConvertToVmStruct flattens parsed OVF envelopes into inventory VM
// records. envelope and ovaPath are index-aligned: ovaPath[i] is the
// file envelope[i] was read from.
func ConvertToVmStruct(envelope []ova.Envelope, ovaPath []string) []ova.VM {
	var vms []ova.VM
	for i := 0; i < len(envelope); i++ {
		vmXml := envelope[i]
		for _, virtualSystem := range vmXml.VirtualSystem {
			// Initialize a new VM
			newVM := ova.VM{
				OvaPath: ovaPath[i],
				OvaSource: ova.GuessSource(vmXml),
				Name: virtualSystem.Name,
				OsType: virtualSystem.OperatingSystemSection.OsType,
			}
			// Map hardware items onto typed fields by CIM resource type;
			// anything unrecognized becomes a generic Device entry.
			for _, item := range virtualSystem.HardwareSection.Items {
				switch item.ResourceType {
				case ResourceTypeProcessor:
					newVM.CpuCount = item.VirtualQuantity
					newVM.CpuUnits = item.AllocationUnits
					if item.CoresPerSocket != "" {
						num, err := strconv.ParseInt(item.CoresPerSocket, 10, 32)
						if err != nil {
							// Unparsable value: fall back to one core per socket.
							newVM.CoresPerSocket = 1
						} else {
							newVM.CoresPerSocket = int32(num)
						}
					}
				case ResourceTypeMemory:
					newVM.MemoryMB = item.VirtualQuantity
					newVM.MemoryUnits = item.AllocationUnits
				case ResourceTypeEthernetAdapter:
					newVM.NICs = append(newVM.NICs, ova.NIC{
						Name: item.ElementName,
						MAC: item.Address,
						Network: item.Connection,
					})
				default:
					var itemKind string
					if len(item.ElementName) > 0 {
						// if the `ElementName` element has a name such as "Hard Disk 1", strip off the
						// number suffix to try to get a more generic name for the device type
						itemKind = strings.TrimRightFunc(item.ElementName, func(r rune) bool {
							return unicode.IsDigit(r) || unicode.IsSpace(r)
						})
					} else {
						// Some .ova files do not include an `ElementName` element for each device. Fall
						// back to using the `Description` element
						itemKind = item.Description
					}
					if len(itemKind) == 0 {
						itemKind = "Unknown"
					}
					newVM.Devices = append(newVM.Devices, ova.Device{
						Kind: itemKind,
					})
				}
			}
			// Disks are paired with <File> references by position.
			// NOTE(review): References.File[j] is indexed without a bounds
			// check and FileRef is not consulted; a malformed OVF with
			// fewer File entries than Disk entries would panic — confirm
			// inputs are always well-formed here.
			for j, disk := range vmXml.DiskSection.Disks {
				name := envelope[i].References.File[j].Href
				newVM.Disks = append(newVM.Disks, ova.VmDisk{
					FilePath: getDiskPath(ovaPath[i]),
					Capacity: disk.Capacity,
					CapacityAllocationUnits: disk.CapacityAllocationUnits,
					DiskId: disk.DiskId,
					FileRef: disk.FileRef,
					Format: disk.Format,
					PopulatedSize: disk.PopulatedSize,
					Name: name,
				})
				// Derive a stable disk ID from the disk record + path key.
				newVM.Disks[j].ID = diskIDMap.GetUUID(newVM.Disks[j], ovaPath[i]+"/"+name)
			}
			for _, network := range vmXml.NetworkSection.Networks {
				newVM.Networks = append(newVM.Networks, ova.VmNetwork{
					Name: network.Name,
					Description: network.Description,
					ID: networkIDMap.GetUUID(network.Name, network.Name),
				})
			}
			// Fold vmw Config/ExtraConfig key/value pairs into the VM.
			newVM.ApplyVirtualConfig(virtualSystem.HardwareSection.Configs)
			newVM.ApplyExtraVirtualConfig(virtualSystem.HardwareSection.ExtraConfig)
			// Prefer the OVF-declared system id when it is a valid UUID;
			// otherwise derive a stable one from the VM contents.
			var id string
			if isValidUUID(virtualSystem.ID) {
				id = virtualSystem.ID
			} else {
				id = vmIDmap.GetUUID(newVM, ovaPath[i])
			}
			newVM.UUID = id
			vms = append(vms, newVM)
		}
	}
	return vms
}
// ConvertToNetworkStruct flattens the NetworkSection of every envelope
// into inventory network records, with stable generated IDs.
func ConvertToNetworkStruct(envelopes []ova.Envelope) []ova.VmNetwork {
	var networks []ova.VmNetwork
	for _, env := range envelopes {
		for _, net := range env.NetworkSection.Networks {
			networks = append(networks, ova.VmNetwork{
				Name:        net.Name,
				Description: net.Description,
				ID:          networkIDMap.GetUUID(net.Name, net.Name),
			})
		}
	}
	return networks
}
// ConvertToDiskStruct flattens the DiskSection of every envelope into
// inventory disk records. envelopes and ovaPath are index-aligned.
// Disks are paired with <File> references by position; a malformed OVF
// that declares more <Disk> than <File> entries is truncated instead of
// panicking on an out-of-range index.
func ConvertToDiskStruct(envelopes []ova.Envelope, ovaPath []string) []ova.VmDisk {
	var disks []ova.VmDisk
	for i, envelope := range envelopes {
		files := envelope.References.File
		for j, disk := range envelope.DiskSection.Disks {
			// Guard: positional pairing — once j runs past the file
			// references, no later disk can be resolved either.
			if j >= len(files) {
				break
			}
			name := files[j].Href
			newDisk := ova.VmDisk{
				FilePath:                getDiskPath(ovaPath[i]),
				Capacity:                disk.Capacity,
				CapacityAllocationUnits: disk.CapacityAllocationUnits,
				DiskId:                  disk.DiskId,
				FileRef:                 disk.FileRef,
				Format:                  disk.Format,
				PopulatedSize:           disk.PopulatedSize,
				Name:                    name,
			}
			// Stable generated ID keyed by archive path + disk file name.
			newDisk.ID = diskIDMap.GetUUID(newDisk, ovaPath[i]+"/"+name)
			disks = append(disks, newDisk)
		}
	}
	return disks
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/inventory/scan.go | cmd/ova-provider-server/inventory/scan.go | package inventory
import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/kubev2v/forklift/cmd/ova-provider-server/ova"
)
// ScanForAppliances walks the catalog path, parses every OVA and OVF it
// finds, and returns the parsed envelopes alongside the path each one was
// read from (the two slices are index-aligned).
// Files that appear to still be copying, or that fail to parse, are
// skipped with a log message rather than failing the whole scan.
func ScanForAppliances(path string) (envelopes []ova.Envelope, ovaPaths []string) {
	ovaFiles, ovfFiles, err := findOVAFiles(path)
	if err != nil {
		// Fixed typo in message: "anf" -> "and".
		fmt.Println("Error finding OVA and OVF files:", err)
		return
	}
	var filesPath []string
	for _, ovaFile := range ovaFiles {
		fmt.Println("Processing OVA file:", ovaFile)
		if !isFileComplete(ovaFile) {
			log.Printf("Skipping %s: file still being copied\n", ovaFile)
			continue
		}
		xmlStruct, err := ova.ExtractEnvelope(ovaFile)
		if err != nil {
			log.Printf("Error processing OVF from OVA %s: %v\n", ovaFile, err)
			continue
		}
		envelopes = append(envelopes, *xmlStruct)
		filesPath = append(filesPath, ovaFile)
	}
	for _, ovfFile := range ovfFiles {
		fmt.Println("Processing OVF file:", ovfFile)
		if !isFileComplete(ovfFile) {
			log.Printf("Skipping %s: file still being copied\n", ovfFile)
			continue
		}
		xmlStruct, err := ova.ReadEnvelope(ovfFile)
		if err != nil {
			if strings.Contains(err.Error(), "still being copied") {
				log.Printf("Skipping %s: %v\n", ovfFile, err)
			} else {
				log.Printf("Error processing OVF %s: %v\n", ovfFile, err)
			}
			continue
		}
		envelopes = append(envelopes, *xmlStruct)
		filesPath = append(filesPath, ovfFile)
	}
	return envelopes, filesPath
}
// findOVAFiles walks directory and collects .ova files up to two levels
// deep and .ovf files up to three levels deep (case-insensitive match).
// Depth 1 is a file directly inside directory.
func findOVAFiles(directory string) (ovaFiles []string, ovfFiles []string, err error) {
	const ovaMaxDepth = 2
	visit := func(path string, entry os.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if entry.IsDir() {
			return nil
		}
		rel, _ := filepath.Rel(directory, path)
		depth := len(strings.Split(rel, string(filepath.Separator)))
		name := entry.Name()
		switch {
		case depth <= ovaMaxDepth && isOva(name):
			ovaFiles = append(ovaFiles, path)
		case depth <= ovaMaxDepth+1 && isOvf(name):
			ovfFiles = append(ovfFiles, path)
		}
		return nil
	}
	if err = filepath.WalkDir(directory, visit); err != nil {
		fmt.Println("Error scanning OVA and OVF files: ", err)
		return nil, nil, err
	}
	return
}
// isOva reports whether filename has the .ova extension, ignoring case.
func isOva(filename string) bool {
	return hasSuffixIgnoreCase(filename, ova.ExtOVA)
}
// isOvf reports whether filename has the .ovf extension, ignoring case.
func isOvf(filename string) bool {
	return hasSuffixIgnoreCase(filename, ova.ExtOVF)
}
// hasSuffixIgnoreCase reports whether fileName ends with suffix,
// compared case-insensitively (both sides lowercased).
func hasSuffixIgnoreCase(fileName, suffix string) bool {
	loweredName := strings.ToLower(fileName)
	loweredSuffix := strings.ToLower(suffix)
	return strings.HasSuffix(loweredName, loweredSuffix)
}
// isFileComplete reports whether the file looks fully copied: it exists,
// is non-empty, and has not been modified within the last 30 seconds.
func isFileComplete(filePath string) bool {
	info, err := os.Stat(filePath)
	if err != nil {
		return false
	}
	// Exclude zero-byte files (common placeholder pattern).
	if info.Size() == 0 {
		return false
	}
	return time.Since(info.ModTime()) > 30*time.Second
}
// getDiskPath returns the directory (with trailing slash) containing an
// .ovf file; any other path is returned unchanged. Note the extension
// match is case-sensitive, mirroring filepath.Ext's verbatim result.
func getDiskPath(path string) string {
	if filepath.Ext(path) != ".ovf" {
		return path
	}
	if slash := strings.LastIndex(path, "/"); slash >= 0 {
		// Keep the trailing separator.
		return path[:slash+1]
	}
	return path
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/inventory/nfs_test.go | cmd/ova-provider-server/inventory/nfs_test.go | //nolint:errcheck
package inventory
import (
"os"
"path/filepath"
"testing"
. "github.com/onsi/gomega"
)
// TestFindOVAFiles verifies findOVAFiles' extension filtering and depth
// limits (.ova files up to 2 levels below the root, .ovf files up to 3)
// using per-case temp directory layouts.
func TestFindOVAFiles(t *testing.T) {
	g := NewGomegaWithT(t)
	tests := []struct {
		name         string
		setup        func(directory string)
		expectedOVAs []string
		expectedOVFs []string
		expectError  bool
	}{
		{
			name: "basic structure",
			setup: func(directory string) {
				os.MkdirAll(filepath.Join(directory, "subdir1", "subdir2"), 0755)
				os.WriteFile(filepath.Join(directory, "test.ova"), []byte{}, 0644)
				os.WriteFile(filepath.Join(directory, "test.ovf"), []byte{}, 0644)
				os.WriteFile(filepath.Join(directory, "subdir1", "test1.ova"), []byte{}, 0644)
				os.WriteFile(filepath.Join(directory, "subdir1", "test1.ovf"), []byte{}, 0644)
				os.WriteFile(filepath.Join(directory, "subdir1", "subdir2", "test2.ovf"), []byte{}, 0644)
			},
			expectedOVAs: []string{"test.ova", "subdir1/test1.ova"},
			expectedOVFs: []string{"test.ovf", "subdir1/test1.ovf", "subdir1/subdir2/test2.ovf"},
			expectError:  false,
		},
		{
			name: "non-existent directory",
			setup: func(directory string) {
				os.RemoveAll(directory)
			},
			expectedOVAs: nil,
			expectedOVFs: nil,
			expectError:  true,
		},
		{
			name: "non-ova/ovf files",
			setup: func(directory string) {
				os.WriteFile(filepath.Join(directory, "test.txt"), []byte{}, 0644)
			},
			expectedOVAs: nil,
			expectedOVFs: nil,
			expectError:  false,
		},
		{
			// .ova deeper than 2 levels must be ignored.
			name: "incorrect depth ova",
			setup: func(directory string) {
				os.MkdirAll(filepath.Join(directory, "subdir1", "subdir2"), 0755)
				os.WriteFile(filepath.Join(directory, "subdir1", "subdir2", "test3.ova"), []byte{}, 0644)
			},
			expectedOVAs: nil,
			expectedOVFs: nil,
			expectError:  false,
		},
		{
			// .ovf deeper than 3 levels must be ignored.
			name: "incorrect depth ovf",
			setup: func(directory string) {
				os.MkdirAll(filepath.Join(directory, "subdir1", "subdir2", "subdir3"), 0755)
				os.WriteFile(filepath.Join(directory, "subdir1", "subdir2", "subdir3", "test3.ovf"), []byte{}, 0644)
			},
			expectedOVAs: nil,
			expectedOVFs: nil,
			expectError:  false,
		},
		{
			// Directories whose names carry the extension are not files.
			name: "folder with extension",
			setup: func(directory string) {
				os.MkdirAll(filepath.Join(directory, "subdir1.ova"), 0755)
				os.MkdirAll(filepath.Join(directory, "subdir2.ovf"), 0755)
			},
			expectedOVAs: nil,
			expectedOVFs: nil,
			expectError:  false,
		},
		{
			name: "files inside folders with extension",
			setup: func(directory string) {
				os.MkdirAll(filepath.Join(directory, "subdir1.ova"), 0755)
				os.MkdirAll(filepath.Join(directory, "subdir2.ovf"), 0755)
				os.WriteFile(filepath.Join(directory, "subdir1.ova", "test.ova"), []byte{}, 0644)
				os.WriteFile(filepath.Join(directory, "subdir2.ovf", "test.ovf"), []byte{}, 0644)
			},
			expectedOVAs: []string{"subdir1.ova/test.ova"},
			expectedOVFs: []string{"subdir2.ovf/test.ovf"},
			expectError:  false,
		},
	}
	for _, testCase := range tests {
		t.Run(testCase.name, func(t *testing.T) {
			testDir, err := os.MkdirTemp("", "ova_test")
			g.Expect(err).NotTo(HaveOccurred())
			testCase.setup(testDir)
			// Rewrite expected relative paths to absolute paths under testDir.
			for i, relPath := range testCase.expectedOVAs {
				testCase.expectedOVAs[i] = filepath.Join(testDir, relPath)
			}
			for i, relPath := range testCase.expectedOVFs {
				testCase.expectedOVFs[i] = filepath.Join(testDir, relPath)
			}
			ovaFiles, ovfFiles, err := findOVAFiles(testDir)
			if testCase.expectError {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
				g.Expect(ovaFiles).To(ConsistOf(testCase.expectedOVAs))
				g.Expect(ovfFiles).To(ConsistOf(testCase.expectedOVFs))
			}
			os.RemoveAll(testDir)
		})
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/api/base.go | cmd/ova-provider-server/api/base.go | package api
import (
"errors"
"net/http"
"github.com/gin-gonic/gin"
"github.com/kubev2v/forklift/pkg/lib/logging"
)
var log = logging.WithName("ova|api")
// BadRequestError is rendered as HTTP 400 by ErrorHandler.
type BadRequestError struct {
	// Reason is returned to the client as the error message.
	Reason string
}

// Error implements the error interface.
func (r *BadRequestError) Error() string { return r.Reason }

// ConflictError is rendered as HTTP 409 by ErrorHandler.
type ConflictError struct {
	// Reason is returned to the client as the error message.
	Reason string
}

// Error implements the error interface.
func (r *ConflictError) Error() string { return r.Reason }
// ErrorHandler renders error conditions from lower handlers.
// It inspects the last error attached to the gin context and maps
// BadRequestError -> 400, ConflictError -> 409, anything else -> 500.
func ErrorHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		// Run the downstream handlers first; they attach errors via ctx.Error.
		ctx.Next()
		if len(ctx.Errors) == 0 {
			return
		}
		err := ctx.Errors.Last()
		// errors.As unwraps the gin error chain to the typed cause.
		badRequest := &BadRequestError{}
		if errors.As(err, &badRequest) {
			ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		conflict := &ConflictError{}
		if errors.As(err, &conflict) {
			ctx.JSON(http.StatusConflict, gin.H{"error": err.Error()})
			return
		}
		ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/api/inventory.go | cmd/ova-provider-server/api/inventory.go | package api
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/kubev2v/forklift/cmd/ova-provider-server/inventory"
)
// Route paths served by the inventory handler.
const (
	VMsRoute = "/vms"
	NetworksRoute = "/networks"
	DisksRoute = "/disks"
	TestConnectionRoute = "/test_connection"
)
// InventoryHandler serves routes consumed by the Forklift inventory service.
// All endpoints are read-only GETs that re-scan the catalog on each call.
type InventoryHandler struct{}

// AddRoutes adds inventory routes to a gin router.
func (h InventoryHandler) AddRoutes(e *gin.Engine) {
	router := e.Group("/")
	router.GET(VMsRoute, h.VMs)
	router.GET(NetworksRoute, h.Networks)
	router.GET(DisksRoute, h.Disks)
	router.GET(TestConnectionRoute, h.TestConnection)
}
// VMs godoc
// @summary List all VMs structs that can be extracted from all OVAs/OVFs in the catalog.
// @description List all VMs structs that can be extracted from all OVAs/OVFs in the catalog.
// @tags inventory
// @produce json
// @success 200 {array} ova.VM
// @router /vms [get]
func (h InventoryHandler) VMs(ctx *gin.Context) {
	envelopes, paths := inventory.ScanForAppliances(Settings.CatalogPath)
	ctx.JSON(http.StatusOK, inventory.ConvertToVmStruct(envelopes, paths))
}
// Networks godoc
// @summary List all network structs that can be extracted from all OVAs/OVFs in the catalog.
// @description List all network structs that can be extracted from all OVAs/OVFs in the catalog.
// @tags inventory
// @produce json
// @success 200 {array} ova.VmNetwork
// @router /networks [get]
func (h InventoryHandler) Networks(ctx *gin.Context) {
	envelopes, _ := inventory.ScanForAppliances(Settings.CatalogPath)
	ctx.JSON(http.StatusOK, inventory.ConvertToNetworkStruct(envelopes))
}
// Disks godoc
// @summary List all disk structs that can be extracted from all OVAs/OVFs in the catalog.
// @description List all disk structs that can be extracted from all OVAs/OVFs in the catalog.
// @tags inventory
// @produce json
// @success 200 {array} ova.VmDisk
// @router /disks [get]
func (h InventoryHandler) Disks(ctx *gin.Context) {
	envelopes, paths := inventory.ScanForAppliances(Settings.CatalogPath)
	ctx.JSON(http.StatusOK, inventory.ConvertToDiskStruct(envelopes, paths))
}
// TestConnection godoc
// @summary Connectivity probe for the inventory service.
// @description Always returns 200; reachability itself is the test.
// @tags inventory
// @produce json
// @success 200 {string} string
// @router /test_connection [get]
func (h InventoryHandler) TestConnection(ctx *gin.Context) {
	ctx.JSON(http.StatusOK, "Test connection successful")
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/api/appliance.go | cmd/ova-provider-server/api/appliance.go | package api
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
pathlib "path/filepath"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/kubev2v/forklift/cmd/ova-provider-server/auth"
"github.com/kubev2v/forklift/cmd/ova-provider-server/ova"
"github.com/kubev2v/forklift/cmd/ova-provider-server/settings"
)
// Settings aliases the process-wide configuration.
var Settings = &settings.Settings

const (
	// AppliancesRoute is the collection endpoint (list/upload).
	AppliancesRoute = "/appliances"
	// ApplianceRoute addresses a single appliance by filename.
	ApplianceRoute = AppliancesRoute + "/:" + Filename
	// Filename is the route parameter name.
	Filename = "filename"
	// DirectoryPrefix prefixes each per-appliance directory name.
	DirectoryPrefix = "appliance-"
	// ApplianceField is the multipart form field carrying the OVA.
	ApplianceField = "file"
)
// ApplianceInfo JSON resource
// describing one OVA in the catalog; parse failures are reported in
// the Error field rather than as a separate error value.
type ApplianceInfo struct {
	File string `json:"file"`
	Size int64 `json:"size,omitempty"`
	Modified *time.Time `json:"modified,omitempty"`
	Error string `json:"error,omitempty"`
	Source string `json:"source,omitempty"`
	VirtualSystems []Ref `json:"virtualSystems"`
}

// OK reports whether the appliance parsed without error.
func (r *ApplianceInfo) OK() bool {
	return r.Error == ""
}

// Ref is a name/id pair for one virtual system in an appliance.
type Ref struct {
	Name string `json:"name"`
	ID string `json:"id"`
}
// ApplianceHandler serves appliance management routes.
type ApplianceHandler struct {
	// OVAStoragePath is the catalog root directory.
	OVAStoragePath string
	// AuthRequired gates every endpoint behind Auth.Permit.
	AuthRequired bool
	// Auth authorizes callers against the configured Provider CR.
	Auth *auth.ProviderAuth
}
// AddRoutes adds appliance management routes to a gin router.
// GET lists the catalog; POST uploads a new OVA.
func (h ApplianceHandler) AddRoutes(e *gin.Engine) {
	router := e.Group("/")
	router.GET(AppliancesRoute, h.List)
	router.POST(AppliancesRoute, h.Upload)
	// leave the delete endpoint disabled for now
	// router.DELETE(ApplianceRoute, h.Delete)
}
// List godoc
// @summary Lists the appliances that are present in the catalog.
// @description Lists the appliances that are present in the catalog.
// @tags appliances
// @produce json
// @success 200 {array} ApplianceInfo
// @router /appliances [get]
func (h ApplianceHandler) List(ctx *gin.Context) {
	if !h.permitted(ctx) {
		ctx.AbortWithStatus(http.StatusForbidden)
		return
	}
	entries, err := os.ReadDir(h.OVAStoragePath)
	if err != nil {
		_ = ctx.Error(err)
		return
	}
	// Non-nil slice so an empty catalog marshals as [] rather than null.
	appliances := make([]*ApplianceInfo, 0)
	for _, entry := range entries {
		// Uploaded appliances live in appliance-<hash> directories only.
		if !entry.IsDir() || !strings.HasPrefix(entry.Name(), DirectoryPrefix) {
			continue
		}
		dirPath := pathlib.Join(h.OVAStoragePath, entry.Name())
		dirEntries, dErr := os.ReadDir(dirPath)
		if dErr != nil {
			// Log and keep going; one bad directory shouldn't hide the rest.
			log.Error(dErr, "couldn't read directory", "dir", dirPath)
			continue
		}
		for _, dirEntry := range dirEntries {
			if dirEntry.IsDir() {
				continue
			}
			if !strings.HasSuffix(strings.ToLower(dirEntry.Name()), ova.ExtOVA) {
				continue
			}
			info, fErr := dirEntry.Info()
			if fErr != nil {
				continue
			}
			appliance := h.applianceInfo(info)
			appliances = append(appliances, appliance)
		}
	}
	ctx.JSON(http.StatusOK, appliances)
}
// Upload godoc
// @summary Accepts upload of an OVA to the catalog.
// @description Accepts upload of an OVA to the catalog.
// @tags appliances
// @success 200 {object} ApplianceInfo
// @router /appliances [post]
func (h ApplianceHandler) Upload(ctx *gin.Context) {
	if !h.permitted(ctx) {
		ctx.AbortWithStatus(http.StatusForbidden)
		return
	}
	// Fail fast when the catalog volume is not writable.
	err := h.writable()
	if err != nil {
		err = &BadRequestError{err.Error()}
		_ = ctx.Error(err)
		return
	}
	input, err := ctx.FormFile(ApplianceField)
	if err != nil {
		err = &BadRequestError{err.Error()}
		_ = ctx.Error(err)
		return
	}
	// Base() strips any client-supplied directory components.
	filename := pathlib.Base(input.Filename)
	if !strings.HasSuffix(strings.ToLower(filename), ova.ExtOVA) {
		err = &BadRequestError{"filename must end with .ova extension"}
		_ = ctx.Error(err)
		return
	}
	path := h.fullPath(filename)
	// Reject duplicates; only ErrNotExist means the slot is free.
	_, err = os.Stat(path)
	if err == nil {
		err = &ConflictError{"a file by that name already exists"}
		_ = ctx.Error(err)
		return
	} else {
		if errors.Is(err, os.ErrNotExist) {
			err = nil
		} else {
			_ = ctx.Error(err)
			return
		}
	}
	src, err := input.Open()
	if err != nil {
		err = &BadRequestError{err.Error()}
		_ = ctx.Error(err)
		return
	}
	defer func() {
		_ = src.Close()
	}()
	err = os.MkdirAll(pathlib.Dir(path), 0750)
	if err != nil {
		_ = ctx.Error(err)
		return
	}
	err = h.upload(src, path)
	if err != nil {
		log.Error(err, "failed uploading file")
		// Clean up the per-appliance directory on failure.
		_ = os.RemoveAll(pathlib.Dir(path))
		_ = ctx.Error(err)
		return
	}
	// remove a file that doesn't appear to contain
	// a valid appliance and return an error.
	appliance, err := h.validate(path)
	if err != nil {
		log.Error(err, "failed statting file")
		_ = os.RemoveAll(pathlib.Dir(path))
		_ = ctx.Error(err)
		return
	}
	if !appliance.OK() {
		_ = os.RemoveAll(pathlib.Dir(path))
		_ = ctx.Error(&BadRequestError{appliance.Error})
		return
	}
	ctx.JSON(http.StatusOK, appliance)
}
// upload streams src into a newly created file at path and then
// tightens the file mode to 0640.
func (h ApplianceHandler) upload(src io.Reader, path string) (err error) {
	var dst *os.File
	if dst, err = os.Create(path); err != nil {
		return
	}
	defer func() {
		_ = dst.Close()
	}()
	if _, err = io.Copy(dst, src); err != nil {
		return
	}
	return os.Chmod(path, 0640)
}
// validate stats the uploaded file and builds its ApplianceInfo.
// A non-nil error means the stat itself failed; OVA parse problems are
// surfaced via the ApplianceInfo.Error field instead.
func (h ApplianceHandler) validate(path string) (appliance *ApplianceInfo, err error) {
	info, err := os.Stat(path)
	if err != nil {
		return
	}
	appliance = h.applianceInfo(info)
	return
}
// Delete godoc
// @summary Deletes an OVA from the catalog.
// @description Deletes an OVA from the catalog.
// @tags appliances
// @success 204
// @router /appliances/{filename} [delete]
// @param filename path string true "Filename of OVA in catalog"
func (h ApplianceHandler) Delete(ctx *gin.Context) {
	if !h.permitted(ctx) {
		ctx.AbortWithStatus(http.StatusForbidden)
		return
	}
	// Base() strips any directory components from the route parameter.
	filename := pathlib.Base(ctx.Param(Filename))
	if !strings.HasSuffix(strings.ToLower(filename), ova.ExtOVA) {
		err := &BadRequestError{"filename must end with .ova extension"}
		_ = ctx.Error(err)
		return
	}
	path := h.fullPath(filename)
	_, err := os.Stat(path)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// Idempotent: deleting a missing appliance still returns 204.
			ctx.Status(http.StatusNoContent)
			return
		} else {
			_ = ctx.Error(err)
			return
		}
	}
	// Remove the whole appliance-<hash> directory, not just the file.
	err = os.RemoveAll(pathlib.Dir(path))
	if err != nil {
		_ = ctx.Error(err)
		return
	}
	ctx.Status(http.StatusNoContent)
}
// permitted reports whether the request may proceed: always true when
// auth is disabled, otherwise delegated to the provider authorizer.
func (h ApplianceHandler) permitted(ctx *gin.Context) bool {
	return !h.AuthRequired || h.Auth.Permit(ctx)
}
// applianceInfo builds the ApplianceInfo for a catalog OVA file.
// Parse failures are recorded in the Error field rather than returned.
// NOTE(review): the on-disk location is re-derived from the filename via
// fullPath; this assumes the file lives in its canonical
// appliance-<hash> directory.
func (h ApplianceHandler) applianceInfo(info os.FileInfo) (appliance *ApplianceInfo) {
	modTime := info.ModTime()
	appliance = &ApplianceInfo{
		File: info.Name(),
		Modified: &modTime,
		Size: info.Size(),
	}
	envelope, err := ova.ExtractEnvelope(h.fullPath(info.Name()))
	if err != nil {
		appliance.Error = err.Error()
		return
	}
	appliance.Source = ova.GuessSource(*envelope)
	for _, vs := range envelope.VirtualSystem {
		appliance.VirtualSystems = append(appliance.VirtualSystems, Ref{Name: vs.Name, ID: vs.ID})
	}
	return
}
// fullPath maps an appliance filename to its canonical location:
// <storage>/appliance-<sha256(filename)>/<filename>.
func (h ApplianceHandler) fullPath(filename string) string {
	dir := DirectoryPrefix + string2hash(filename)
	return pathlib.Join(h.OVAStoragePath, dir, filename)
}
// writable probes whether the catalog directory accepts writes by
// exclusively creating (then removing) a marker file. A pre-existing
// marker also counts as writable.
func (h ApplianceHandler) writable() error {
	probe := pathlib.Join(h.OVAStoragePath, ".writeable")
	f, err := os.OpenFile(probe, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
	if errors.Is(err, os.ErrExist) {
		return nil
	}
	if err != nil {
		return err
	}
	_ = f.Close()
	_ = os.Remove(probe)
	return nil
}
// string2hash returns the lowercase hex SHA-256 digest of s.
func string2hash(s string) string {
	sum := sha256.Sum256([]byte(s))
	return hex.EncodeToString(sum[:])
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/ova/vm.go | cmd/ova-provider-server/ova/vm.go | package ova
import "strconv"
// vm struct
// VM is the inventory representation of one virtual machine parsed
// from an OVF envelope (populated by ConvertToVmStruct and the
// Apply*Config methods).
type VM struct {
	Name string
	OvaPath string
	OvaSource string
	OsType string
	RevisionValidated int64
	PolicyVersion int
	// UUID is the OVF system id when valid, else a generated stable id.
	UUID string
	Firmware string
	SecureBoot bool
	CpuAffinity []int32
	CpuHotAddEnabled bool
	CpuHotRemoveEnabled bool
	MemoryHotAddEnabled bool
	FaultToleranceEnabled bool
	CpuCount int32
	CoresPerSocket int32
	MemoryMB int32
	MemoryUnits string
	CpuUnits string
	BalloonedMemory int32
	IpAddress string
	NumaNodeAffinity []string
	StorageUsed int64
	ChangeTrackingEnabled bool
	Devices []Device
	NICs []NIC
	Disks []VmDisk
	Networks []VmNetwork
}
// ApplyVirtualConfig folds OVF Config key/value pairs into the VM.
func (r *VM) ApplyVirtualConfig(configs []VirtualConfig) {
	for _, config := range configs {
		r.apply(config.Key, config.Value)
	}
}
// ApplyExtraVirtualConfig folds OVF ExtraConfig key/value pairs into the VM.
func (r *VM) ApplyExtraVirtualConfig(configs []ExtraVirtualConfig) {
	for _, config := range configs {
		r.apply(config.Key, config.Value)
	}
}
// apply maps one Config/ExtraConfig key/value pair onto the VM.
// Unrecognized keys are ignored; unparsable booleans yield false.
func (r *VM) apply(key string, value string) {
	asBool := func() bool {
		b, _ := strconv.ParseBool(value)
		return b
	}
	switch key {
	case "firmware":
		r.Firmware = value
	// "uefi.secureBoot.enabled" is a legacy key used in some vSphere
	// and Workstation/Fusion OVAs.
	case "bootOptions.efiSecureBootEnabled", "uefi.secureBoot.enabled":
		r.SecureBoot = asBool()
	case "memoryHotAddEnabled":
		r.MemoryHotAddEnabled = asBool()
	case "cpuHotAddEnabled":
		r.CpuHotAddEnabled = asBool()
	case "cpuHotRemoveEnabled":
		r.CpuHotRemoveEnabled = asBool()
	}
}
// Virtual Disk.
type VmDisk struct {
	// ID is a generated stable identifier (not read from the OVF).
	ID string
	Name string
	FilePath string
	Capacity int64
	CapacityAllocationUnits string
	DiskId string
	FileRef string
	Format string
	PopulatedSize int64
}

// Virtual Device.
// Device records an unclassified hardware item by a generic kind name.
type Device struct {
	Kind string `json:"kind"`
}

// Conf is a key/value configuration pair attached to a NIC.
type Conf struct {
	//nolint:unused
	key string
	Value string
}

// Virtual ethernet card.
type NIC struct {
	Name string `json:"name"`
	MAC string `json:"mac"`
	Network string
	Config []Conf
}

// VmNetwork describes one network declared in the OVF NetworkSection.
type VmNetwork struct {
	Name string
	Description string
	ID string
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-provider-server/ova/ova.go | cmd/ova-provider-server/ova/ova.go | package ova
import (
"archive/tar"
"encoding/xml"
"errors"
"io"
"os"
"strings"
)
// Appliance source products recognized by GuessSource.
const (
	SourceUnknown = "Unknown"
	SourceVMware = "VMware"
	SourceVirtualBox = "VirtualBox"
	SourceXen = "Xen"
	SourceOvirt = "oVirt"
)

// File extensions recognized by the catalog scanner.
const (
	ExtOVF = ".ovf"
	ExtOVA = ".ova"
)
// ExtractEnvelope from an appliance archive (*.ova file)
// The archive is read as a tar stream; the first member whose name ends
// with .ovf (case-insensitive) is XML-decoded into the Envelope.
// Reaching EOF without finding an .ovf member is reported as an error.
func ExtractEnvelope(ovaPath string) (envelope *Envelope, err error) {
	file, err := os.Open(ovaPath)
	if err != nil {
		return
	}
	defer func() {
		_ = file.Close()
	}()
	envelope = &Envelope{}
	reader := tar.NewReader(file)
	for {
		header, rErr := reader.Next()
		if rErr != nil {
			if errors.Is(rErr, io.EOF) {
				// No .ovf member found anywhere in the archive.
				err = errors.New("unexpected end of file while looking for .ovf")
			} else {
				err = rErr
			}
			return
		}
		if strings.HasSuffix(strings.ToLower(header.Name), ExtOVF) {
			// Decode straight from the tar stream; nothing hits disk.
			decoder := xml.NewDecoder(reader)
			err = decoder.Decode(envelope)
			if err != nil {
				return
			}
			break
		}
	}
	return
}
// ReadEnvelope parses an *.ovf file into an Envelope.
// On a decode failure the partially-populated envelope is returned
// alongside the error, matching the original contract.
func ReadEnvelope(ovfPath string) (envelope *Envelope, err error) {
	var f *os.File
	if f, err = os.Open(ovfPath); err != nil {
		return
	}
	defer func() {
		_ = f.Close()
	}()
	envelope = &Envelope{}
	err = xml.NewDecoder(f).Decode(envelope)
	return
}
// GuessSource checks the OVF XML for any markers that might cause import
// problems later on. Not guaranteed to correctly guess the OVA source,
// but good enough to filter out obvious problem cases: known Xen,
// VirtualBox and oVirt schema URLs win outright; a VMware schema URL is
// remembered as a fallback when nothing else matches.
func GuessSource(envelope Envelope) string {
	bySchema := map[string]string{
		"http://schemas.citrix.com/ovf/envelope/1": SourceXen,
		"http://www.citrix.com/xenclient/ovf/1":    SourceXen,
		"http://www.virtualbox.org/ovf/machine":    SourceVirtualBox,
		"http://www.ovirt.org/ovf":                 SourceOvirt,
	}
	sawVMware := false
	for _, attr := range envelope.Attributes {
		if source, known := bySchema[attr.Value]; known {
			return source
		}
		// Other products may carry a VMware namespace too; only use it
		// when no specific product namespace is found.
		if strings.Contains(attr.Value, "http://www.vmware.com/schema/ovf") {
			sawVMware = true
		}
	}
	if sawVMware {
		return SourceVMware
	}
	return SourceUnknown
}
// xml struct
// Item is one hardware item in an OVF VirtualHardwareSection
// (processor, memory, NIC, disk, ...), keyed by ResourceType.
type Item struct {
	AllocationUnits string `xml:"AllocationUnits,omitempty"`
	Description string `xml:"Description,omitempty"`
	ElementName string `xml:"ElementName"`
	InstanceID string `xml:"InstanceID"`
	ResourceType int `xml:"ResourceType"`
	VirtualQuantity int32 `xml:"VirtualQuantity"`
	Address string `xml:"Address,omitempty"`
	ResourceSubType string `xml:"ResourceSubType,omitempty"`
	Parent string `xml:"Parent,omitempty"`
	HostResource string `xml:"HostResource,omitempty"`
	Connection string `xml:"Connection,omitempty"`
	Configs []VirtualConfig `xml:"Config"`
	CoresPerSocket string `xml:"CoresPerSocket"`
}

// VirtualConfig is a vmw:Config key/value attribute pair
// (VMware OVF schema namespace).
type VirtualConfig struct {
	XMLName xml.Name `xml:"http://www.vmware.com/schema/ovf Config"`
	Required string `xml:"required,attr"`
	Key string `xml:"key,attr"`
	Value string `xml:"value,attr"`
}

// ExtraVirtualConfig is a vmw:ExtraConfig key/value attribute pair.
type ExtraVirtualConfig struct {
	XMLName xml.Name `xml:"http://www.vmware.com/schema/ovf ExtraConfig"`
	Required string `xml:"required,attr"`
	Key string `xml:"key,attr"`
	Value string `xml:"value,attr"`
}

// VirtualHardwareSection holds the hardware items and VMware config
// of one virtual system.
type VirtualHardwareSection struct {
	Info string `xml:"Info"`
	Items []Item `xml:"Item"`
	Configs []VirtualConfig `xml:"Config"`
	ExtraConfig []ExtraVirtualConfig `xml:"ExtraConfig"`
}

// References lists the files referenced by the envelope (disk images).
type References struct {
	File []struct {
		Href string `xml:"href,attr"`
	} `xml:"File"`
}

// DiskSection lists the virtual disks declared by the envelope.
type DiskSection struct {
	XMLName xml.Name `xml:"DiskSection"`
	Info string `xml:"Info"`
	Disks []Disk `xml:"Disk"`
}

// Disk is one virtual disk declaration.
type Disk struct {
	XMLName xml.Name `xml:"Disk"`
	Capacity int64 `xml:"capacity,attr"`
	CapacityAllocationUnits string `xml:"capacityAllocationUnits,attr"`
	DiskId string `xml:"diskId,attr"`
	FileRef string `xml:"fileRef,attr"`
	Format string `xml:"format,attr"`
	PopulatedSize int64 `xml:"populatedSize,attr"`
}

// NetworkSection lists the networks declared by the envelope.
type NetworkSection struct {
	XMLName xml.Name `xml:"NetworkSection"`
	Info string `xml:"Info"`
	Networks []Network `xml:"Network"`
}

// Network is one declared network.
type Network struct {
	XMLName xml.Name `xml:"Network"`
	Name string `xml:"name,attr"`
	Description string `xml:"Description"`
}

// VirtualSystem describes one VM inside the envelope.
type VirtualSystem struct {
	ID string `xml:"id,attr"`
	Name string `xml:"Name"`
	OperatingSystemSection struct {
		Info string `xml:"Info"`
		Description string `xml:"Description"`
		OsType string `xml:"osType,attr"`
	} `xml:"OperatingSystemSection"`
	HardwareSection VirtualHardwareSection `xml:"VirtualHardwareSection"`
}

// Envelope is the OVF document root. Attributes captures all root
// attributes (including namespace declarations) for GuessSource.
type Envelope struct {
	XMLName xml.Name `xml:"Envelope"`
	Attributes []xml.Attr `xml:",any,attr"`
	VirtualSystem []VirtualSystem `xml:"VirtualSystem"`
	DiskSection DiskSection `xml:"DiskSection"`
	NetworkSection NetworkSection `xml:"NetworkSection"`
	References References `xml:"References"`
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ovirt-populator/ovirt-populator.go | cmd/ovirt-populator/ovirt-populator.go | package main
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"os"
"os/exec"
"strconv"
"github.com/kubev2v/forklift/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"k8s.io/klog/v2"
)
// engineConfig holds the oVirt engine connection settings assembled from the
// CLI flags and the credential environment variables.
type engineConfig struct {
	URL      string // engine API endpoint (https://engine.fqdn)
	username string
	password string
	cacert   string // PEM CA certificate; unused when insecure is true
	insecure bool   // skip TLS verification
}

// TransferProgress mirrors one JSON progress line emitted by `ovirt-img`.
type TransferProgress struct {
	Transferred uint64  `json:"transferred"` // bytes transferred so far
	Description string  `json:"description"`
	Size        *uint64 `json:"size,omitempty"` // total size if known
	Elapsed     float64 `json:"elapsed"`
}
// main parses the populator flags, starts the Prometheus metrics endpoint,
// and populates the target volume from the given oVirt disk.
func main() {
	var engineUrl, diskID, volPath, secretName, crName, crNamespace, ownerUID string
	var pvcSize *int64
	flag.StringVar(&engineUrl, "engine-url", "", "ovirt-engine url (https://engine.fqdn)")
	flag.StringVar(&diskID, "disk-id", "", "ovirt-engine disk id")
	flag.StringVar(&volPath, "volume-path", "", "Volume path to populate")
	flag.StringVar(&secretName, "secret-name", "", "Name of secret containing ovirt credentials")
	flag.StringVar(&crName, "cr-name", "", "Custom Resource instance name")
	flag.StringVar(&crNamespace, "cr-namespace", "", "Custom Resource instance namespace")
	flag.StringVar(&ownerUID, "owner-uid", "", "Owner UID (usually PVC UID)")
	pvcSize = flag.Int64("pvc-size", 0, "Size of pvc (in bytes)")
	flag.Parse()
	// The PVC size is the denominator for progress reporting, so it must be positive.
	if pvcSize == nil || *pvcSize <= 0 {
		klog.Fatal("pvc-size must be greater than 0")
	}
	// Self-signed certs for the metrics endpoint are generated in a temp dir.
	certsDirectory, err := os.MkdirTemp("", "certsdir")
	if err != nil {
		klog.Fatal(err)
	}
	metrics.StartPrometheusEndpoint(certsDirectory)
	populate(engineUrl, diskID, volPath, ownerUID, *pvcSize)
}
// populate loads the engine connection settings, writes the credential files
// consumed by ovirt-img, and runs the disk download into volPath.
func populate(engineURL, diskID, volPath, ownerUID string, pvcSize int64) {
	config := loadEngineConfig(engineURL)
	prepareCredentials(config)
	executePopulationProcess(config, diskID, volPath, ownerUID, pvcSize)
}
// prepareCredentials writes the engine password — and, unless TLS
// verification is disabled, the CA certificate — to the file paths that
// createCommandArguments passes to ovirt-img.
func prepareCredentials(config *engineConfig) {
	writeFile("/tmp/ovirt.pass", config.password)
	if !config.insecure {
		writeFile("/tmp/ca.pem", config.cacert)
	}
}
// writeFile writes content to filename, creating or truncating it.
// Any failure is fatal: the populator cannot proceed without its credential files.
func writeFile(filename, content string) {
	// os.WriteFile performs create/truncate/write/close in one call and,
	// unlike the previous Create + Write + deferred-Close sequence, reports
	// errors surfaced at close time. 0666 matches os.Create's mode (before umask).
	if err := os.WriteFile(filename, []byte(content), 0666); err != nil {
		klog.Fatalf("Failed to write to %s: %v", filename, err)
	}
}
// executePopulationProcess runs `ovirt-img download-disk`, merging its stderr
// into stdout and streaming the combined output to monitorProgress, which
// translates the JSON progress lines into a Prometheus counter.
func executePopulationProcess(config *engineConfig, diskID, volPath, ownerUID string, pvcSize int64) {
	args := createCommandArguments(config, diskID, volPath)
	cmd := exec.Command("ovirt-img", args...)
	// Previously the StdoutPipe error was discarded with `_`; a nil reader
	// would then crash the scanner goroutine. Fail fast instead.
	r, err := cmd.StdoutPipe()
	if err != nil {
		klog.Fatal(err)
	}
	// StdoutPipe set cmd.Stdout to the pipe's writer; route stderr there too
	// so both streams are scanned together.
	cmd.Stderr = cmd.Stdout
	done := make(chan struct{})
	scanner := bufio.NewScanner(r)
	klog.Info(fmt.Sprintf("Running command: %s", cmd.String()))
	go monitorProgress(scanner, ownerUID, pvcSize, done)
	if err := cmd.Start(); err != nil {
		klog.Fatal(err)
	}
	// Drain the pipe fully before Wait, as required by os/exec.
	<-done
	if err := cmd.Wait(); err != nil {
		klog.Fatal(err)
	}
}
// monitorProgress scans ovirt-img's JSON progress lines and advances a
// monotonically increasing Prometheus counter toward 100. When the stream
// ends it tops the counter up to exactly 100 and signals done.
func monitorProgress(scanner *bufio.Scanner, ownerUID string, pvcSize int64, done chan struct{}) {
	progress := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "ovirt_progress",
			Help: "Progress of volume population",
		},
		[]string{"ownerUID"},
	)
	if err := prometheus.Register(progress); err != nil {
		klog.Error("Prometheus progress gauge not registered:", err)
		return
	}
	var currentProgress float64
	// The PVC size is used as the denominator rather than the size reported
	// by ovirt-img.
	total := pvcSize
	metric := &dto.Metric{}
	for scanner.Scan() {
		progressOutput := TransferProgress{}
		text := scanner.Text()
		klog.Info(text)
		if err := json.Unmarshal([]byte(text), &progressOutput); err != nil {
			// Non-JSON lines (regular log output) are expected; only report
			// unexpected unmarshal failures.
			var syntaxError *json.SyntaxError
			if !errors.As(err, &syntaxError) {
				klog.Error(err)
			}
		}
		if total > 0 {
			currentProgress = (float64(progressOutput.Transferred) / float64(total)) * 100
			// Counters can only go up: read the current value and add the delta.
			if err := progress.WithLabelValues(ownerUID).Write(metric); err != nil {
				klog.Error(err)
			} else if currentProgress > metric.Counter.GetValue() {
				progress.WithLabelValues(ownerUID).Add(currentProgress - metric.Counter.GetValue())
			}
		}
	}
	// Stream finished: force the counter to exactly 100.
	if err := progress.WithLabelValues(ownerUID).Write(metric); err != nil {
		klog.Error(err)
	}
	remaining := 100 - int64(metric.Counter.GetValue())
	if remaining > 0 {
		progress.WithLabelValues(ownerUID).Add(float64(remaining))
	}
	done <- struct{}{}
}
// createCommandArguments assembles the ovirt-img download-disk argument list
// from the engine config, the disk to download and the destination path.
func createCommandArguments(config *engineConfig, diskID, volPath string) []string {
	cmdArgs := []string{
		"download-disk",
		"--output", "json",
		"--engine-url=" + config.URL,
		"--username=" + config.username,
		"--password-file=/tmp/ovirt.pass",
	}
	if config.insecure {
		cmdArgs = append(cmdArgs, "--insecure")
	} else {
		cmdArgs = append(cmdArgs, "--cafile=/tmp/ca.pem")
	}
	return append(cmdArgs, "-f", "raw", diskID, volPath)
}
// loadEngineConfig builds the engine connection settings from the given URL
// and the credential environment variables injected from the secret.
func loadEngineConfig(engineURL string) *engineConfig {
	insecure := getEnvAsBool("insecureSkipVerify", false)
	return &engineConfig{
		URL:      engineURL,
		username: os.Getenv("user"),
		password: os.Getenv("password"),
		cacert:   os.Getenv("cacert"),
		insecure: insecure,
	}
}
// getEnvAsBool reads the environment variable key as a boolean, returning
// defaultVal when unset and aborting on an unparseable value.
func getEnvAsBool(key string, defaultVal bool) bool {
	raw, present := os.LookupEnv(key)
	if !present {
		return defaultVal
	}
	parsed, err := strconv.ParseBool(raw)
	if err != nil {
		klog.Fatal("Invalid boolean value for", key, ":", raw)
	}
	return parsed
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/image-converter/image-converter.go | cmd/image-converter/image-converter.go | package main
import (
"bufio"
"bytes"
"flag"
"os/exec"
"k8s.io/klog/v2"
)
// main parses the converter flags and converts the source volume to the
// requested target format, copying the result back over the source.
func main() {
	var srcVolPath, dstVolPath, srcFormat, dstFormat, volumeMode string
	flag.StringVar(&srcVolPath, "src-path", "", "Source volume path")
	flag.StringVar(&dstVolPath, "dst-path", "", "Target volume path")
	flag.StringVar(&srcFormat, "src-format", "", "Format of the source volume")
	flag.StringVar(&dstFormat, "dst-format", "", "Format of the target volume")
	// Fixed copy-pasted description: this flag carries the PVC volume mode,
	// not a disk format.
	flag.StringVar(&volumeMode, "volume-mode", "", "Volume mode of the target volume (Block or Filesystem)")
	flag.Parse()
	klog.Info("srcVolPath: ", srcVolPath, " dstVolPath: ", dstVolPath, " sourceFormat: ", srcFormat, " targetFormat: ", dstFormat)
	if err := convert(srcVolPath, dstVolPath, srcFormat, dstFormat, volumeMode); err != nil {
		klog.Fatal(err)
	}
}
// convert converts srcVolPath (srcFormat) into dstVolPath (dstFormat) and
// then copies the converted data back over the source location, so the
// original volume ends up holding the target-format image.
func convert(srcVolPath, dstVolPath, srcFormat, dstFormat, volumeMode string) error {
	err := qemuimgConvert(srcVolPath, dstVolPath, srcFormat, dstFormat)
	if err != nil {
		return err
	}
	klog.Info("Copying over source")
	// Copy dst over src
	switch volumeMode {
	case "Block":
		// Block devices can't be renamed; stream the converted image back
		// with qemu-img. Source and destination format are both dstFormat
		// here because dstVolPath already holds the converted image.
		err = qemuimgConvert(dstVolPath, srcVolPath, dstFormat, dstFormat)
		if err != nil {
			return err
		}
	case "Filesystem":
		// Use mv for files as it's faster than qemu-img convert
		cmd := exec.Command("mv", dstVolPath, srcVolPath)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr // Capture stderr
		klog.Info("Executing command: ", cmd.String())
		err := cmd.Run()
		if err != nil {
			klog.Error(stderr.String())
			return err
		}
	}
	return nil
}
// qemuimgConvert shells out to `qemu-img convert`, logging stdout as info
// and stderr as errors while the command runs. It returns the command's
// exit error, if any.
func qemuimgConvert(srcVolPath, dstVolPath, srcFormat, dstFormat string) error {
	cmd := exec.Command(
		"qemu-img",
		"convert",
		"-p", // print progress
		"-f", srcFormat,
		"-O", dstFormat,
		srcVolPath,
		dstVolPath,
	)
	klog.Info("Executing command: ", cmd.String())
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	// Drain stderr concurrently so the process can't block on a full pipe
	// while we read stdout below.
	go func() {
		scanner := bufio.NewScanner(stderr)
		for scanner.Scan() {
			line := scanner.Text()
			klog.Error(line)
		}
	}()
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		line := scanner.Text()
		klog.Info(line)
	}
	// NOTE(review): scanner.Err() is not checked after either loop — a read
	// failure is silently treated as end of stream; confirm that is intended.
	err = cmd.Wait()
	if err != nil {
		return err
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/openstack-populator/openstack-populator_test.go | cmd/openstack-populator/openstack-populator_test.go | package main
import (
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"os"
"testing"
)
// setupMockServer starts an httptest server on a random port that fakes the
// minimum of the OpenStack Keystone (v3 discovery + token issue) and Glance
// (image download) APIs needed by TestPopulate. It returns the server, its
// base URL and the bound port.
func setupMockServer() (*httptest.Server, string, int, error) {
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		return nil, "", 0, err
	}
	mux := http.NewServeMux()
	port := listener.Addr().(*net.TCPAddr).Port
	baseURL := fmt.Sprintf("http://localhost:%d", port)
	// Keystone version discovery document.
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		response := fmt.Sprintf(`{
			"versions": {
				"values": [
					{
						"id": "v3.0",
						"links": [
							{"rel": "self", "href": "%s/v3/"}
						],
						"status": "stable"
					}
				]
			}
		}`, baseURL)
		fmt.Fprint(w, response)
	})
	// Glance image download: returns a fixed payload the test asserts on.
	mux.HandleFunc("/v2/images/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `mock_data`)
	})
	// Keystone token issue: returns a static token plus a service catalog
	// pointing the identity and image services back at this mock server.
	mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("X-Subject-Token", "MIIFvgY")
		w.WriteHeader(http.StatusCreated)
		identityServer := fmt.Sprintf("%s/v3/", baseURL)
		imageServiceURL := fmt.Sprintf("%s/v2/images", baseURL)
		fmt.Println("identityServer ", identityServer)
		response := fmt.Sprintf(`{
			"token": {
				"methods": ["password"],
				"project": {
					"domain": {
						"id": "default",
						"name": "Default"
					},
					"id": "8538a3f13f9541b28c2620eb19065e45",
					"name": "admin"
				},
				"catalog": [
					{
						"type": "identity",
						"name": "keystone",
						"endpoints": [
							{
								"url": "%s",
								"region": "RegionOne",
								"interface": "public",
								"id": "identity-public-endpoint-id"
							},
							{
								"url": "%s",
								"region": "RegionOne",
								"interface": "admin",
								"id": "identity-admin-endpoint-id"
							},
							{
								"url": "%s",
								"region": "RegionOne",
								"interface": "internal",
								"id": "identity-internal-endpoint-id"
							}
						]
					},
					{
						"type": "image",
						"name": "glance",
						"endpoints": [
							{
								"url": "%s",
								"region": "RegionOne",
								"interface": "public",
								"id": "image-public-endpoint-id"
							}
						]
					}
				],
				"user": {
					"domain": {
						"id": "default",
						"name": "Default"
					},
					"id": "3ec3164f750146be97f21559ee4d9c51",
					"name": "admin"
				},
				"issued_at": "201406-10T20:55:16.806027Z"
			}
		}`,
			identityServer,
			identityServer,
			identityServer,
			imageServiceURL)
		fmt.Fprint(w, response)
	})
	server := httptest.NewUnstartedServer(mux)
	server.Listener = listener
	server.Start()
	return server, baseURL, port, nil
}
// TestPopulate exercises the full populate() path against the mock OpenStack
// server: it authenticates, downloads the fake image into disk.img, and
// verifies the file content matches what the mock served.
func TestPopulate(t *testing.T) {
	// Credentials are passed via environment, as in the real pod.
	os.Setenv("username", "testuser")
	os.Setenv("password", "testpassword")
	os.Setenv("projectName", "Default")
	os.Setenv("domainName", "Default")
	os.Setenv("insecureSkipVerify", "true")
	os.Setenv("availability", "public")
	os.Setenv("regionName", "RegionOne")
	os.Setenv("authType", "password")
	server, identityServerURL, port, err := setupMockServer()
	if err != nil {
		t.Fatalf("Failed to start mock server: %v", err)
	}
	defer server.Close()
	fmt.Printf("Mock server running on port: %d\n", port)
	fileName := "disk.img"
	secretName := "test-secret"
	imageID := "test-image-id"
	ownerUID := "test-uid"
	config := &AppConfig{
		identityEndpoint: identityServerURL,
		secretName:       secretName,
		imageID:          imageID,
		ownerUID:         ownerUID,
		pvcSize:          100,
		volumePath:       fileName,
	}
	fmt.Println("server ", identityServerURL)
	populate(config)
	file, err := os.Open(fileName)
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	defer file.Close() // Ensure the file is closed after reading
	content, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}
	// The mock wrote "mock_data" with Fprintln, hence the trailing newline.
	if string(content) != "mock_data\n" {
		t.Errorf("Expected %s, got %s", "mock_data", string(content))
	}
	os.Remove(fileName)
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/openstack-populator/openstack-populator.go | cmd/openstack-populator/openstack-populator.go | package main
import (
"flag"
"io"
"os"
"strings"
"time"
libclient "github.com/kubev2v/forklift/pkg/lib/client/openstack"
"github.com/kubev2v/forklift/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"k8s.io/klog/v2"
)
// AppConfig carries the populator's command-line configuration.
type AppConfig struct {
	identityEndpoint string // Keystone URL
	imageID          string // Glance image to download
	crNamespace      string // namespace of the OpenstackVolumePopulator CR
	crName           string // name of the OpenstackVolumePopulator CR
	secretName       string // secret holding the OpenStack credentials
	ownerUID         string // PVC UID, used as the metrics label
	pvcSize          int64  // target PVC size in bytes (progress denominator)
	volumePath       string // path of the volume to populate
}
// main parses the populator flags, starts the Prometheus metrics endpoint
// and populates the target volume from the OpenStack image.
func main() {
	config := &AppConfig{}
	flag.StringVar(&config.identityEndpoint, "endpoint", "", "endpoint URL (https://openstack.example.com:5000/v2.0)")
	flag.StringVar(&config.secretName, "secret-name", "", "secret containing OpenStack credentials")
	flag.StringVar(&config.imageID, "image-id", "", "Openstack image ID")
	flag.StringVar(&config.volumePath, "volume-path", "", "Path to populate")
	flag.StringVar(&config.crName, "cr-name", "", "Custom Resource instance name")
	flag.StringVar(&config.crNamespace, "cr-namespace", "", "Custom Resource instance namespace")
	flag.StringVar(&config.ownerUID, "owner-uid", "", "Owner UID (usually PVC UID)")
	flag.Int64Var(&config.pvcSize, "pvc-size", 0, "Size of pvc (in bytes)")
	flag.Parse()
	// The PVC size is the denominator for progress reporting, so it must be positive.
	if config.pvcSize <= 0 {
		klog.Fatal("pvc-size must be greater than 0")
	}
	// Self-signed certs for the metrics endpoint are generated in a temp dir.
	certsDirectory, err := os.MkdirTemp("", "certsdir")
	if err != nil {
		klog.Fatal(err)
	}
	metrics.StartPrometheusEndpoint(certsDirectory)
	populate(config)
}
// populate connects to OpenStack and streams the configured image into the
// target volume.
func populate(config *AppConfig) {
	client := createClient(config)
	downloadAndSaveImage(client, config)
}
// createClient builds an OpenStack client from the identity endpoint and the
// credential options read from the environment, and connects it. Connection
// failure is fatal.
func createClient(config *AppConfig) *libclient.Client {
	options := readOptions()
	client := &libclient.Client{
		URL:     config.identityEndpoint,
		Options: options,
	}
	err := client.Connect()
	if err != nil {
		klog.Fatal(err)
	}
	return client
}
// downloadAndSaveImage streams the Glance image into the target volume file
// while reporting progress through a Prometheus counter.
func downloadAndSaveImage(client *libclient.Client, config *AppConfig) {
	klog.Info("Downloading the image: ", config.imageID)
	imageReader, err := client.DownloadImage(config.imageID)
	if err != nil {
		klog.Fatal(err)
	}
	defer imageReader.Close()
	file := openFile(config.volumePath)
	defer file.Close()
	progressVec := createProgressCounter()
	writeData(imageReader, file, config, progressVec)
}
// createProgressCounter registers and returns the population-progress counter,
// labeled by the owning PVC's UID. Registration failure is logged but not fatal.
func createProgressCounter() *prometheus.CounterVec {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "openstack_populator_progress",
			Help: "Progress of volume population",
		},
		[]string{"ownerUID"},
	)
	if err := prometheus.Register(counter); err != nil {
		klog.Error("Prometheus progress counter not registered:", err)
	}
	return counter
}
// openFile opens the target volume for writing. Filesystem-mode targets end
// in "disk.img" and may not exist yet, so those are opened with O_CREATE;
// block devices are expected to already exist. Failure is fatal.
func openFile(volumePath string) *os.File {
	mode := os.O_RDWR
	if strings.HasSuffix(volumePath, "disk.img") {
		mode |= os.O_CREATE
	}
	f, err := os.OpenFile(volumePath, mode, 0650)
	if err != nil {
		klog.Fatal(err)
	}
	return f
}
// writeData streams the image into the target file while a background
// goroutine periodically reports transfer progress. Copy failure is fatal.
func writeData(reader io.ReadCloser, file *os.File, config *AppConfig, progress *prometheus.CounterVec) {
	// NOTE(review): countingReader.read is incremented here (via Read) and
	// read concurrently by reportProgress without synchronization — confirm
	// the data race is acceptable for an advisory metric.
	countingReader := &CountingReader{reader: reader, total: config.pvcSize, read: new(int64)}
	done := make(chan bool)
	go reportProgress(done, countingReader, progress, config)
	if _, err := io.Copy(file, countingReader); err != nil {
		klog.Fatal(err)
	}
	done <- true
}
// reportProgress updates the progress counter once per second until the copy
// signals completion on done, then pushes the counter to exactly 100.
func reportProgress(done chan bool, countingReader *CountingReader, progress *prometheus.CounterVec, config *AppConfig) {
	for {
		select {
		case <-done:
			finalizeProgress(progress, config.ownerUID)
			return
		default:
			updateProgress(countingReader, progress, config.ownerUID)
			time.Sleep(1 * time.Second)
		}
	}
}
// finalizeProgress tops the owner's progress counter up to exactly 100 once
// the transfer has finished (counters can only be incremented, never set).
func finalizeProgress(progress *prometheus.CounterVec, ownerUID string) {
	currentVal := progress.WithLabelValues(ownerUID)
	var metric dto.Metric
	if err := currentVal.Write(&metric); err != nil {
		klog.Error("Error reading current progress:", err)
		return
	}
	if metric.Counter != nil {
		remainingProgress := 100 - *metric.Counter.Value
		if remainingProgress > 0 {
			currentVal.Add(remainingProgress)
		}
	}
	klog.Info("Finished populating the volume. Progress: 100%")
}
// updateProgress advances the owner's progress counter to the current
// bytes-read percentage (counters only go up, so only the positive delta is
// added). It is a no-op when the total size is unknown.
func updateProgress(countingReader *CountingReader, progress *prometheus.CounterVec, ownerUID string) {
	if countingReader.total <= 0 {
		return
	}
	metric := &dto.Metric{}
	if err := progress.WithLabelValues(ownerUID).Write(metric); err != nil {
		klog.Errorf("updateProgress: failed to write metric; %v", err)
		// Without a readable current value we cannot compute a safe delta.
		return
	}
	// Guard against a nil Counter (finalizeProgress already does this);
	// previously *metric.Counter.Value would panic here.
	if metric.Counter == nil {
		return
	}
	currentProgress := (float64(*countingReader.read) / float64(countingReader.total)) * 100
	if currentProgress > metric.Counter.GetValue() {
		progress.WithLabelValues(ownerUID).Add(currentProgress - metric.Counter.GetValue())
	}
	klog.Info("Progress: ", int64(currentProgress), "%")
}
// readOptions collects the OpenStack connection options from environment
// variables, logging each one with secret values masked.
func readOptions() map[string]string {
	// Connection options the populator understands, injected via the secret.
	optionKeys := []string{
		"regionName", "authType", "username", "userID", "password",
		"applicationCredentialID", "applicationCredentialName", "applicationCredentialSecret",
		"token", "systemScope", "projectName", "projectID", "userDomainName",
		"userDomainID", "projectDomainName", "projectDomainID", "domainName",
		"domainID", "defaultDomain", "insecureSkipVerify", "cacert", "availability",
	}
	options := make(map[string]string, len(optionKeys))
	klog.Info("Options:")
	for _, key := range optionKeys {
		value := os.Getenv(key)
		options[key] = value
		logged := value
		if sensitiveInfo(key) {
			logged = strings.Repeat("*", len(logged))
		}
		klog.Info(" - ", key, " = ", logged)
	}
	return options
}
// sensitiveInfo reports whether the named option holds a secret whose value
// must be masked in logs.
func sensitiveInfo(option string) bool {
	switch option {
	case "password", "applicationCredentialSecret", "token":
		return true
	default:
		return false
	}
}
// CountingReader wraps a reader and accumulates the number of bytes read so
// the populator can report progress against the expected total.
type CountingReader struct {
	reader io.ReadCloser
	read   *int64 // bytes read so far; also read by the progress goroutine
	total  int64  // expected total bytes (the PVC size)
}

// Read delegates to the wrapped reader and adds the returned byte count to
// the running total.
// NOTE(review): the increment is not atomic while reportProgress reads the
// counter concurrently — confirm this race is acceptable for an advisory metric.
func (cr *CountingReader) Read(p []byte) (int, error) {
	n, err := cr.reader.Read(p)
	*cr.read += int64(n)
	return n, err
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/populator-controller/populator-controller.go | cmd/populator-controller/populator-controller.go | package main
import (
"flag"
"os"
"os/signal"
"syscall"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
populator_machinery "github.com/kubev2v/forklift/pkg/lib-volume-populator/populator-machinery"
"github.com/kubev2v/forklift/pkg/settings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
)
const (
	// prefix for populator-machinery resource naming/annotations.
	prefix = "forklift.konveyor.io"
	// mountPath is where Filesystem-mode PVCs are mounted in the populator pod.
	mountPath = "/mnt/"
	// devicePath is where Block-mode PVCs are attached in the populator pod.
	devicePath = "/dev/block"
	// groupName/apiVersion identify the forklift CRD API group served by the populators.
	groupName  = "forklift.konveyor.io"
	apiVersion = "v1beta1"
)
// populator describes one supported volume-populator kind and how to launch
// its controller.
type populator struct {
	kind     string // CRD kind, e.g. OvirtVolumePopulator
	resource string // CRD plural resource name
	// controllerFunc builds the populator pod's CLI args from the CR and PVC.
	controllerFunc func(bool, *unstructured.Unstructured, corev1.PersistentVolumeClaim) ([]string, error)
	imageVar        string // env var naming the populator container image
	metricsEndpoint string // listen address for this controller's metrics
}
// populators maps each supported source type to its controller wiring. A
// controller is only started when the corresponding image env var is set.
var populators = map[string]populator{
	"ovirt": {
		kind:            "OvirtVolumePopulator",
		resource:        "ovirtvolumepopulators",
		controllerFunc:  getOvirtPopulatorPodArgs,
		imageVar:        "OVIRT_POPULATOR_IMAGE",
		metricsEndpoint: ":8080",
	},
	"openstack": {
		kind:            "OpenstackVolumePopulator",
		resource:        "openstackvolumepopulators",
		controllerFunc:  getOpenstackPopulatorPodArgs,
		imageVar:        "OPENSTACK_POPULATOR_IMAGE",
		metricsEndpoint: ":8081",
	},
	"vsphere-xcopy": {
		kind:            "VSphereXcopyVolumePopulator",
		resource:        "vspherexcopyvolumepopulators",
		controllerFunc:  getVXPopulatorPodArgs,
		imageVar:        "VSPHERE_XCOPY_VOLUME_POPULATOR_IMAGE",
		metricsEndpoint: ":8082",
	},
}
// main starts one populator-machinery controller per configured populator
// kind and blocks until SIGINT/SIGTERM.
func main() {
	var metricsPath, masterURL, kubeconfig string
	// Controller args
	if f := flag.Lookup("kubeconfig"); f != nil {
		kubeconfig = f.Value.String()
	} else {
		flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	}
	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	// Metrics args
	flag.StringVar(&metricsPath, "metrics-path", "/metrics", "The HTTP path where prometheus metrics will be exposed. Default is `/metrics`.")
	klog.InitFlags(nil)
	flag.Parse()
	resources, err := getResources()
	if err != nil {
		klog.Fatalf("Failed to parse resources: %v", err)
	}
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan struct{})
	go func() {
		<-sigs
		// close() broadcasts to every receiver. The previous `stop <- true`
		// could wake only a single receiver, so with multiple goroutines
		// blocked on <-stop, main's final receive might never fire.
		close(stop)
	}()
	for _, populator := range populators {
		imageName, ok := os.LookupEnv(populator.imageVar)
		if !ok {
			klog.Warning("Couldn't find", "imageVar", populator.imageVar)
			continue
		}
		gk := schema.GroupKind{Group: groupName, Kind: populator.kind}
		gvr := schema.GroupVersionResource{Group: groupName, Version: apiVersion, Resource: populator.resource}
		controllerFunc := populator.controllerFunc
		metricsEndpoint := populator.metricsEndpoint
		go func() {
			populator_machinery.RunController(masterURL, kubeconfig, imageName, metricsEndpoint, metricsPath,
				prefix, gk, gvr, mountPath, devicePath, controllerFunc, resources)
			<-stop
		}()
	}
	<-stop
}
// getOvirtPopulatorPodArgs builds the ovirt-populator pod's CLI arguments
// from the OvirtVolumePopulator custom resource.
func getOvirtPopulatorPodArgs(rawBlock bool, u *unstructured.Unstructured, _ corev1.PersistentVolumeClaim) ([]string, error) {
	var cr v1beta1.OvirtVolumePopulator
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &cr); err != nil {
		return nil, err
	}
	return []string{
		"--volume-path=" + getVolumePath(rawBlock),
		"--secret-name=" + cr.Spec.EngineSecretName,
		"--disk-id=" + cr.Spec.DiskID,
		"--engine-url=" + cr.Spec.EngineURL,
		"--cr-name=" + cr.Name,
		"--cr-namespace=" + cr.Namespace,
	}, nil
}
// getOpenstackPopulatorPodArgs builds the openstack-populator pod's CLI
// arguments from the OpenstackVolumePopulator custom resource.
func getOpenstackPopulatorPodArgs(rawBlock bool, u *unstructured.Unstructured, _ corev1.PersistentVolumeClaim) ([]string, error) {
	var cr v1beta1.OpenstackVolumePopulator
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &cr); err != nil {
		return nil, err
	}
	return []string{
		"--volume-path=" + getVolumePath(rawBlock),
		"--endpoint=" + cr.Spec.IdentityURL,
		"--secret-name=" + cr.Spec.SecretName,
		"--image-id=" + cr.Spec.ImageID,
		"--cr-name=" + cr.Name,
		"--cr-namespace=" + cr.Namespace,
	}, nil
}
// getVXPopulatorPodArgs builds the vsphere-xcopy populator pod's CLI
// arguments from the VSphereXcopyVolumePopulator custom resource and the
// target PVC.
func getVXPopulatorPodArgs(_ bool, u *unstructured.Unstructured, pvc corev1.PersistentVolumeClaim) ([]string, error) {
	var cr v1beta1.VSphereXcopyVolumePopulator
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &cr); err != nil {
		return nil, err
	}
	podArgs := []string{
		"--source-vm-id=" + cr.Spec.VmId,
		"--source-vmdk=" + cr.Spec.VmdkPath,
		"--target-namespace=" + cr.GetNamespace(),
		"--cr-name=" + cr.Name,
		"--cr-namespace=" + cr.Namespace,
		"--owner-name=" + pvc.Name,
		"--secret-name=" + cr.Spec.SecretName,
		"--storage-vendor-product=" + cr.Spec.StorageVendorProduct,
	}
	return podArgs, nil
}
// getVolumePath returns where the populator writes: the raw block device for
// Block-mode PVCs, otherwise a disk image file under the mount point.
func getVolumePath(rawBlock bool) string {
	if rawBlock {
		return devicePath
	}
	return mountPath + "disk.img"
}
// getResources builds the populator pod's resource requests/limits from the
// global migration settings. The error return is always nil today but kept
// for interface stability with the controller wiring.
func getResources() (*corev1.ResourceRequirements, error) {
	cpuLimit := settings.Settings.Migration.PopulatorContainerLimitsCpu
	memoryLimit := settings.Settings.Migration.PopulatorContainerLimitsMemory
	cpuRequest := settings.Settings.Migration.PopulatorContainerRequestsCpu
	memoryRequest := settings.Settings.Migration.PopulatorContainerRequestsMemory
	return &corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    cpuLimit,
			corev1.ResourceMemory: memoryLimit,
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    cpuRequest,
			corev1.ResourceMemory: memoryRequest,
		},
	}, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-proxy/cache.go | cmd/ova-proxy/cache.go | package main
import (
"net/http/httputil"
"sync"
"time"
)
// CachedProxy is a reverse proxy together with the time it was cached,
// used for TTL-based expiry.
type CachedProxy struct {
	Proxy    *httputil.ReverseProxy
	CachedAt time.Time
}

// ProxyCache is a mutex-guarded, TTL-expiring map of reverse proxies keyed
// by provider identity.
type ProxyCache struct {
	cache map[string]CachedProxy
	mutex sync.Mutex
	TTL   time.Duration // entries older than TTL are evicted on Get
}
// Add stores the proxy under key, stamping it with the current time.
// The map is created lazily so a zero-value ProxyCache is usable.
func (r *ProxyCache) Add(key string, value *httputil.ReverseProxy) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if r.cache == nil {
		r.cache = map[string]CachedProxy{}
	}
	r.cache[key] = CachedProxy{Proxy: value, CachedAt: time.Now()}
}
// Get returns the cached proxy for key if it exists and is younger than the
// TTL; expired entries are evicted and reported as not found.
func (r *ProxyCache) Get(key string) (proxy *httputil.ReverseProxy, found bool) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	entry, ok := r.cache[key]
	if !ok {
		return nil, false
	}
	if time.Since(entry.CachedAt) > r.TTL {
		delete(r.cache, key)
		return nil, false
	}
	return entry.Proxy, true
}
// NewProxyCache returns a ProxyCache whose entries expire after ttl seconds.
func NewProxyCache(ttl int64) (cache *ProxyCache) {
	return &ProxyCache{
		TTL:   time.Duration(ttl) * time.Second,
		cache: map[string]CachedProxy{},
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-proxy/proxy_test.go | cmd/ova-proxy/proxy_test.go | package main
import (
"context"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"path"
"strings"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/go-logr/logr"
"github.com/kubev2v/forklift/pkg/apis"
api "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// TestOVAProxy is the Ginkgo entry point for the OVA proxy suite.
func TestOVAProxy(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "OVA Proxy Suite")
}

// init puts gin in test mode so request handling is quiet and deterministic.
func init() {
	gin.SetMode(gin.TestMode)
}
// Specs for ProxyServer: address derivation and the Proxy handler's
// not-found / not-ready / happy-path-with-cache behaviors.
var _ = Describe("ProxyServer", func() {
	Describe("address()", func() {
		DescribeTable("defaults and explicit",
			func(port int, tlsOn bool, expect string) {
				ps := &ProxyServer{Port: port}
				ps.TLS.Enabled = tlsOn
				Expect(ps.address()).To(Equal(expect))
			},
			Entry("nonTLS default", 0, false, ":8080"),
			Entry("TLS default", 0, true, ":8443"),
			Entry("explicit", 9000, false, ":9000"),
		)
	})
	Describe("Proxy()", func() {
		It("returns 404 when the Provider is missing", func() {
			spy := NewSpyClient()
			proxy := &ProxyServer{
				Client: spy,
				Log:    logr.Discard(),
				Cache:  NewProxyCache(300),
			}
			ns, name := "namespace", "not-found"
			urlPath := path.Join("/", ns, name, "appliances")
			ctx, recorder := makeCtx(http.MethodGet, ns, name, urlPath)
			proxy.Proxy(ctx)
			Expect(recorder.Code).To(Equal(http.StatusNotFound), "body: %q", recorder.Body.String())
			Expect(spy.gets).To(Equal(1))
			Expect(spy.lastKey).To(Equal(types.NamespacedName{Namespace: ns, Name: name}))
		})
		It("returns 503 when the Provider service is not ready", func() {
			// A Provider with a nil Status.Service is not yet serving.
			provider := NewProvider("konveyor", "provider", nil)
			spy := NewSpyClient(provider)
			srv := &ProxyServer{
				Client: spy,
				Log:    logr.Discard(),
				Cache:  NewProxyCache(300),
			}
			ctx, recorder := makeCtx(http.MethodGet, "konveyor", "provider", "/konveyor/provider/appliances")
			srv.Proxy(ctx)
			Expect(recorder.Code).To(Equal(http.StatusServiceUnavailable), "body: %q", recorder.Body.String())
			Expect(spy.gets).To(Equal(1))
		})
		It("proxies to the backend and uses the cache on subsequent calls", func() {
			// Backend that confirms it receives /appliances and replies with a known body.
			var hits int
			backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				hits++
				if r.URL.Path != "/appliances" {
					http.Error(w, "unexpected path: "+r.URL.Path, http.StatusBadGateway)
					return
				}
				_, _ = fmt.Fprint(w, "ok")
			}))
			DeferCleanup(backend.Close)
			ns, name := "konveyor", "provider"
			provider := NewProvider(ns, name, &corev1.ObjectReference{
				Name:      "svc-name",
				Namespace: "svc-ns",
			})
			spy := NewSpyClient(provider)
			// Redirect the proxy's in-cluster service dial to the local backend.
			parsed, _ := url.Parse(backend.URL)
			proxy := &ProxyServer{
				Client:    spy,
				Log:       logr.Discard(),
				Cache:     NewProxyCache(300),
				Transport: svcDialRedirect(parsed.Host),
			}
			// path that the proxy should resolve into a request to /appliances
			// against the server for the provider specified by the URL params
			requestPath := path.Join("/", ns, name, "appliances")
			// this request should be a cache miss
			ctx, recorder := makeCtx(http.MethodGet, ns, name, requestPath)
			proxy.Proxy(ctx)
			Expect(recorder.Code).To(Equal(http.StatusOK), "first body: %q", recorder.Body.String())
			Expect(strings.TrimSpace(recorder.Body.String())).To(Equal("ok"))
			Expect(spy.gets).To(Equal(1))
			Expect(hits).To(Equal(1))
			// this request should be a cache hit
			ctx, recorder = makeCtx(http.MethodGet, ns, name, requestPath)
			proxy.Proxy(ctx)
			Expect(recorder.Code).To(Equal(http.StatusOK), "second body: %q", recorder.Body.String())
			Expect(spy.gets).To(Equal(1), "provider should be cached")
			Expect(hits).To(Equal(2))
		})
	})
})
// SpyClient wraps a controller-runtime client to count Get() calls and record the last key.
type SpyClient struct {
	client.Client
	gets    int                  // number of Get calls observed
	lastKey types.NamespacedName // key of the most recent Get
}

// Get records the call before delegating to the embedded client.
func (s *SpyClient) Get(ctx context.Context, key types.NamespacedName, obj client.Object, opts ...client.GetOption) error {
	s.gets++
	s.lastKey = key
	return s.Client.Get(ctx, key, obj, opts...)
}

// NewSpyClient builds a SpyClient over a fake client pre-loaded with objs,
// using the forklift API scheme.
func NewSpyClient(objs ...client.Object) *SpyClient {
	scheme := runtime.NewScheme()
	err := apis.AddToScheme(scheme)
	Expect(err).ToNot(HaveOccurred(), "AddToScheme failed")
	base := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
	return &SpyClient{Client: base}
}
// NewProvider builds a Provider fixture; svc (possibly nil) becomes the
// provider's Status.Service reference, which the proxy uses for readiness.
func NewProvider(ns, name string, svc *corev1.ObjectReference) *api.Provider {
	return &api.Provider{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "forklift.konveyor.io/v1beta1",
			Kind:       "Provider",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
		Status: api.ProviderStatus{
			Service: svc,
		},
	}
}
// closeNotifyRecorder wraps httptest.ResponseRecorder to implement http.CloseNotifier
type closeNotifyRecorder struct {
	*httptest.ResponseRecorder
}

// CloseNotify returns a channel that never fires: the test connection never closes.
func (c *closeNotifyRecorder) CloseNotify() <-chan bool {
	return make(chan bool)
}
// makeCtx builds a gin test context with the namespace/provider URL params
// set, returning the context and the recorder capturing the response.
func makeCtx(method, ns, provider, urlPath string) (*gin.Context, *closeNotifyRecorder) {
	recorder := &closeNotifyRecorder{httptest.NewRecorder()}
	c, _ := gin.CreateTestContext(recorder)
	request := httptest.NewRequest(method, urlPath, nil)
	c.Request = request
	c.Params = gin.Params{
		{Key: "namespace", Value: ns},
		{Key: "provider", Value: provider},
	}
	return c, recorder
}
// svcDialRedirect returns a transport that redirects any dial to "*.svc.cluster.local:8080" to backendAddr
func svcDialRedirect(backendAddr string) http.RoundTripper {
original := http.DefaultTransport.(*http.Transport)
clone := original.Clone()
clone.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
if strings.HasSuffix(addr, ".svc.cluster.local:8080") {
addr = backendAddr
}
d := &net.Dialer{Timeout: 5 * time.Second}
return d.DialContext(ctx, network, addr)
}
return clone
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-proxy/main.go | cmd/ova-proxy/main.go | package main
import (
"errors"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"path"
"github.com/gin-gonic/gin"
"github.com/go-logr/logr"
"github.com/kubev2v/forklift/cmd/ova-proxy/settings"
"github.com/kubev2v/forklift/pkg/apis"
api "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/lib/logging"
k8serr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
// Settings is the global proxy configuration, loaded from the environment in main().
var Settings = &settings.Settings

const (
	// ProviderRoute is the gin route pattern served by the proxy.
	ProviderRoute = "/:namespace/:provider/appliances"
	// ServiceTemplate builds the in-cluster URL of a provider's OVA service
	// from the service name and namespace.
	ServiceTemplate = "http://%s.%s.svc.cluster.local:8080/appliances"
)
// ProxyServer reverse-proxies appliance requests to the per-provider OVA
// service, keeping one cached ReverseProxy per provider.
type ProxyServer struct {
	// The service port.
	Port int
	//
	// TLS.
	TLS struct {
		// Enabled.
		Enabled bool
		// Certificate path.
		Certificate string
		// Key path
		Key string
	}
	// Transport, when non-nil, overrides the default transport of each
	// cached ReverseProxy (e.g. in tests).
	Transport http.RoundTripper
	// Client reads Provider objects from the cluster.
	Client k8sclient.Client
	// Log is the server logger, configured in init().
	Log logr.Logger
	// Cache maps "namespace/name" keys to ready ReverseProxy instances.
	Cache *ProxyCache
}
// Run initializes the server and blocks serving the provider route,
// with or without TLS depending on configuration.
func (r *ProxyServer) Run() (err error) {
	if err = r.init(); err != nil {
		return
	}
	gin.SetMode(gin.ReleaseMode)
	router := gin.Default()
	router.Any(ProviderRoute, r.Proxy)
	addr := r.address()
	if r.TLS.Enabled {
		if err = router.RunTLS(addr, r.TLS.Certificate, r.TLS.Key); err != nil {
			r.Log.Error(err, "failed to start with TLS")
		}
	} else {
		if err = router.Run(addr); err != nil {
			r.Log.Error(err, "failed to start")
		}
	}
	return
}
// Proxy forwards the request to the OVA service of the provider named in the
// route parameters, lazily building and caching one ReverseProxy per provider.
// Responds 404 when the provider does not exist, 503 when its service is not
// ready yet, and 500 on any other lookup/parse failure.
func (r *ProxyServer) Proxy(ctx *gin.Context) {
	providerName := ctx.Param("provider")
	providerNamespace := ctx.Param("namespace")
	// Cache key is "namespace/name".
	key := path.Join(providerNamespace, providerName)
	proxy, ok := r.Cache.Get(key)
	if !ok {
		// Cache miss: look the Provider up to discover its service endpoint.
		// NOTE(review): concurrent misses may build the proxy more than once;
		// presumably harmless since the last Add wins — confirm ProxyCache semantics.
		provider := &api.Provider{}
		err := r.Client.Get(ctx.Request.Context(), types.NamespacedName{
			Namespace: providerNamespace,
			Name: providerName,
		},
			provider)
		if err != nil {
			r.Log.Error(err, "error getting provider", "provider", key)
			errorCode := http.StatusInternalServerError
			if k8serr.IsNotFound(err) {
				errorCode = http.StatusNotFound
			}
			_ = ctx.AbortWithError(errorCode, err)
			return
		}
		// The controller fills Status.Service once the OVA service is running.
		if provider.Status.Service == nil {
			r.Log.Error(errors.New("not ready"), "provider service is not ready")
			_ = ctx.AbortWithError(http.StatusServiceUnavailable, fmt.Errorf("provider %s service is not ready", key))
			return
		}
		service := provider.Status.Service
		svcURL := fmt.Sprintf(ServiceTemplate, service.Name, service.Namespace)
		u, err := url.Parse(svcURL)
		if err != nil {
			_ = ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		proxy = &httputil.ReverseProxy{
			Rewrite: func(req *httputil.ProxyRequest) {
				req.SetURL(u)
				// Replace (not join) the incoming path with the service path.
				req.Out.URL.Path = u.Path
				req.Out.URL.RawPath = u.Path
				req.SetXForwarded()
			},
		}
		// Allow tests (or callers) to substitute the transport.
		if r.Transport != nil {
			proxy.Transport = r.Transport
		}
		r.Cache.Add(key, proxy)
	}
	proxy.ServeHTTP(ctx.Writer, ctx.Request)
}
// init wires up logging and the cluster client before the router starts.
func (r *ProxyServer) init() (err error) {
	logger := logging.Factory.New()
	logf.SetLogger(logger)
	r.Log = logf.Log.WithName("entrypoint")
	if r.Client, err = r.getClient(); err != nil {
		return
	}
	return
}
// address returns the listen address ":port", defaulting the port to 8443
// (TLS) or 8080 (plain) when unset. Note the chosen default is written back
// to r.Port as a side effect.
func (r *ProxyServer) address() string {
	if r.Port == 0 {
		switch {
		case r.TLS.Enabled:
			r.Port = 8443
		default:
			r.Port = 8080
		}
	}
	return fmt.Sprintf(":%d", r.Port)
}
// getClient registers the forklift API types on the shared scheme and returns
// a controller-runtime client built from the ambient (in-cluster or kubeconfig)
// configuration.
func (r *ProxyServer) getClient() (client k8sclient.Client, err error) {
	if err = apis.AddToScheme(scheme.Scheme); err != nil {
		return
	}
	cfg, err := config.GetConfig()
	if err != nil {
		return
	}
	return k8sclient.New(cfg, k8sclient.Options{Scheme: scheme.Scheme})
}
// main loads settings from the environment and runs the proxy server,
// panicking on a fatal startup error.
func main() {
	Settings.Load()
	server := ProxyServer{
		Cache: NewProxyCache(Settings.Cache.TTL),
	}
	// TLS is enabled exactly when a key path was configured.
	if Settings.TLS.Key != "" {
		server.TLS.Enabled = true
		server.TLS.Certificate = Settings.TLS.Certificate
		server.TLS.Key = Settings.TLS.Key
	}
	if err := server.Run(); err != nil {
		panic(err)
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/ova-proxy/settings/settings.go | cmd/ova-proxy/settings/settings.go | package settings
import (
"errors"
"os"
"strconv"
"strings"
)
// ServiceCAFile is the default in-pod location of the service CA bundle
// mounted by Kubernetes.
const (
	ServiceCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
)

// DefaultScheme is the default scheme for the inventory service.
const (
	DefaultScheme = "https"
)
// Names of the environment variables read by Load.
const (
	Port = "PORT"
	Namespace = "POD_NAMESPACE"
	Scheme = "SERVICE_SCHEME"
	TLSCertificate = "TLS_CERTIFICATE"
	TLSKey = "TLS_KEY"
	TLSCa = "TLS_CA"
	CacheTTL = "CACHE_TTL"
)

// Settings is the package-level configuration instance populated by Load.
var Settings = ProxySettings{}
// ProxySettings holds the runtime configuration of the OVA proxy,
// populated from environment variables by Load.
type ProxySettings struct {
	// Pod namespace (POD_NAMESPACE).
	Namespace string
	// Host.
	Host string
	// Listen port (PORT, default 8080).
	Port int
	// URL Scheme (http or https)
	Scheme string
	// TLS material.
	TLS struct {
		// Certificate path
		Certificate string
		// Key path
		Key string
		// CA path
		CA string
	}
	// Cache behaviour.
	Cache struct {
		// TTL (in seconds)
		TTL int64
	}
}
// Load populates the settings from the environment, applying defaults:
// port 8080, scheme "https", cache TTL 10 seconds, and the in-pod service CA
// bundle when TLS_CA is unset but the mounted file exists. Unparsable numeric
// values fall back to the same defaults instead of silently becoming zero.
func (r *ProxySettings) Load() {
	// Namespace
	if s, found := os.LookupEnv(Namespace); found {
		r.Namespace = s
	}
	// Port: previously a malformed PORT left r.Port at 0; use the default instead.
	r.Port = 8080
	if s, found := os.LookupEnv(Port); found {
		if port, err := strconv.Atoi(s); err == nil {
			r.Port = port
		}
	}
	// Scheme
	if s, found := os.LookupEnv(Scheme); found {
		s = strings.ToLower(strings.TrimSpace(s))
		switch s {
		case "http", "https":
			r.Scheme = s
		default:
			r.Scheme = DefaultScheme
		}
	} else {
		r.Scheme = DefaultScheme
	}
	// TLS
	if s, found := os.LookupEnv(TLSCertificate); found {
		r.TLS.Certificate = s
	}
	if s, found := os.LookupEnv(TLSKey); found {
		r.TLS.Key = s
	}
	if s, found := os.LookupEnv(TLSCa); found {
		r.TLS.CA = s
	} else {
		// Fall back to the pod's mounted service CA when it is present.
		if _, err := os.Stat(ServiceCAFile); !errors.Is(err, os.ErrNotExist) {
			r.TLS.CA = ServiceCAFile
		}
	}
	// Cache TTL (seconds): malformed values also fall back to the default.
	r.Cache.TTL = 10
	if s, found := os.LookupEnv(CacheTTL); found {
		if ttl, err := strconv.ParseInt(s, 10, 64); err == nil {
			r.Cache.TTL = ttl
		}
	}
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/cmd/virt-v2v/entrypoint.go | cmd/virt-v2v/entrypoint.go | package main
import (
_ "embed"
"fmt"
"os"
"github.com/kubev2v/forklift/pkg/virt-v2v/config"
"github.com/kubev2v/forklift/pkg/virt-v2v/conversion"
"github.com/kubev2v/forklift/pkg/virt-v2v/server"
utils "github.com/kubev2v/forklift/pkg/virt-v2v/utils"
)
// main drives the virt-v2v conversion flow: load configuration, link the CA
// certificates, prepare the output directory, then run either remote
// inspection only or the full conversion (virt-v2v / virt-v2v-in-place,
// followed by inspection and best-effort customization), finally exposing the
// results over HTTP for local migrations.
func main() {
	env := &config.AppConfig{}
	err := env.Load()
	if err != nil {
		fmt.Println("Failed to load variables", err)
		os.Exit(1)
	}
	if err = linkCertificates(env); err != nil {
		fmt.Println("Failed to link the certificates", err)
		os.Exit(1)
	}
	if err = createV2vOutputDir(env); err != nil {
		fmt.Println("Failed to create v2v output dir", err)
		os.Exit(1)
	}
	convert, err := conversion.NewConversion(env)
	if err != nil {
		fmt.Println("Failed prepare conversion", err)
		os.Exit(1)
	}
	// Check if remote inspection of VMs should run
	if env.IsRemoteInspection {
		err = convert.RunRemoteV2vInspection()
		if err != nil {
			fmt.Println("Failed to execute virt-v2v-inspector command", err)
			os.Exit(1)
		}
	} else {
		// virt-v2v or virt-v2v-in-place
		if convert.IsInPlace {
			// fetch xml description of the guest from libvirt to help virt-v2v make the conversion
			// (the closure keeps the domainXML error handling in one place)
			err = func() error {
				domainXML, err := convert.GetDomainXML()
				if err != nil {
					return fmt.Errorf("failed to get domain XML: %v", err)
				}
				err = os.WriteFile(convert.LibvirtDomainFile, []byte(domainXML), 0644)
				if err != nil {
					return fmt.Errorf("failed to write domain XML file: %v", err)
				}
				return nil
			}()
			// Only run the in-place conversion when the domain XML was written.
			if err == nil {
				err = convert.RunVirtV2vInPlace()
			}
		} else {
			err = convert.RunVirtV2v()
		}
		if err != nil {
			fmt.Println("Failed to execute virt-v2v command", err)
			os.Exit(1)
		}
		// virt-v2v-inspector
		err = convert.RunVirtV2VInspection()
		if err != nil {
			fmt.Println("Failed to inspect the disk", err)
			os.Exit(1)
		}
		inspection, err := utils.GetInspectionV2vFromFile(convert.InspectionOutputFile)
		if err != nil {
			fmt.Println("Failed to get inspection file", err)
			os.Exit(1)
		}
		// virt-customize: failure here is a warning, not fatal — the migration proceeds.
		err = convert.RunCustomize(inspection.OS)
		if err != nil {
			warningMsg := fmt.Sprintf("VM customization failed: %v. Migration will proceed but customization was not applied successfully.", err)
			fmt.Println("WARNING:", warningMsg)
			server.AddWarning(server.Warning{
				Reason: "CustomizationFailed",
				Message: warningMsg,
			})
		}
		// In the remote migrations we can not connect to the conversion pod from the controller.
		// This connection is needed for to get the additional configuration which is gathered either form virt-v2v or
		// virt-v2v-inspector. We expose those parameters via server in this pod and once the controller gets the config
		// the controller sends the request to terminate the pod.
		if convert.IsLocalMigration {
			s := server.Server{
				AppConfig: env,
			}
			err = s.Start()
			if err != nil {
				fmt.Println("failed to run the server", err)
				os.Exit(1)
			}
		}
	}
}
// linkCertificates makes the CA bundle for vSphere migrations available at
// /opt/ca-bundle.crt: it links the user-supplied certificate from
// /etc/secret/cacert when that file exists, and otherwise falls back to the
// preserved system certificate pool. Non-vSphere migrations are a no-op.
//
// Unlike the previous version, failures are returned to the caller (which
// reports them and exits) instead of calling os.Exit from inside this helper;
// the stale doc comment referring to VirtV2VPrepEnvironment is also fixed.
func linkCertificates(env *config.AppConfig) (err error) {
	if !env.IsVsphereMigration() {
		return nil
	}
	// Default to the preserved system pool certificates.
	src := "/etc/pki/tls/certs/ca-bundle.crt.bak"
	if _, statErr := os.Stat("/etc/secret/cacert"); statErr == nil {
		// Use the certificate provided via the secret mount.
		src = "/etc/secret/cacert"
	}
	if err = os.Symlink(src, "/opt/ca-bundle.crt"); err != nil {
		return fmt.Errorf("error creating ca cert link: %w", err)
	}
	return nil
}
// createV2vOutputDir ensures the virt-v2v working directory exists,
// creating any missing parents.
func createV2vOutputDir(env *config.AppConfig) (err error) {
	if mkErr := os.MkdirAll(env.Workdir, os.ModePerm); mkErr != nil {
		return fmt.Errorf("error creating directory: %v", mkErr)
	}
	return nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
kubev2v/forklift | https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/hashicorp/go-uuid/uuid.go | vendor/github.com/hashicorp/go-uuid/uuid.go | package uuid
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io"
)
// GenerateRandomBytes is used to generate random bytes of given size.
// Randomness is sourced from crypto/rand.Reader.
func GenerateRandomBytes(size int) ([]byte, error) {
	return GenerateRandomBytesWithReader(size, rand.Reader)
}
// GenerateRandomBytesWithReader fills a fresh buffer of the requested size
// from reader, failing when the reader is nil or cannot supply enough bytes.
func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) {
	if reader == nil {
		return nil, fmt.Errorf("provided reader is nil")
	}
	out := make([]byte, size)
	_, err := io.ReadFull(reader, out)
	if err != nil {
		return nil, fmt.Errorf("failed to read random bytes: %v", err)
	}
	return out, nil
}
// uuidLen is the number of raw bytes in a UUID (128 bits).
const uuidLen = 16

// GenerateUUID is used to generate a random UUID,
// sourced from crypto/rand.Reader.
func GenerateUUID() (string, error) {
	return GenerateUUIDWithReader(rand.Reader)
}
// GenerateUUIDWithReader is used to generate a random UUID whose 16 random
// bytes are drawn from the given Reader.
func GenerateUUIDWithReader(reader io.Reader) (string, error) {
	if reader == nil {
		return "", fmt.Errorf("provided reader is nil")
	}
	raw, err := GenerateRandomBytesWithReader(uuidLen, reader)
	if err != nil {
		return "", err
	}
	return FormatUUID(raw)
}
// FormatUUID renders a 16-byte slice in the canonical 8-4-4-4-12
// lowercase-hex form, erroring on any other input length.
func FormatUUID(buf []byte) (string, error) {
	if n := len(buf); n != uuidLen {
		return "", fmt.Errorf("wrong length byte slice (%d)", n)
	}
	return fmt.Sprintf("%x-%x-%x-%x-%x",
		buf[:4], buf[4:6], buf[6:8], buf[8:10], buf[10:]), nil
}
// ParseUUID decodes a canonical 8-4-4-4-12 UUID string back into its 16 raw
// bytes, validating the overall length and the dash positions first.
// (Also fixes the non-gofmt spacing in the length expression.)
func ParseUUID(uuid string) ([]byte, error) {
	// 32 hex digits plus 4 dashes.
	if len(uuid) != 2*uuidLen+4 {
		return nil, fmt.Errorf("uuid string is wrong length")
	}
	if uuid[8] != '-' ||
		uuid[13] != '-' ||
		uuid[18] != '-' ||
		uuid[23] != '-' {
		return nil, fmt.Errorf("uuid is improperly formatted")
	}
	// Strip the dashes and decode the remaining 32 hex digits.
	hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
	ret, err := hex.DecodeString(hexStr)
	if err != nil {
		return nil, err
	}
	if len(ret) != uuidLen {
		return nil, fmt.Errorf("decoded hex is the wrong length")
	}
	return ret, nil
}
| go | Apache-2.0 | b3b4703e958c25d54c4d48138d9e80ae32fadac3 | 2026-01-07T09:44:30.792320Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.