file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
e2e/mcp/plans/test_read.py | Python | """Plans · read -- list, describe, and verify YAML of migration plans."""
import pytest
import yaml
from conftest import (
COLD_PLAN_NAME,
COLD_VMS,
OCP_PROVIDER_NAME,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
WARM_PLAN_NAME,
WARM_VMS,
call_tool,
)
@pytest.mark.order(40)
async def test_get_plans(mcp_session):
    """Listing plans in the test namespace must yield exactly two entries."""
    request = {
        "command": "get plan",
        "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
    }
    result = await call_tool(mcp_session, "mtv_read", request)
    plans = result.get("data", [])
    if isinstance(plans, dict):
        # A "No plans found" response comes back as a message dict;
        # normalise it to an empty list before asserting.
        plans = []
    assert isinstance(plans, list), (
        f"Expected list of plans, got {type(plans).__name__}: {plans}"
    )
    assert len(plans) == 2, f"Expected 2 plans, got {len(plans)}"
@pytest.mark.order(41)
async def test_describe_cold_plan(mcp_session):
    """Describing the cold plan (with VMs) must mention the source provider and every VM."""
    flags = {
        "name": COLD_PLAN_NAME,
        "namespace": TEST_NAMESPACE,
        "with-vms": True,
    }
    result = await call_tool(
        mcp_session, "mtv_read", {"command": "describe plan", "flags": flags}
    )
    description = result.get("output", "")
    assert VSPHERE_PROVIDER_NAME in description, "Source provider missing"
    for vm in COLD_VMS.split(","):
        assert vm in description, f"VM '{vm}' missing from cold plan description"
@pytest.mark.order(42)
async def test_describe_warm_plan(mcp_session):
    """Describe the warm plan including VMs and warm flag.

    Asserts that the description mentions warm migration (case-insensitive)
    and lists every VM configured for the warm plan.
    """
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "describe plan",
        "flags": {
            "name": WARM_PLAN_NAME,
            "namespace": TEST_NAMESPACE,
            "with-vms": True,
        },
    })
    output = result.get("output", "")
    # The case-insensitive check subsumes the old redundant
    # `or "Warm" in output` clause.
    assert "warm" in output.lower(), "Warm indicator missing"
    for vm in WARM_VMS.split(","):
        assert vm in output, f"VM '{vm}' missing from warm plan description"
@pytest.mark.order(43)
async def test_verify_cold_plan_yaml(mcp_session):
    """Fetch the cold plan as YAML and verify structure.

    Checks source/target provider names, the VM count, and that the plan
    is cold (``spec.warm`` absent or false).
    """
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get plan",
        "flags": {
            "name": COLD_PLAN_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "yaml",
        },
    })
    raw = result.get("output", "")
    assert raw, "Expected YAML output, got empty string"
    docs = list(yaml.safe_load_all(raw))
    # Guard before indexing -- a whitespace-only stream yields zero docs.
    # (Keeps this test consistent with the provider YAML test.)
    assert len(docs) >= 1, "Expected at least one YAML document"
    doc = docs[0]
    # The YAML document might be a list of resources; unwrap if needed
    if isinstance(doc, list):
        assert len(doc) >= 1, "Expected at least one resource in YAML list"
        doc = doc[0]
    spec = doc.get("spec") or doc.get("object", {}).get("spec", {})
    src_name = spec.get("provider", {}).get("source", {}).get("name", "")
    tgt_name = spec.get("provider", {}).get("destination", {}).get("name", "")
    assert src_name == VSPHERE_PROVIDER_NAME, f"Source provider mismatch: {src_name}"
    assert tgt_name == OCP_PROVIDER_NAME, f"Target provider mismatch: {tgt_name}"
    vms = spec.get("vms", [])
    expected_cold = len(COLD_VMS.split(","))
    assert len(vms) == expected_cold, f"Expected {expected_cold} VMs in cold plan, got {len(vms)}"
    assert spec.get("warm", False) is False, "Cold plan should not be warm"
@pytest.mark.order(44)
async def test_verify_warm_plan_yaml(mcp_session):
    """Fetch the warm plan as YAML and verify warm flag and VMs."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get plan",
        "flags": {
            "name": WARM_PLAN_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "yaml",
        },
    })
    raw = result.get("output", "")
    assert raw, "Expected YAML output, got empty string"
    docs = list(yaml.safe_load_all(raw))
    # Guard before indexing -- a whitespace-only stream yields zero docs.
    assert len(docs) >= 1, "Expected at least one YAML document"
    doc = docs[0]
    # The YAML document might be a list of resources; unwrap if needed
    if isinstance(doc, list):
        assert len(doc) >= 1, "Expected at least one resource in YAML list"
        doc = doc[0]
    spec = doc.get("spec") or doc.get("object", {}).get("spec", {})
    assert spec.get("warm") is True, "Warm plan should have warm=true"
    vms = spec.get("vms", [])
    expected_warm = len(WARM_VMS.split(","))
    assert len(vms) == expected_warm, f"Expected {expected_warm} VMs in warm plan, got {len(vms)}"
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/providers/test_create.py | Python | """Providers · write -- create OCP + vSphere providers, wait Ready + inventory."""
import pytest
from conftest import (
GOVC_PASSWORD,
GOVC_USERNAME,
OCP_PROVIDER_NAME,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
VSPHERE_URL,
_kubectl_wait,
_mtv_base_args,
_retry_command,
call_tool,
)
@pytest.mark.order(10)
async def test_create_ocp_provider(mcp_session):
    """Create the OpenShift 'host' target provider (local cluster, no URL needed)."""
    flags = {
        "name": OCP_PROVIDER_NAME,
        "type": "openshift",
        "namespace": TEST_NAMESPACE,
    }
    result = await call_tool(
        mcp_session, "mtv_write", {"command": "create provider", "flags": flags}
    )
    assert result.get("return_value") == 0, f"Unexpected result: {result}"
    print(f"\n Created OpenShift provider '{OCP_PROVIDER_NAME}' (local cluster)")
@pytest.mark.order(11)
async def test_create_vsphere_provider(mcp_session):
    """Create the vSphere source provider with credentials from env."""
    flags = {
        "name": VSPHERE_PROVIDER_NAME,
        "type": "vsphere",
        "url": VSPHERE_URL,
        "username": GOVC_USERNAME,
        "password": GOVC_PASSWORD,
        "namespace": TEST_NAMESPACE,
        # Self-signed lab certificates: skip TLS verification.
        "provider-insecure-skip-tls": True,
    }
    result = await call_tool(
        mcp_session, "mtv_write", {"command": "create provider", "flags": flags}
    )
    assert result.get("return_value") == 0, f"Unexpected result: {result}"
    print(f"\n Created vSphere provider '{VSPHERE_PROVIDER_NAME}' at {VSPHERE_URL}")
@pytest.mark.order(12)
async def test_wait_providers_ready(mcp_session):
    """Block until both providers report phase=Ready via ``kubectl wait``."""
    targets = [
        f"providers.forklift.konveyor.io/{name}"
        for name in (OCP_PROVIDER_NAME, VSPHERE_PROVIDER_NAME)
    ]
    _kubectl_wait(
        targets,
        "jsonpath={.status.phase}=Ready",
        namespace=TEST_NAMESPACE,
        timeout=120,
    )
    print("\n ✓ Both providers are Ready")
@pytest.mark.order(13)
async def test_wait_inventory_ready(mcp_session):
    """Wait until the inventory service can actually serve requests.

    ``kubectl wait --for=condition=InventoryCreated`` confirms the provider
    CR condition, then we retry a lightweight ``kubectl-mtv get inventory``
    query until the inventory service has synced and can authenticate
    (no 401).
    """

    def _probe_inventory(resource: str, description: str) -> None:
        # Retry a kubectl-mtv inventory query until it answers cleanly.
        _retry_command(
            _mtv_base_args() + [
                "get", "inventory", resource,
                "--provider", VSPHERE_PROVIDER_NAME,
                "--namespace", TEST_NAMESPACE,
                "--output", "json",
            ],
            timeout=180,
            interval=10,
            description=description,
        )

    # 1. CR condition first (fast path -- usually already True)
    _kubectl_wait(
        f"providers.forklift.konveyor.io/{VSPHERE_PROVIDER_NAME}",
        "condition=InventoryCreated",
        namespace=TEST_NAMESPACE,
        timeout=180,
    )
    # 2. Verify the inventory service is actually serving VM requests
    _probe_inventory("vm", "vSphere VM inventory to respond")
    print("\n ✓ vSphere VM inventory is accessible")
    # 3. Verify host inventory is also available (needed by create host)
    _probe_inventory("host", "vSphere host inventory to respond")
    print(" ✓ vSphere host inventory is accessible")
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/providers/test_read.py | Python | """Providers · read -- list, get details, and verify YAML of providers."""
import pytest
import yaml
from conftest import (
GOVC_URL,
OCP_PROVIDER_NAME,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
VSPHERE_URL,
call_tool,
)
@pytest.mark.order(20)
async def test_get_providers_list(mcp_session):
    """List providers in the test namespace -- expect at least 2."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get provider",
        "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
    })
    providers = result.get("data", [])
    assert isinstance(providers, list)
    assert len(providers) >= 2, f"Expected >=2 providers, got {len(providers)}"
    # Providers may be flat dicts or full objects with metadata.
    names = set()
    for entry in providers:
        names.add(entry.get("name") or entry.get("metadata", {}).get("name"))
    assert VSPHERE_PROVIDER_NAME in names
    assert OCP_PROVIDER_NAME in names
@pytest.mark.order(21)
async def test_get_vsphere_provider_details(mcp_session):
    """Verify the vSphere provider JSON has correct type, URL, and phase."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get provider",
        "flags": {
            "name": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    data = result.get("data")
    if isinstance(data, list):
        provider = data[0]
    else:
        provider = data
    spec = provider.get("spec") or provider.get("object", {}).get("spec", {})
    assert spec.get("type") == "vsphere"
    # The stored URL should contain the hostname from GOVC_URL
    spec_url = spec.get("url", "") or ""
    assert GOVC_URL in spec_url or VSPHERE_URL in spec_url, (
        f"vSphere URL mismatch: spec.url={spec_url!r}, expected to contain {GOVC_URL!r}"
    )
    status = provider.get("status") or provider.get("object", {}).get("status", {})
    assert status.get("phase") == "Ready"
@pytest.mark.order(22)
async def test_get_ocp_provider_details(mcp_session):
    """Verify the OpenShift provider JSON type and Ready phase."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get provider",
        "flags": {
            "name": OCP_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    data = result.get("data")
    if isinstance(data, list):
        provider = data[0]
    else:
        provider = data
    spec = provider.get("spec") or provider.get("object", {}).get("spec", {})
    assert spec.get("type") == "openshift"
    status = provider.get("status") or provider.get("object", {}).get("status", {})
    assert status.get("phase") == "Ready"
@pytest.mark.order(23)
async def test_verify_vsphere_provider_yaml(mcp_session):
    """Fetch the vSphere provider as YAML and verify key fields."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get provider",
        "flags": {
            "name": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "yaml",
        },
    })
    raw = result.get("output", "")
    assert raw, "Expected YAML output, got empty string"
    docs = list(yaml.safe_load_all(raw))
    assert len(docs) >= 1, "Expected at least one YAML document"
    doc = docs[0]
    if isinstance(doc, list):
        # The document may itself be a list of resources; unwrap the first.
        assert len(doc) >= 1, "Expected at least one resource in YAML list"
        doc = doc[0]
    spec = doc.get("spec") or doc.get("object", {}).get("spec", {})
    assert spec.get("type") == "vsphere", f"Expected type=vsphere, got {spec.get('type')}"
    spec_url = spec.get("url", "") or ""
    assert GOVC_URL in spec_url or VSPHERE_URL in spec_url, (
        f"vSphere URL not found in spec: {spec_url!r}"
    )
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/lib.sh | Shell | #!/bin/bash
# Shared library functions for MCP server management scripts
# Load environment from .env if present
# Usage: load_env MCP_DIR
# `set -a` auto-exports every variable assigned by the sourced file, so
# child processes (pytest, kubectl) see the .env settings too.
load_env() {
    local mcp_dir="$1"
    if [ -f "$mcp_dir/.env" ]; then
        set -a
        # shellcheck disable=SC1091
        source "$mcp_dir/.env"
        set +a
    fi
}
# Check if a port is listening on a host
# Usage: check_port_listening HOST PORT
# Returns: 0 if listening, 1 if not
# Tries three mechanisms in order of preference:
#   1. nc -z (portable, non-blocking probe)
#   2. bash /dev/tcp redirection under `timeout 1`
#   3. bash /dev/tcp redirection alone (may block until the TCP timeout)
check_port_listening() {
    local host="$1"
    local port="$2"
    if command -v nc >/dev/null 2>&1; then
        nc -z "$host" "$port" 2>/dev/null
        return $?
    elif command -v timeout >/dev/null 2>&1; then
        timeout 1 bash -c "cat < /dev/null > /dev/tcp/$host/$port" 2>/dev/null
        return $?
    else
        # Fallback: try bash TCP redirection without timeout
        (echo >/dev/tcp/"$host"/"$port") 2>/dev/null
        return $?
    fi
}
# Wait for server to start listening on a port
# Usage: wait_for_server HOST PORT TIMEOUT_SECONDS DESCRIPTION
# Returns: 0 if server started, 1 if timeout
# Polls check_port_listening once per second, up to TIMEOUT_SECONDS times.
wait_for_server() {
    local host="$1"
    local port="$2"
    local timeout="$3"
    local description="$4"
    echo "Waiting for server to start listening..."
    for i in $(seq 1 "$timeout"); do
        if check_port_listening "$host" "$port"; then
            return 0
        fi
        sleep 1
    done
    # DESCRIPTION is only used here, to label the failure message.
    echo "✗ $description failed to start listening within $timeout seconds" >&2
    return 1
}
# Validate required environment variables
# Usage: require_env VAR_NAME [VAR_NAME...]
# Exits with error if any variable is not set
# Note: ${!var:-} is bash indirect expansion -- it reads the value of the
# variable whose *name* is stored in $var, defaulting to empty if unset.
require_env() {
    local missing=()
    for var in "$@"; do
        if [ -z "${!var:-}" ]; then
            missing+=("$var")
        fi
    done
    # Report every missing variable at once, then abort the script.
    if [ ${#missing[@]} -gt 0 ]; then
        echo "Error: Required environment variables not set:" >&2
        for var in "${missing[@]}"; do
            echo "  - $var" >&2
        done
        exit 1
    fi
}
# Detect container engine (docker or podman)
# Usage: ENGINE=$(detect_container_engine)
# Honors an explicit CONTAINER_ENGINE override first, then prefers docker
# over podman. Prints the engine name on stdout; returns 1 (printing
# nothing) when no engine is available.
detect_container_engine() {
    if [ -n "${CONTAINER_ENGINE:-}" ]; then
        echo "$CONTAINER_ENGINE"
    elif command -v docker >/dev/null 2>&1; then
        echo "docker"
    elif command -v podman >/dev/null 2>&1; then
        echo "podman"
    else
        # Fix: previously `echo "" >&2` wrote a stray blank line to stderr
        # on failure; just signal via the exit status.
        return 1
    fi
}
# Check if a process is running by PID
# Usage: is_process_running PID
# Exit status mirrors `kill -0`: 0 when the PID exists and is signalable.
is_process_running() {
    kill -0 "$1" 2>/dev/null
}
# Print success message with checkmark
success() { echo "✓ $*"; }

# Print error message with X (written to stderr)
error() { echo "✗ $*" >&2; }

# Print info message with bullet (two-space indent)
info() { echo "  $*"; }
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/server-start-image.sh | Shell | #!/bin/bash
# Start MCP server in container mode (docker/podman)
set -euo pipefail

# Resolve this script's directory and the MCP test root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MCP_DIR="$(dirname "$SCRIPT_DIR")"

# Load shared library
# shellcheck disable=SC1091
source "$SCRIPT_DIR/lib.sh"

# Load environment
load_env "$MCP_DIR"

# Configuration
MCP_SSE_HOST="${MCP_SSE_HOST:-127.0.0.1}"
MCP_SSE_PORT="${MCP_SSE_PORT:-18443}"
MCP_IMAGE="${MCP_IMAGE:-}"
# Container name is derived from the port, so runs on different ports
# can coexist.
CONTAINER_NAME="mcp-e2e-${MCP_SSE_PORT}"

# Validate required variables
require_env KUBE_API_URL MCP_IMAGE

# Detect container engine
if ! ENGINE=$(detect_container_engine); then
    error "No container engine found (docker or podman)"
    info "Install one or set CONTAINER_ENGINE environment variable"
    exit 1
fi

# Check if container already exists
if $ENGINE ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
    # Check if it's running
    if $ENGINE ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
        error "Container already running: $CONTAINER_NAME"
        exit 1
    else
        echo "Removing stopped container: $CONTAINER_NAME"
        $ENGINE rm "$CONTAINER_NAME" >/dev/null
    fi
fi

# Start container
echo "Starting MCP server (container mode)..."
info "Image:  $MCP_IMAGE"
info "Engine: $ENGINE"
info "Host:   $MCP_SSE_HOST"
info "Port:   $MCP_SSE_PORT"
info "API:    $KUBE_API_URL"

# The server listens on 8080 inside the container; publish it on the
# configured host:port. No kubeconfig is mounted -- the server reaches
# the cluster solely via the MCP_KUBE_* environment variables.
$ENGINE run -d \
    --name "$CONTAINER_NAME" \
    -p "${MCP_SSE_HOST}:${MCP_SSE_PORT}:8080" \
    -e "MCP_KUBE_SERVER=${KUBE_API_URL}" \
    -e "MCP_KUBE_INSECURE=true" \
    -e "MCP_PORT=8080" \
    -e "MCP_HOST=0.0.0.0" \
    "$MCP_IMAGE" >/dev/null

# Verify container is still running (catches immediate crash-on-start)
sleep 1
if ! $ENGINE ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
    error "Container stopped unexpectedly"
    info "Check logs: $ENGINE logs $CONTAINER_NAME"
    $ENGINE rm "$CONTAINER_NAME" 2>/dev/null || true
    exit 1
fi

# Wait for container to start listening
if ! wait_for_server "$MCP_SSE_HOST" "$MCP_SSE_PORT" 30 "Container"; then
    error "Container is running but not accepting connections"
    info "Check logs: $ENGINE logs $CONTAINER_NAME"
    exit 1
fi

success "Container started successfully: $CONTAINER_NAME"
info "URL: http://$MCP_SSE_HOST:$MCP_SSE_PORT/sse"
info "Logs: $ENGINE logs -f $CONTAINER_NAME"
exit 0
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/server-start.sh | Shell | #!/bin/bash
# Start MCP server in binary mode (background process)
set -euo pipefail

# Resolve this script's directory and the MCP test root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MCP_DIR="$(dirname "$SCRIPT_DIR")"

# Load shared library
# shellcheck disable=SC1091
source "$SCRIPT_DIR/lib.sh"

# Load environment
load_env "$MCP_DIR"

# Configuration
MCP_SSE_HOST="${MCP_SSE_HOST:-127.0.0.1}"
MCP_SSE_PORT="${MCP_SSE_PORT:-18443}"
# Default binary location: repo root, two levels above e2e/mcp.
MTV_BINARY="${MTV_BINARY:-$MCP_DIR/../../kubectl-mtv}"
SERVER_PID_FILE="$MCP_DIR/.server.pid"
SERVER_LOG_FILE="$MCP_DIR/.server.log"

# Validate required variables
require_env KUBE_API_URL

# Check if server already running
if [ -f "$SERVER_PID_FILE" ]; then
    PID=$(cat "$SERVER_PID_FILE")
    if is_process_running "$PID"; then
        error "Server already running (PID $PID)"
        exit 1
    else
        echo "Removing stale PID file"
        rm -f "$SERVER_PID_FILE"
    fi
fi

# Check binary exists
if [ ! -x "$MTV_BINARY" ]; then
    error "kubectl-mtv binary not found or not executable: $MTV_BINARY"
    info "Build it with: make build"
    exit 1
fi

# Start server
echo "Starting MCP server (binary mode)..."
info "Binary: $MTV_BINARY"
info "Host:   $MCP_SSE_HOST"
info "Port:   $MCP_SSE_PORT"
info "API:    $KUBE_API_URL"

# Start server in background with no ambient credentials
# (KUBECONFIG=/dev/null prevents the server from picking up the
# operator's kubeconfig; it must use --server explicitly).
KUBECONFIG=/dev/null "$MTV_BINARY" mcp-server \
    --sse \
    --port "$MCP_SSE_PORT" \
    --host "$MCP_SSE_HOST" \
    --server "$KUBE_API_URL" \
    --insecure-skip-tls-verify \
    > "$SERVER_LOG_FILE" 2>&1 &
SERVER_PID=$!
echo "$SERVER_PID" > "$SERVER_PID_FILE"

# Verify process started and is still running (catches immediate exit)
sleep 1
if ! is_process_running "$SERVER_PID"; then
    error "Server process died"
    info "Check log: $SERVER_LOG_FILE"
    rm -f "$SERVER_PID_FILE"
    exit 1
fi

# Wait for server to start listening
if ! wait_for_server "$MCP_SSE_HOST" "$MCP_SSE_PORT" 30 "Server"; then
    error "Server is running but not accepting connections"
    info "Check log: $SERVER_LOG_FILE"
    kill "$SERVER_PID" 2>/dev/null || true
    rm -f "$SERVER_PID_FILE"
    exit 1
fi

success "Server started successfully (PID $SERVER_PID)"
info "Log: $SERVER_LOG_FILE"
info "URL: http://$MCP_SSE_HOST:$MCP_SSE_PORT/sse"
exit 0
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/server-status.sh | Shell | #!/bin/bash
# Check MCP server status
set -euo pipefail

# Resolve this script's directory and the MCP test root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MCP_DIR="$(dirname "$SCRIPT_DIR")"

# Load shared library
# shellcheck disable=SC1091
source "$SCRIPT_DIR/lib.sh"

# Load environment
load_env "$MCP_DIR"

# Configuration
MCP_SSE_HOST="${MCP_SSE_HOST:-127.0.0.1}"
MCP_SSE_PORT="${MCP_SSE_PORT:-18443}"
SERVER_PID_FILE="$MCP_DIR/.server.pid"
CONTAINER_NAME="mcp-e2e-${MCP_SSE_PORT}"

# Tracks whether any server (binary or container) was found running.
RUNNING=0

# Check binary server
if [ -f "$SERVER_PID_FILE" ]; then
    PID=$(cat "$SERVER_PID_FILE")
    if is_process_running "$PID"; then
        success "Binary server running"
        info "PID: $PID"
        info "Log: $MCP_DIR/.server.log"
        info "URL: http://$MCP_SSE_HOST:$MCP_SSE_PORT/sse"
        RUNNING=1
    else
        # Dead process but PID file left behind: clean it up.
        error "Binary server not running (stale PID file)"
        rm -f "$SERVER_PID_FILE"
    fi
fi

# Check container
if ENGINE=$(detect_container_engine); then
    if $ENGINE ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
        STATUS=$($ENGINE inspect --format='{{.State.Status}}' "$CONTAINER_NAME" 2>/dev/null || echo "unknown")
        success "Container server running"
        info "Name: $CONTAINER_NAME"
        info "Status: $STATUS"
        info "URL: http://$MCP_SSE_HOST:$MCP_SSE_PORT/sse"
        info "Logs: $ENGINE logs -f $CONTAINER_NAME"
        RUNNING=1
    fi
fi

if [ $RUNNING -eq 0 ]; then
    error "No server running"
    echo ""
    echo "Start a server with:"
    info "make server-start        # Binary mode"
    info "make server-start-image  # Container mode"
    exit 1
fi

# Check if server is actually listening (a live process/container may
# still not be accepting connections yet).
echo ""
echo "Connectivity check..."
if check_port_listening "$MCP_SSE_HOST" "$MCP_SSE_PORT"; then
    success "Server is listening on $MCP_SSE_HOST:$MCP_SSE_PORT"
else
    error "Server is not listening on $MCP_SSE_HOST:$MCP_SSE_PORT"
    exit 1
fi
exit 0
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/server-stop-image.sh | Shell | #!/bin/bash
# Stop MCP server (container mode)
set -euo pipefail

# Resolve this script's directory and the MCP test root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MCP_DIR="$(dirname "$SCRIPT_DIR")"

# Load shared library
# shellcheck disable=SC1091
source "$SCRIPT_DIR/lib.sh"

# Load environment
load_env "$MCP_DIR"

# Configuration
MCP_SSE_PORT="${MCP_SSE_PORT:-18443}"
CONTAINER_NAME="mcp-e2e-${MCP_SSE_PORT}"

# Detect container engine -- exit 0 (idempotent stop) when none exists.
if ! ENGINE=$(detect_container_engine); then
    echo "No container engine found (docker or podman)"
    exit 0
fi

# Check if container exists
if ! $ENGINE ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
    echo "No container server running (container not found)"
    exit 0
fi

# Stop and remove the container; both steps are best-effort so a stop
# script never fails the caller.
echo "Stopping container: $CONTAINER_NAME..."
$ENGINE stop -t 5 "$CONTAINER_NAME" >/dev/null 2>&1 || true
$ENGINE rm "$CONTAINER_NAME" >/dev/null 2>&1 || true
success "Container server stopped"
exit 0
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/scripts/server-stop.sh | Shell | #!/bin/bash
# Stop MCP server (binary mode)
set -euo pipefail

# Resolve this script's directory and the MCP test root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MCP_DIR="$(dirname "$SCRIPT_DIR")"

# Load shared library
# shellcheck disable=SC1091
source "$SCRIPT_DIR/lib.sh"

# Configuration
SERVER_PID_FILE="$MCP_DIR/.server.pid"

# Check if PID file exists -- exit 0 (idempotent stop) when it doesn't.
if [ ! -f "$SERVER_PID_FILE" ]; then
    echo "No binary server running (PID file not found)"
    exit 0
fi

PID=$(cat "$SERVER_PID_FILE")

# Check if process is actually running
if ! is_process_running "$PID"; then
    echo "Binary server not running (removing stale PID file)"
    rm -f "$SERVER_PID_FILE"
    exit 0
fi

# Stop the server (SIGTERM first for graceful shutdown)
echo "Stopping binary server (PID $PID)..."
kill "$PID" 2>/dev/null || true

# Wait up to 10 seconds for graceful shutdown
for i in {1..10}; do
    if ! is_process_running "$PID"; then
        success "Binary server stopped"
        rm -f "$SERVER_PID_FILE"
        exit 0
    fi
    sleep 1
done

# Force kill if still running after the grace period
echo "Force killing server..."
kill -9 "$PID" 2>/dev/null || true
rm -f "$SERVER_PID_FILE"
success "Binary server stopped (forced)"
exit 0
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/setup/test_banner.py | Python | """Setup · banner -- print environment, versions, and configuration summary.
This is the very first test to run (order 0). It prints a block of
diagnostic info so the operator can see *exactly* which cluster,
credentials, and tools are in use. It also verifies that ``kubectl-mtv``
is executable and will fail the entire suite early if it is not.
"""
import os
import subprocess
import sys
import pytest
from conftest import (
GOVC_URL,
GOVC_USERNAME,
KUBE_API_URL,
MCP_IMAGE,
MCP_SSE_HOST,
MCP_SSE_PORT,
MCP_SSE_URL,
MCP_VERBOSE,
MTV_BINARY,
TEST_NAMESPACE,
COLD_PLAN_NAME,
WARM_PLAN_NAME,
COLD_VMS,
WARM_VMS,
NETWORK_PAIRS,
STORAGE_PAIRS,
VSPHERE_PROVIDER_NAME,
VSPHERE_URL,
OCP_PROVIDER_NAME,
ESXI_HOST_NAME,
_mtv_base_args,
)
def _cli_version() -> tuple[str, bool]:
    """Run ``kubectl-mtv version`` and return ``(formatted_output, ok)``.

    ``ok`` is ``False`` when the binary is missing or the command fails.
    """
    try:
        proc = subprocess.run(
            _mtv_base_args() + ["version"],
            capture_output=True,
            text=True,
            timeout=10,
        )
        text = proc.stdout.strip() or proc.stderr.strip() or "(no output)"
        # Indent continuation lines so they align under the banner header.
        formatted = ("\n" + " " * 22).join(text.splitlines())
        return formatted, proc.returncode == 0
    except FileNotFoundError:
        return "(kubectl-mtv binary not found)", False
    except Exception as exc:
        return f"(error: {exc})", False
def _section(title: str) -> str:
    """Format *title* as a banner section header on its own line."""
    return "\n  --- " + title + " ---"
@pytest.mark.order(0)
async def test_print_banner(mcp_session):
    """Print versions, credentials, and test configuration.

    Fails the entire suite if the ``kubectl-mtv`` binary cannot be executed.
    """
    cli_ver, cli_ok = _cli_version()
    # Determine server info - tests always connect to an existing server
    # (MCP_IMAGE set => container mode; otherwise local binary mode).
    if MCP_IMAGE:
        server_info = f"Container image: {MCP_IMAGE}"
    else:
        server_info = f"Binary: {MTV_BINARY}"
    # Assemble the whole banner as one string so pytest prints it atomically.
    banner = "\n".join([
        "",
        "=" * 60,
        "  MCP E2E TEST SUITE",
        "=" * 60,
        _section("Versions"),
        f"  Python:        {sys.version.split()[0]}",
        f"  pytest:        {pytest.__version__}",
        f"  kubectl-mtv:   {cli_ver}",
        _section("MCP Server Connection"),
        f"  MCP SSE URL:   {MCP_SSE_URL}",
        f"  Server info:   {server_info}",
        _section("Cluster"),
        f"  API URL:       {KUBE_API_URL}",
        _section("vSphere source"),
        f"  GOVC_URL:      {GOVC_URL}",
        f"  Provider URL:  {VSPHERE_URL}",
        f"  Username:      {GOVC_USERNAME}",
        f"  Provider name: {VSPHERE_PROVIDER_NAME}",
        _section("OpenShift target"),
        f"  API URL:       {KUBE_API_URL}",
        f"  Provider name: {OCP_PROVIDER_NAME}",
        f"  ESXi host:     {ESXI_HOST_NAME}",
        _section("Migration plans"),
        f"  Namespace:     {TEST_NAMESPACE}",
        f"  Cold plan:     {COLD_PLAN_NAME}",
        f"  Warm plan:     {WARM_PLAN_NAME}",
        f"  Cold VMs:      {COLD_VMS}",
        f"  Warm VMs:      {WARM_VMS}",
        f"  Network pairs: {NETWORK_PAIRS}",
        f"  Storage pairs: {STORAGE_PAIRS}",
        _section("Diagnostics"),
        f"  MCP_VERBOSE:   {MCP_VERBOSE}",
        "",
        "=" * 60,
        "",
    ])
    print(banner)
    # Hard-fail here so no later test runs against a broken CLI.
    assert cli_ok, (
        f"kubectl-mtv version failed — cannot continue.\n  {cli_ver}"
    )
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/setup/test_setup.py | Python | """Setup -- verify MCP server, clean old namespace, then create a fresh one."""
import subprocess
import pytest
from conftest import (
KUBE_API_URL,
MCP_SSE_PORT,
MCP_SSE_URL,
TEST_NAMESPACE,
_create_namespace,
_delete_namespace,
_kubectl_base_args,
_kubectl_wait,
call_tool,
)
@pytest.mark.order(1)
async def test_mcp_server_running(mcp_session):
    """Verify the MCP SSE server is up and the client session is connected."""
    response = await call_tool(mcp_session, "mtv_help", {"command": "get plan"})
    assert response, "MCP server returned empty response to mtv_help"
    print(f"\n MCP SSE server responding on port {MCP_SSE_PORT}")
    print(f" Client connected to {MCP_SSE_URL}")
@pytest.mark.order(2)
async def test_clean_namespace(mcp_session):
    """Delete the test namespace (if it exists) and wait until it is gone."""
    # Probe for the namespace; a zero exit plus matching name means it exists.
    probe = subprocess.run(
        _kubectl_base_args() + [
            "get", "namespace", TEST_NAMESPACE,
            "-o=jsonpath={.metadata.name}",
        ],
        capture_output=True, text=True, timeout=30,
    )
    exists = probe.returncode == 0 and probe.stdout.strip() == TEST_NAMESPACE
    if not exists:
        print(f"\n Namespace '{TEST_NAMESPACE}' does not exist -- nothing to clean")
        return
    print(f"\n Namespace '{TEST_NAMESPACE}' exists -- deleting ...")
    _delete_namespace(TEST_NAMESPACE)
    # kubectl wait --for=delete exits 0 when the resource is gone
    _kubectl_wait(f"namespace/{TEST_NAMESPACE}", "delete", timeout=120)
    print(f" Namespace '{TEST_NAMESPACE}' deleted and gone")
@pytest.mark.order(3)
async def test_create_namespace(mcp_session):
    """Create a fresh test namespace and wait until it is fully initialised."""
    _create_namespace(TEST_NAMESPACE)
    # First gate: the namespace itself reaches the Active phase.
    _kubectl_wait(
        f"namespace/{TEST_NAMESPACE}",
        "jsonpath={.status.phase}=Active",
        timeout=60,
    )
    # Second gate: the default ServiceAccount exists, signalling that the
    # namespace controller has finished RBAC setup and workloads can run.
    _kubectl_wait(
        "serviceaccount/default",
        "jsonpath={.metadata.name}=default",
        namespace=TEST_NAMESPACE,
        timeout=30,
    )
    print(f"\n Namespace '{TEST_NAMESPACE}' created and fully initialised")
    print(f" OCP API: {KUBE_API_URL}")
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
main.go | Go | package main
import (
"os"
"github.com/yaacov/kubectl-mtv/cmd"
)
// main is the kubectl-mtv entry point: run the root command and exit
// with a non-zero status on any error.
func main() {
	err := cmd.Execute()
	if err != nil {
		os.Exit(1)
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/archive/plan/archive.go | Go | package plan
import (
"context"
"encoding/json"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Archive sets the archived flag on a plan.
// It verifies the plan exists, then merge-patches spec.archived and
// prints a confirmation line. ctx bounds both API calls.
func Archive(ctx context.Context, configFlags *genericclioptions.ConfigFlags, planName, namespace string, archived bool) error {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	plans := dynClient.Resource(client.PlansGVR).Namespace(namespace)

	// Confirm the plan exists first so the caller gets a clear
	// "failed to get plan" error rather than a patch failure.
	if _, err = plans.Get(ctx, planName, metav1.GetOptions{}); err != nil {
		return fmt.Errorf("failed to get plan '%s': %v", planName, err)
	}

	// Merge-patch touches only spec.archived, leaving the rest intact.
	patchBytes, err := json.Marshal(map[string]interface{}{
		"spec": map[string]interface{}{
			"archived": archived,
		},
	})
	if err != nil {
		return fmt.Errorf("failed to create patch: %v", err)
	}

	if _, err = plans.Patch(ctx, planName, types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
		return fmt.Errorf("failed to update plan: %v", err)
	}

	action := "archived"
	if !archived {
		action = "unarchived"
	}
	fmt.Printf("Plan '%s' %s\n", planName, action)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/cancel/plan/cancel.go | Go | package plan
import (
"context"
"encoding/json"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan/status"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Cancel cancels specific VMs in a running migration.
//
// The requested VM names are validated against the plan's spec.vms list and
// resolved to their inventory IDs. The currently running Migration for the
// plan is then merge-patched so that its spec.cancel list contains both any
// previously requested cancellations and the newly requested VMs
// (duplicates are collapsed by VM ID in mergeCancelVMs).
//
// Returns an error if the plan cannot be read, any requested VM name is not
// part of the plan (in which case nothing is canceled), no migration is
// currently running, or the patch fails.
func Cancel(configFlags *genericclioptions.ConfigFlags, planName string, namespace string, vmNames []string) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Get the plan
	planObj, err := c.Resource(client.PlansGVR).Namespace(namespace).Get(context.TODO(), planName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get plan '%s': %v", planName, err)
	}
	// Validate that VM names exist in the plan
	planVMs, found, err := unstructured.NestedSlice(planObj.Object, "spec", "vms")
	if err != nil || !found {
		return fmt.Errorf("failed to get VMs from plan: %v", err)
	}
	// Build a name -> inventory-ID lookup; entries missing either field are
	// skipped, so they can never be resolved (and thus never canceled) below.
	vmNameToIDMap := make(map[string]string)
	for _, vmObj := range planVMs {
		vm, ok := vmObj.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || vmName == "" {
			continue
		}
		vmID, ok := vm["id"].(string)
		if !ok || vmID == "" {
			continue
		}
		vmNameToIDMap[vmName] = vmID
	}
	// Check if requested VM names exist in the plan
	var invalidVMs []string
	var validVMs []string
	for _, vmName := range vmNames {
		if _, exists := vmNameToIDMap[vmName]; !exists {
			invalidVMs = append(invalidVMs, vmName)
		} else {
			validVMs = append(validVMs, vmName)
		}
	}
	// Fail fast on any unknown name -- the cancellation is all-or-nothing.
	if len(invalidVMs) > 0 {
		return fmt.Errorf("the following VMs were not found in plan '%s': %v", planName, invalidVMs)
	}
	// Find the running migration for this plan
	runningMigration, _, err := status.GetRunningMigration(c, namespace, planObj, client.MigrationsGVR)
	if err != nil {
		return err
	}
	if runningMigration == nil {
		return fmt.Errorf("no running migration found for plan '%s'", planName)
	}
	// Prepare the VM references to cancel
	var cancelVMs []ref.Ref
	for _, vmName := range validVMs {
		cancelVMs = append(cancelVMs, ref.Ref{
			Name: vmName,
			ID:   vmNameToIDMap[vmName],
		})
	}
	// Create a patch to update the cancel field.
	// First, get the current cancel list to avoid overwriting it
	// (errors/absence are deliberately ignored: an empty list is fine).
	currentCancelVMs, _, _ := unstructured.NestedSlice(runningMigration.Object, "spec", "cancel")
	// Convert current cancel VMs to ref.Ref structures
	var existingCancelVMs []ref.Ref
	for _, vmObj := range currentCancelVMs {
		vm, ok := vmObj.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, _ := vm["name"].(string)
		vmID, _ := vm["id"].(string)
		existingCancelVMs = append(existingCancelVMs, ref.Ref{
			Name: vmName,
			ID:   vmID,
		})
	}
	// Merge existing and new cancel VMs, avoiding duplicates
	mergedCancelVMs := mergeCancelVMs(existingCancelVMs, cancelVMs)
	// Prepare the patch
	patchObject := map[string]interface{}{
		"spec": map[string]interface{}{
			"cancel": mergedCancelVMs,
		},
	}
	// Convert the patch to JSON
	patchBytes, err := json.Marshal(patchObject)
	if err != nil {
		return fmt.Errorf("failed to create patch: %v", err)
	}
	// Apply the patch to the migration
	_, err = c.Resource(client.MigrationsGVR).Namespace(namespace).Patch(
		context.TODO(),
		runningMigration.GetName(),
		types.MergePatchType,
		patchBytes,
		metav1.PatchOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to update migration with canceled VMs: %v", err)
	}
	fmt.Printf("Successfully requested cancellation for VMs in plan '%s': %v\n", planName, validVMs)
	return nil
}
// mergeCancelVMs merges two slices of ref.Ref into the []interface{} form
// expected by an unstructured merge patch, de-duplicating entries by VM ID.
//
// Later entries win: when a VM ID appears in both slices, the entry from
// updates replaces the one from existing (its name may have changed).
// Entries with an empty ID are dropped, as before.
//
// Unlike the previous map-iteration implementation, the output order is
// deterministic (existing entries in order, then new ones). Go map
// iteration order is randomized, so the old code reshuffled the cancel
// list on every call, producing needless patch churn. The parameter
// formerly named "new" is renamed to avoid shadowing the builtin.
func mergeCancelVMs(existing, updates []ref.Ref) []interface{} {
	indexByID := make(map[string]int, len(existing)+len(updates))
	merged := make([]ref.Ref, 0, len(existing)+len(updates))

	// add appends vm, or overwrites the earlier entry with the same ID.
	add := func(vm ref.Ref) {
		if vm.ID == "" {
			return
		}
		if i, seen := indexByID[vm.ID]; seen {
			merged[i] = vm
			return
		}
		indexByID[vm.ID] = len(merged)
		merged = append(merged, vm)
	}

	for _, vm := range existing {
		add(vm)
	}
	for _, vm := range updates {
		add(vm)
	}

	// Convert to map form for the unstructured patch payload.
	result := make([]interface{}, 0, len(merged))
	for _, vm := range merged {
		result = append(result, map[string]interface{}{
			"name": vm.Name,
			"id":   vm.ID,
		})
	}
	return result
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/hook/hook.go | Go | package hook
import (
"context"
"encoding/base64"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// CreateHookOptions encapsulates the parameters for creating migration hooks.
// This includes the hook name, namespace, configuration flags, and the HookSpec
// containing the hook's operational parameters.
type CreateHookOptions struct {
Name string
Namespace string
ConfigFlags *genericclioptions.ConfigFlags
HookSpec forkliftv1beta1.HookSpec
}
// Create creates a new migration hook resource.
//
// The hook spec is validated first; any plain-text playbook content is then
// base64 encoded, the Hook is created in the cluster, and the outcome is
// reported to the user on stdout.
func Create(opts CreateHookOptions) error {
	if err := validateHookSpec(opts.HookSpec); err != nil {
		return fmt.Errorf("invalid hook specification: %v", err)
	}

	// Work on a copy of the spec so the caller's options stay untouched.
	spec := opts.HookSpec
	if spec.Playbook != "" && !isBase64Encoded(spec.Playbook) {
		spec.Playbook = base64.StdEncoding.EncodeToString([]byte(spec.Playbook))
		klog.V(2).Infof("Encoded playbook content to base64")
	}

	created, err := createSingleHook(opts.ConfigFlags, opts.Namespace, opts.Name, spec)
	if err != nil {
		return fmt.Errorf("failed to create hook %s: %v", opts.Name, err)
	}

	fmt.Printf("hook/%s created\n", created.Name)
	klog.V(2).Infof("Created hook '%s' in namespace '%s'", opts.Name, opts.Namespace)
	return nil
}
// validateHookSpec validates the hook specification parameters, rejecting an
// empty image (a default is normally filled in at the command level) and a
// negative deadline.
func validateHookSpec(spec forkliftv1beta1.HookSpec) error {
	switch {
	case spec.Image == "":
		return fmt.Errorf("image cannot be empty")
	case spec.Deadline < 0:
		return fmt.Errorf("deadline must be non-negative, got: %d", spec.Deadline)
	default:
		return nil
	}
}
// isBase64Encoded reports whether s already looks like valid base64 content,
// so playbook text is not double-encoded. All whitespace is stripped first
// (wrapped or indented payloads are common), then the remainder must both be
// a multiple of four characters and decode cleanly with the standard
// alphabet. Note this is a heuristic: short plain words made of base64
// characters can still pass.
func isBase64Encoded(s string) bool {
	stripped := strings.NewReplacer(" ", "", "\n", "", "\t", "", "\r", "").Replace(s)
	if len(stripped)%4 != 0 {
		return false
	}
	_, decodeErr := base64.StdEncoding.DecodeString(stripped)
	return decodeErr == nil
}
// createSingleHook creates a single Hook resource in Kubernetes.
//
// It builds a typed forkliftv1beta1.Hook from the given spec, converts it to
// unstructured form for the dynamic client, creates it in the cluster, and
// converts the server's response back to a typed object for the caller.
// An AlreadyExists error from the API is rewritten into a friendlier message.
func createSingleHook(configFlags *genericclioptions.ConfigFlags, namespace, name string, spec forkliftv1beta1.HookSpec) (*forkliftv1beta1.Hook, error) {
	// Create Hook resource
	hookObj := &forkliftv1beta1.Hook{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: spec,
	}

	// Set the API version and kind explicitly -- the typed constructor does
	// not populate TypeMeta, and the dynamic client needs both.
	hookObj.Kind = "Hook"
	hookObj.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()

	// Convert to unstructured for dynamic client
	unstructuredHook, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hookObj)
	if err != nil {
		return nil, fmt.Errorf("failed to convert hook to unstructured: %v", err)
	}

	// Get dynamic client
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get dynamic client: %v", err)
	}

	// Create the hook resource
	createdHookUnstructured, err := dynamicClient.Resource(client.HooksGVR).Namespace(namespace).Create(
		context.Background(),
		&unstructured.Unstructured{Object: unstructuredHook},
		metav1.CreateOptions{},
	)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil, fmt.Errorf("hook '%s' already exists in namespace '%s'", name, namespace)
		}
		return nil, fmt.Errorf("failed to create hook: %v", err)
	}

	// Convert back to typed object for return
	var createdHook forkliftv1beta1.Hook
	err = runtime.DefaultUnstructuredConverter.FromUnstructured(createdHookUnstructured.Object, &createdHook)
	if err != nil {
		return nil, fmt.Errorf("failed to convert created hook back to typed object: %v", err)
	}

	return &createdHook, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/host/host.go | Go | package host
import (
"context"
"crypto/sha256"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// CreateHostOptions encapsulates the parameters for creating migration hosts.
// This includes provider information, authentication details, network configuration,
// and TLS settings for host connections.
type CreateHostOptions struct {
HostIDs []string
Namespace string
Provider string
ConfigFlags *genericclioptions.ConfigFlags
InventoryURL string
InventoryInsecureSkipTLS bool
Username string
Password string
ExistingSecret string
IPAddress string
NetworkAdapterName string
HostInsecureSkipTLS bool
CACert string
HostSpec forkliftv1beta1.HostSpec
}
// Create creates new migration hosts for vSphere providers.
//
// Flow: validate the provider is vSphere, fetch the provider's host
// inventory, validate the requested host IDs against it, choose an
// authentication secret (provider secret for ESXi endpoints, a
// user-supplied existing secret, or a newly created one), then create one
// Host resource per ID. Newly created secrets get each host added as an
// owner so they are garbage-collected with the last host using them.
func Create(ctx context.Context, opts CreateHostOptions) error {
	// Get the provider object and validate it's a vSphere provider
	// Only vSphere providers support host creation
	_, err := validateAndGetProvider(ctx, opts.ConfigFlags, opts.Provider, opts.Namespace)
	if err != nil {
		return err
	}

	// Fetch available hosts from provider inventory to validate requested host names
	// and extract network adapter information for IP resolution
	availableHosts, err := getProviderHosts(ctx, opts.ConfigFlags, opts.Provider, opts.Namespace, opts.InventoryURL, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return fmt.Errorf("failed to get provider hosts: %v", err)
	}

	// Ensure all requested host IDs exist in the provider's inventory
	if err := validateHostIDs(opts.HostIDs, availableHosts); err != nil {
		return err
	}

	// Create or get secret
	var secret *corev1.ObjectReference
	var createdSecret *corev1.Secret

	// Determine authentication strategy: ESXi endpoints can reuse provider secrets,
	// otherwise use existing secret or create new one
	providerHasESXIEndpoint, providerSecret, err := CheckProviderESXIEndpoint(ctx, opts.ConfigFlags, opts.Provider, opts.Namespace)
	if err != nil {
		return fmt.Errorf("failed to check provider endpoint type: %v", err)
	}

	if providerHasESXIEndpoint && opts.ExistingSecret == "" && opts.Username == "" {
		// For ESXi endpoints, reuse the provider's existing secret for efficiency
		secret = providerSecret
		klog.V(2).Infof("Using provider secret '%s' for ESXi endpoint", providerSecret.Name)
	} else if opts.ExistingSecret != "" {
		// Use user-specified existing secret
		secret = &corev1.ObjectReference{
			Name:      opts.ExistingSecret,
			Namespace: opts.Namespace,
		}
	} else {
		// Create new secret with provided credentials
		// Use first host ID for secret naming when creating multiple hosts
		firstHostID := opts.HostIDs[0]
		firstHostResourceName := firstHostID + "-" + generateHash(firstHostID)
		createdSecret, err = createHostSecret(opts.ConfigFlags, opts.Namespace, firstHostResourceName, opts.Username, opts.Password, opts.HostInsecureSkipTLS, opts.CACert)
		if err != nil {
			return fmt.Errorf("failed to create host secret: %v", err)
		}
		secret = &corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	}

	// Create each host resource with proper ownership and secret references.
	// Note: hosts are created one by one; a failure part-way leaves earlier
	// hosts in place.
	for _, hostID := range opts.HostIDs {
		// Resolve IP address from direct input or network adapter lookup
		hostIP, err := resolveHostIPAddress(opts.IPAddress, opts.NetworkAdapterName, hostID, availableHosts)
		if err != nil {
			return fmt.Errorf("failed to resolve IP address for host %s: %v", hostID, err)
		}

		// Create the host resource with provider ownership
		hostObj, err := createSingleHost(ctx, opts.ConfigFlags, opts.Namespace, hostID, opts.Provider, hostIP, secret, availableHosts)
		if err != nil {
			return fmt.Errorf("failed to create host %s: %v", hostID, err)
		}

		// If we created a new secret, add this host as an owner for proper garbage collection
		// This ensures the secret is deleted only when all hosts using it are deleted
		if createdSecret != nil {
			err = addHostAsSecretOwner(opts.ConfigFlags, opts.Namespace, createdSecret.Name, hostObj)
			if err != nil {
				return fmt.Errorf("failed to add host %s as owner of secret %s: %v", hostID, createdSecret.Name, err)
			}
		}

		// Inform user about the created resource
		fmt.Printf("host/%s created\n", hostObj.Name)
		klog.V(2).Infof("Created host '%s' in namespace '%s'", hostID, opts.Namespace)
	}

	if createdSecret != nil {
		klog.V(2).Infof("Created secret '%s' for host authentication", createdSecret.Name)
	}

	return nil
}
// validateAndGetProvider fetches the named provider and verifies it is a
// vSphere provider. Hosts model ESXi servers, so no other provider type
// supports host creation; the provider object is returned for further use.
func validateAndGetProvider(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string) (*unstructured.Unstructured, error) {
	providerObj, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider: %v", err)
	}

	// Missing or unreadable spec.type is treated as an error.
	typeValue, found, err := unstructured.NestedString(providerObj.Object, "spec", "type")
	if err != nil || !found {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}

	if typeValue != "vsphere" {
		return nil, fmt.Errorf("only vSphere providers support host creation, got provider type: %s", typeValue)
	}
	return providerObj, nil
}
// getProviderHosts retrieves the list of available ESXi hosts from the
// provider's inventory. The result is used both to validate requested host
// IDs and to look up network adapter details for IP resolution.
func getProviderHosts(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]map[string]interface{}, error) {
	providerObj, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, err
	}

	// Query the inventory service through a provider-scoped client.
	invClient := inventory.NewProviderClientWithInsecure(configFlags, providerObj, inventoryURL, insecureSkipTLS)
	raw, err := invClient.GetHosts(ctx, 4)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch host inventory: %v", err)
	}

	items, ok := raw.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for host inventory")
	}

	// Keep only well-formed (map-shaped) host entries; anything else is
	// silently dropped, matching the lenient handling elsewhere.
	hostMaps := make([]map[string]interface{}, 0, len(items))
	for _, entry := range items {
		if hostEntry, ok := entry.(map[string]interface{}); ok {
			hostMaps = append(hostMaps, hostEntry)
		}
	}
	return hostMaps, nil
}
// validateHostIDs ensures every requested host ID exists in the provider's
// inventory, preventing Host resources that reference non-existent ESXi
// hosts. All missing IDs are reported together in a single error.
func validateHostIDs(hostIDs []string, availableHosts []map[string]interface{}) error {
	known := make(map[string]bool, len(availableHosts))
	for _, hostEntry := range availableHosts {
		if id, ok := hostEntry["id"].(string); ok {
			known[id] = true
		}
	}

	var notFound []string
	for _, requested := range hostIDs {
		if !known[requested] {
			notFound = append(notFound, requested)
		}
	}

	if len(notFound) == 0 {
		return nil
	}
	return fmt.Errorf("the following host IDs were not found in provider inventory: %s\nHint: use 'kubectl-mtv get inventory host --provider <name>' to list available host IDs", strings.Join(notFound, ", "))
}
// resolveHostIPAddress determines the IP address to use for host
// communication. A directly supplied IP always wins; otherwise the named
// network adapter is looked up in the host's inventory entry and its
// ipAddress field is returned.
func resolveHostIPAddress(directIP, networkAdapterName, hostID string, availableHosts []map[string]interface{}) (string, error) {
	if directIP != "" {
		return directIP, nil
	}

	// Scan inventory for the host, then for the named adapter on it.
	for _, hostEntry := range availableHosts {
		id, ok := hostEntry["id"].(string)
		if !ok || id != hostID {
			continue
		}
		adapters, ok := hostEntry["networkAdapters"].([]interface{})
		if !ok {
			continue
		}
		for _, rawAdapter := range adapters {
			adapter, ok := rawAdapter.(map[string]interface{})
			if !ok {
				continue
			}
			if name, ok := adapter["name"].(string); !ok || name != networkAdapterName {
				continue
			}
			if ip, ok := adapter["ipAddress"].(string); ok {
				return ip, nil
			}
		}
	}
	return "", fmt.Errorf("network adapter '%s' not found for host '%s' or no IP address available", networkAdapterName, hostID)
}
// createSingleHost creates a single Host resource owned by its provider.
//
// The resource name is the inventory host ID plus a short hash suffix
// (generateHash) to avoid collisions; the provider is set as the controlling
// owner so deleting the provider garbage-collects its hosts. The created
// host is returned so the caller can also add it as a secret owner.
func createSingleHost(ctx context.Context, configFlags *genericclioptions.ConfigFlags, namespace, hostID, providerName, ipAddress string, secret *corev1.ObjectReference, availableHosts []map[string]interface{}) (*forkliftv1beta1.Host, error) {
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider for ownership: %v", err)
	}

	// Create Host resource with provider as controlling owner for lifecycle management
	hostResourceName := hostID + "-" + generateHash(hostID)
	hostObj := &forkliftv1beta1.Host{
		ObjectMeta: metav1.ObjectMeta{
			Name:      hostResourceName,
			Namespace: namespace,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: provider.GetAPIVersion(),
					Kind:       provider.GetKind(),
					Name:       provider.GetName(),
					UID:        provider.GetUID(),
					Controller: &[]bool{true}[0], // Provider controls host lifecycle
				},
			},
		},
		Spec: forkliftv1beta1.HostSpec{
			Provider: corev1.ObjectReference{
				Kind:       "Provider",
				APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
				Name:       providerName,
				Namespace:  namespace,
			},
			IpAddress: ipAddress,
			Secret:    *secret,
		},
	}
	// Record the inventory identity: the raw ID plus the human-readable name
	// (falls back to the ID when the inventory has no name).
	hostObj.Spec.ID = hostID
	hostObj.Spec.Name = getHostNameFromID(hostID, availableHosts)
	// TypeMeta must be set explicitly for the dynamic client.
	hostObj.Kind = "Host"
	hostObj.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()

	unstructuredHost, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hostObj)
	if err != nil {
		return nil, fmt.Errorf("failed to convert host to unstructured: %v", err)
	}

	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get dynamic client: %v", err)
	}

	createdHostUnstructured, err := dynamicClient.Resource(client.HostsGVR).Namespace(namespace).Create(
		context.Background(),
		&unstructured.Unstructured{Object: unstructuredHost},
		metav1.CreateOptions{},
	)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil, fmt.Errorf("host '%s' already exists in namespace '%s'", hostID, namespace)
		}
		return nil, fmt.Errorf("failed to create host: %v", err)
	}

	// Convert the server's response back to a typed object for the caller.
	var createdHost forkliftv1beta1.Host
	err = runtime.DefaultUnstructuredConverter.FromUnstructured(createdHostUnstructured.Object, &createdHost)
	if err != nil {
		return nil, fmt.Errorf("failed to convert created host back to typed object: %v", err)
	}

	return &createdHost, nil
}
// getHostNameFromID returns the human-readable host name for a given host ID
// from inventory, falling back to the ID itself when no name is found.
func getHostNameFromID(hostID string, availableHosts []map[string]interface{}) string {
	for _, hostEntry := range availableHosts {
		id, ok := hostEntry["id"].(string)
		if !ok || id != hostID {
			continue
		}
		if name, ok := hostEntry["name"].(string); ok {
			return name
		}
	}
	// No matching entry carried a name -- use the ID as-is.
	return hostID
}
// generateHash derives a short, stable suffix from input so generated
// resource names stay unique without becoming unwieldy.
func generateHash(input string) string {
	digest := sha256.Sum256([]byte(input))
	// Two bytes rendered as lowercase hex yield a 4-character suffix.
	return fmt.Sprintf("%x", digest[:2])
}
// createHostSecret creates a Kubernetes Secret containing host
// authentication credentials (user/password plus optional TLS settings).
//
// The secret uses GenerateName ("<hostResourceName>-host-" prefix) so the
// API server appends a unique suffix, and it is labeled with the host
// resource it was created for so it can be found (and cleaned up) later.
func createHostSecret(configFlags *genericclioptions.ConfigFlags, namespace, hostResourceName, username, password string, hostInsecureSkipTLS bool, cacert string) (*corev1.Secret, error) {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	secretData := map[string][]byte{
		"user":     []byte(username),
		"password": []byte(password),
	}

	// Optional TLS handling: either skip verification entirely or trust a
	// caller-provided CA certificate.
	if hostInsecureSkipTLS {
		secretData["insecureSkipVerify"] = []byte("true")
	}

	if cacert != "" {
		secretData["cacert"] = []byte(cacert)
	}

	secretName := fmt.Sprintf("%s-host-", hostResourceName)
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: secretName,
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForResource":     hostResourceName,
				"createdForResourceType": "hosts",
			},
		},
		Data: secretData,
		Type: corev1.SecretTypeOpaque,
	}

	return k8sClient.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
}
// addHostAsSecretOwner adds a host as a (non-controller) owner reference to
// a secret, enabling proper garbage collection: when multiple hosts share a
// secret, each becomes an owner, and the secret is only deleted when all
// owning hosts are removed. Adding the same host twice is a no-op.
//
// NOTE(review): this is a plain get-modify-update with no retry on a
// conflicting concurrent update -- acceptable for a CLI, but worth knowing.
func addHostAsSecretOwner(configFlags *genericclioptions.ConfigFlags, namespace, secretName string, host *forkliftv1beta1.Host) error {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	secret, err := k8sClient.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get secret %s: %v", secretName, err)
	}

	// Create owner reference for the host (non-controller since multiple hosts can own the secret)
	hostOwnerRef := metav1.OwnerReference{
		APIVersion: host.APIVersion,
		Kind:       host.Kind,
		Name:       host.Name,
		UID:        host.UID,
		Controller: &[]bool{false}[0], // Multiple hosts can own the same secret
	}

	// Check if this host is already an owner to avoid duplicates (match by UID).
	for _, ownerRef := range secret.OwnerReferences {
		if ownerRef.UID == host.UID {
			return nil // Already an owner, nothing to do
		}
	}

	secret.OwnerReferences = append(secret.OwnerReferences, hostOwnerRef)

	_, err = k8sClient.CoreV1().Secrets(namespace).Update(context.Background(), secret, metav1.UpdateOptions{})
	if err != nil {
		return fmt.Errorf("failed to update secret %s with host owner reference: %v", secretName, err)
	}

	return nil
}
// CheckProviderESXIEndpoint determines if a provider is configured with the
// ESXi endpoint type (spec.settings.sdkEndpoint == "esxi") and, if so,
// returns a reference to the provider's secret so hosts can reuse its
// credentials.
//
// A missing/unreadable settings map or a non-"esxi" endpoint returns
// (false, nil, nil) -- those cases are not errors. An ESXi provider without
// a configured secret IS an error, since the caller needs the credentials.
func CheckProviderESXIEndpoint(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string) (bool, *corev1.ObjectReference, error) {
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return false, nil, err
	}

	settings, found, err := unstructured.NestedMap(provider.Object, "spec", "settings")
	if err != nil || !found {
		return false, nil, nil
	}

	sdkEndpoint, ok := settings["sdkEndpoint"].(string)
	if !ok || sdkEndpoint != "esxi" {
		return false, nil, nil
	}

	secretName, found, err := unstructured.NestedString(provider.Object, "spec", "secret", "name")
	if err != nil || !found {
		return false, nil, fmt.Errorf("provider has esxi endpoint but no secret configured")
	}

	// The secret namespace defaults to the provider's namespace when unset.
	secretNamespace, found, err := unstructured.NestedString(provider.Object, "spec", "secret", "namespace")
	if err != nil || !found {
		secretNamespace = namespace
	}

	return true, &corev1.ObjectReference{
		Name:      secretName,
		Namespace: secretNamespace,
	}, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/mapping.go | Go | package mapping
import (
"context"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// StorageCreateOptions holds options for creating storage mappings.
// It bundles client configuration, the mapping's identity, the provider
// pair, the raw storage-pair string, per-pair defaults, and optional
// credentials used when an offload secret must be created.
type StorageCreateOptions struct {
	// Client configuration and mapping identity.
	ConfigFlags *genericclioptions.ConfigFlags
	Name string
	Namespace string
	// Provider references; these may use "namespace/name" form
	// (see parseProviderReference behavior covered in mapping_test.go).
	SourceProvider string
	TargetProvider string
	// Raw "source:target" pair list, parsed downstream.
	StoragePairs string
	// Inventory service access.
	InventoryURL string
	InventoryInsecureSkipTLS bool
	// Defaults applied to each parsed pair.
	DefaultVolumeMode string
	DefaultAccessMode string
	DefaultOffloadPlugin string
	DefaultOffloadSecret string
	DefaultOffloadVendor string
	// Offload secret creation fields
	OffloadVSphereUsername string
	OffloadVSpherePassword string
	OffloadVSphereURL string
	OffloadStorageUsername string
	OffloadStoragePassword string
	OffloadStorageEndpoint string
	OffloadCACert string
	OffloadInsecureSkipTLS bool
}
// StorageParseOptions holds options for parsing storage pairs.
// These mirror the parsing-relevant subset of StorageCreateOptions and are
// forwarded to parseStoragePairsWithOptions (see
// ParseStoragePairsWithOptions for the exact argument mapping).
type StorageParseOptions struct {
	// PairStr is the raw "source:target" pair list to parse.
	PairStr string
	// DefaultNamespace is used when a reference omits its namespace.
	DefaultNamespace string
	ConfigFlags *genericclioptions.ConfigFlags
	SourceProvider string
	// Inventory service access.
	InventoryURL string
	InventoryInsecureSkipTLS bool
	// Defaults applied to each parsed pair.
	DefaultVolumeMode string
	DefaultAccessMode string
	DefaultOffloadPlugin string
	DefaultOffloadSecret string
	DefaultOffloadVendor string
}
// CreateNetwork creates a new network mapping with TLS verification enabled
// (insecureSkipTLS = false). It is the secure-default convenience form of
// CreateNetworkWithInsecure and calls the shared implementation directly.
func CreateNetwork(configFlags *genericclioptions.ConfigFlags, name, namespace, sourceProvider, targetProvider, networkPairs, inventoryURL string) error {
	const insecureSkipTLS = false
	return createNetworkMappingWithInsecure(configFlags, name, namespace, sourceProvider, targetProvider, networkPairs, inventoryURL, insecureSkipTLS)
}
// CreateNetworkWithInsecure creates a new network mapping with optional
// insecure TLS skip verification for inventory access. It is a thin public
// wrapper that delegates directly to the internal implementation.
func CreateNetworkWithInsecure(configFlags *genericclioptions.ConfigFlags, name, namespace, sourceProvider, targetProvider, networkPairs, inventoryURL string, insecureSkipTLS bool) error {
	return createNetworkMappingWithInsecure(configFlags, name, namespace, sourceProvider, targetProvider, networkPairs, inventoryURL, insecureSkipTLS)
}
// CreateStorageWithOptions creates a new storage mapping with additional
// options for VolumeMode, AccessMode, and OffloadPlugin. It delegates to the
// internal implementation with a context.TODO() since no caller-supplied
// context is threaded through this API.
func CreateStorageWithOptions(opts StorageCreateOptions) error {
	return createStorageMappingWithOptionsAndSecret(context.TODO(), opts)
}
// ParseNetworkPairs parses network pairs and returns the parsed pairs
// (exported for patch functionality). It delegates to the internal parser
// with a context.TODO() since this API takes no caller-supplied context.
func ParseNetworkPairs(pairStr, defaultNamespace string, configFlags *genericclioptions.ConfigFlags, sourceProvider, inventoryURL string) ([]forkliftv1beta1.NetworkPair, error) {
	return parseNetworkPairs(context.TODO(), pairStr, defaultNamespace, configFlags, sourceProvider, inventoryURL)
}
// ParseStoragePairsWithOptions parses storage pairs with additional options
// for VolumeMode, AccessMode, and OffloadPlugin (exported for patch
// functionality). It flattens the StorageParseOptions struct into the
// internal parser's positional arguments, using context.TODO() since this
// API takes no caller-supplied context.
func ParseStoragePairsWithOptions(opts StorageParseOptions) ([]forkliftv1beta1.StoragePair, error) {
	return parseStoragePairsWithOptions(context.TODO(), opts.PairStr, opts.DefaultNamespace, opts.ConfigFlags, opts.SourceProvider, opts.InventoryURL, opts.DefaultVolumeMode, opts.DefaultAccessMode, opts.DefaultOffloadPlugin, opts.DefaultOffloadSecret, opts.DefaultOffloadVendor, opts.InventoryInsecureSkipTLS)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/mapping_test.go | Go | package mapping
import (
"strings"
"testing"
)
// --- parseProviderReference ---
// A bare provider name resolves against the supplied default namespace.
func TestParseProviderReference_NameOnly(t *testing.T) {
	name, ns := parseProviderReference("my-provider", "default-ns")
	if name != "my-provider" {
		t.Errorf("expected name 'my-provider', got %q", name)
	}
	if ns != "default-ns" {
		t.Errorf("expected namespace 'default-ns', got %q", ns)
	}
}
// An explicit "namespace/name" reference overrides the default namespace.
func TestParseProviderReference_NamespaceSlashName(t *testing.T) {
	name, ns := parseProviderReference("other-ns/my-provider", "default-ns")
	if name != "my-provider" {
		t.Errorf("expected name 'my-provider', got %q", name)
	}
	if ns != "other-ns" {
		t.Errorf("expected namespace 'other-ns', got %q", ns)
	}
}
// Whitespace around both the namespace and name segments is trimmed.
func TestParseProviderReference_TrimsSpaces(t *testing.T) {
	name, ns := parseProviderReference(" ns / name ", "default")
	if name != "name" {
		t.Errorf("expected name 'name', got %q", name)
	}
	if ns != "ns" {
		t.Errorf("expected namespace 'ns', got %q", ns)
	}
}
// An empty default namespace is passed through unchanged for a bare name.
func TestParseProviderReference_EmptyDefault(t *testing.T) {
	name, ns := parseProviderReference("provider", "")
	if name != "provider" {
		t.Errorf("expected name 'provider', got %q", name)
	}
	if ns != "" {
		t.Errorf("expected empty namespace, got %q", ns)
	}
}
// Only the FIRST slash separates namespace from name; later slashes stay in
// the name portion.
func TestParseProviderReference_MultipleSlashes(t *testing.T) {
	// SplitN with n=2 means only the first slash is used as separator
	name, ns := parseProviderReference("ns/name/extra", "default")
	if name != "name/extra" {
		t.Errorf("expected name 'name/extra', got %q", name)
	}
	if ns != "ns" {
		t.Errorf("expected namespace 'ns', got %q", ns)
	}
}
// --- validateNetworkPairsTargets ---
// An empty pair string is valid (nothing to validate).
func TestValidateNetworkPairsTargets_Empty(t *testing.T) {
	err := validateNetworkPairsTargets("")
	if err != nil {
		t.Errorf("expected nil for empty input, got: %v", err)
	}
}
// A single mapping to the "default" (pod) network target is allowed.
func TestValidateNetworkPairsTargets_SingleDefault(t *testing.T) {
	err := validateNetworkPairsTargets("source1:default")
	if err != nil {
		t.Errorf("expected nil for single default, got: %v", err)
	}
}
// Two sources may not both map to the pod ("default") network; the error
// message should mention "Pod network".
func TestValidateNetworkPairsTargets_DuplicateDefault(t *testing.T) {
	err := validateNetworkPairsTargets("source1:default,source2:default")
	if err == nil {
		t.Error("expected error for duplicate pod network")
	}
	if !strings.Contains(err.Error(), "Pod network") {
		t.Errorf("expected pod network error message, got: %s", err.Error())
	}
}
// Unlike the pod network, NetworkAttachmentDefinition targets may be reused
// by multiple sources.
func TestValidateNetworkPairsTargets_MultipleNADs(t *testing.T) {
	// NAD targets can be reused
	err := validateNetworkPairsTargets("source1:ns/my-nad,source2:ns/my-nad")
	if err != nil {
		t.Errorf("expected nil for duplicate NAD targets, got: %v", err)
	}
}
// The special "ignored" target may also appear multiple times.
func TestValidateNetworkPairsTargets_MultipleIgnored(t *testing.T) {
	err := validateNetworkPairsTargets("source1:ignored,source2:ignored")
	if err != nil {
		t.Errorf("expected nil for duplicate ignored targets, got: %v", err)
	}
}
// Mixing one pod-network target with a NAD target is fine.
func TestValidateNetworkPairsTargets_DefaultAndNAD(t *testing.T) {
	err := validateNetworkPairsTargets("source1:default,source2:ns/my-nad")
	if err != nil {
		t.Errorf("expected nil for default + NAD, got: %v", err)
	}
}
// Entries without a colon are skipped by validation rather than rejected.
func TestValidateNetworkPairsTargets_MalformedPairsSkipped(t *testing.T) {
	// Malformed pairs (no colon) are skipped
	err := validateNetworkPairsTargets("nocolon,source1:default")
	if err != nil {
		t.Errorf("expected nil for malformed pairs, got: %v", err)
	}
}
// Empty entries produced by consecutive commas are tolerated.
func TestValidateNetworkPairsTargets_EmptyPairsInList(t *testing.T) {
	err := validateNetworkPairsTargets("source1:default,,source2:ns/nad")
	if err != nil {
		t.Errorf("expected nil with empty entries, got: %v", err)
	}
}
// --- validateVolumeMode ---
// Only the Kubernetes PV volume modes "Filesystem" and "Block" are accepted.
func TestValidateVolumeMode_Valid(t *testing.T) {
	validModes := []string{"Filesystem", "Block"}
	for _, mode := range validModes {
		if err := validateVolumeMode(mode); err != nil {
			t.Errorf("validateVolumeMode(%q) = error %v, want nil", mode, err)
		}
	}
}
// Volume mode validation is case-sensitive and rejects unknown or empty values.
func TestValidateVolumeMode_Invalid(t *testing.T) {
	invalidModes := []string{"filesystem", "block", "ReadWriteOnce", "", "unknown"}
	for _, mode := range invalidModes {
		if err := validateVolumeMode(mode); err == nil {
			t.Errorf("validateVolumeMode(%q) = nil, want error", mode)
		}
	}
}
// --- validateAccessMode ---
func TestValidateAccessMode_Valid(t *testing.T) {
	// All three supported PV access modes must pass.
	for _, mode := range []string{"ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"} {
		err := validateAccessMode(mode)
		if err != nil {
			t.Errorf("validateAccessMode(%q) = error %v, want nil", mode, err)
		}
	}
}
func TestValidateAccessMode_Invalid(t *testing.T) {
	// Wrong case, unknown names, empty string, and volume modes are rejected.
	for _, mode := range []string{"readwriteonce", "ReadWriteAll", "", "Block"} {
		err := validateAccessMode(mode)
		if err == nil {
			t.Errorf("validateAccessMode(%q) = nil, want error", mode)
		}
	}
}
// --- validateOffloadPlugin ---
func TestValidateOffloadPlugin_Valid(t *testing.T) {
	// "vsphere" is the only offload plugin currently supported.
	err := validateOffloadPlugin("vsphere")
	if err != nil {
		t.Errorf("validateOffloadPlugin(vsphere) = error %v", err)
	}
}
func TestValidateOffloadPlugin_Invalid(t *testing.T) {
	// Other provider names, empty string, and wrong case are all rejected.
	for _, plugin := range []string{"ovirt", "openstack", "", "VSphere"} {
		err := validateOffloadPlugin(plugin)
		if err == nil {
			t.Errorf("validateOffloadPlugin(%q) = nil, want error", plugin)
		}
	}
}
// --- validateOffloadVendor ---
func TestValidateOffloadVendor_Valid(t *testing.T) {
	// Every vendor in the supported list must validate cleanly.
	vendors := []string{
		"flashsystem", "vantara", "ontap", "primera3par",
		"pureFlashArray", "powerflex", "powermax", "powerstore", "infinibox",
	}
	for _, vendor := range vendors {
		err := validateOffloadVendor(vendor)
		if err != nil {
			t.Errorf("validateOffloadVendor(%q) = error %v", vendor, err)
		}
	}
}
func TestValidateOffloadVendor_Invalid(t *testing.T) {
	// Unknown vendors, empty string, and wrong case are rejected.
	for _, vendor := range []string{"netapp", "", "FlashSystem", "unknown"} {
		err := validateOffloadVendor(vendor)
		if err == nil {
			t.Errorf("validateOffloadVendor(%q) = nil, want error", vendor)
		}
	}
}
// --- validateOffloadSecretFields ---
func TestValidateOffloadSecretFields_NoFieldsSet(t *testing.T) {
	// A zero-value options struct means "no inline secret" — nothing to validate.
	var opts StorageCreateOptions
	if err := validateOffloadSecretFields(opts); err != nil {
		t.Errorf("expected nil for no fields, got: %v", err)
	}
}
func TestValidateOffloadSecretFields_AllRequired(t *testing.T) {
	// With all six required credentials present, validation succeeds.
	opts := StorageCreateOptions{
		OffloadVSphereUsername: "user",
		OffloadVSpherePassword: "pass",
		OffloadVSphereURL:      "https://vcenter",
		OffloadStorageUsername: "storuser",
		OffloadStoragePassword: "storpass",
		OffloadStorageEndpoint: "https://storage",
	}
	err := validateOffloadSecretFields(opts)
	if err != nil {
		t.Errorf("expected nil for all required fields, got: %v", err)
	}
}
func TestValidateOffloadSecretFields_PartialFields(t *testing.T) {
	opts := StorageCreateOptions{
		OffloadVSphereUsername: "user",
		// Missing all other required fields
	}
	err := validateOffloadSecretFields(opts)
	if err == nil {
		// Fatal, not Error: the err.Error() calls below would panic on a
		// nil error, masking the real failure with a nil-pointer dereference.
		t.Fatal("expected error for partial fields")
	}
	if !strings.Contains(err.Error(), "--offload-vsphere-password") {
		t.Errorf("expected missing password in error, got: %s", err.Error())
	}
	if !strings.Contains(err.Error(), "--offload-storage-username") {
		t.Errorf("expected missing storage username in error, got: %s", err.Error())
	}
}
func TestValidateOffloadSecretFields_OnlyInsecureTLS(t *testing.T) {
opts := StorageCreateOptions{
OffloadInsecureSkipTLS: true,
}
err := validateOffloadSecretFields(opts)
if err == nil {
t.Error("expected error when only insecure TLS is set")
}
}
func TestValidateOffloadSecretFields_OnlyCACert(t *testing.T) {
opts := StorageCreateOptions{
OffloadCACert: "/path/to/ca.crt",
}
err := validateOffloadSecretFields(opts)
if err == nil {
t.Error("expected error when only CA cert is set")
}
}
func TestValidateOffloadSecretFields_AllRequiredPlusOptional(t *testing.T) {
	// Required credentials plus both optional TLS fields must also pass.
	full := StorageCreateOptions{
		OffloadVSphereUsername: "user",
		OffloadVSpherePassword: "pass",
		OffloadVSphereURL:      "https://vcenter",
		OffloadStorageUsername: "storuser",
		OffloadStoragePassword: "storpass",
		OffloadStorageEndpoint: "https://storage",
		OffloadCACert:          "/path/to/ca.crt",
		OffloadInsecureSkipTLS: true,
	}
	err := validateOffloadSecretFields(full)
	if err != nil {
		t.Errorf("expected nil with all required + optional, got: %v", err)
	}
}
// --- needsOffloadSecret ---
func TestNeedsOffloadSecret_NoFieldsSet(t *testing.T) {
opts := StorageCreateOptions{}
if needsOffloadSecret(opts) {
t.Error("expected false when no fields set")
}
}
func TestNeedsOffloadSecret_ExistingSecretProvided(t *testing.T) {
	// A pre-existing secret reference wins over inline credential fields.
	existing := StorageCreateOptions{
		DefaultOffloadSecret:   "existing-secret",
		OffloadVSphereUsername: "user",
	}
	if needsOffloadSecret(existing) {
		t.Error("expected false when existing secret is provided")
	}
}
func TestNeedsOffloadSecret_FieldsWithoutExistingSecret(t *testing.T) {
	// Each inline field on its own should trigger secret creation.
	cases := []struct {
		name string
		opts StorageCreateOptions
	}{
		{"username", StorageCreateOptions{OffloadVSphereUsername: "u"}},
		{"password", StorageCreateOptions{OffloadVSpherePassword: "p"}},
		{"url", StorageCreateOptions{OffloadVSphereURL: "https://vc"}},
		{"storage username", StorageCreateOptions{OffloadStorageUsername: "su"}},
		{"storage password", StorageCreateOptions{OffloadStoragePassword: "sp"}},
		{"storage endpoint", StorageCreateOptions{OffloadStorageEndpoint: "https://s"}},
		{"ca cert", StorageCreateOptions{OffloadCACert: "/path"}},
		{"insecure tls", StorageCreateOptions{OffloadInsecureSkipTLS: true}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if !needsOffloadSecret(tc.opts) {
				t.Errorf("expected true when %s is set", tc.name)
			}
		})
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/network.go | Go | package mapping
import (
"context"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/provider"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
corev1 "k8s.io/api/core/v1"
)
// parseProviderReference splits a provider reference of the form
// "namespace/name" into its two components. A bare "name" (no slash)
// resolves to the given defaultNamespace. Both parts are whitespace-trimmed.
func parseProviderReference(providerRef, defaultNamespace string) (name, namespace string) {
	if !strings.Contains(providerRef, "/") {
		// No explicit namespace — fall back to the caller's default.
		return strings.TrimSpace(providerRef), defaultNamespace
	}
	parts := strings.SplitN(providerRef, "/", 2)
	return strings.TrimSpace(parts[1]), strings.TrimSpace(parts[0])
}
// validateNetworkPairsTargets pre-validates a comma-separated list of
// "source:target" network pairs before any inventory resolution happens.
// Target-side constraints:
//   - Pod networking ("default") may appear as a target at most once —
//     only one source network can be mapped to the pod network.
//   - NAD targets (namespace/name) may be reused by multiple sources.
//   - The special "ignored" target may also be reused freely.
//
// Malformed entries (no colon) and empty entries are skipped here; the
// full parser reports those errors.
func validateNetworkPairsTargets(pairStr string) error {
	if pairStr == "" {
		return nil
	}
	seenDefault := false
	for _, raw := range strings.Split(pairStr, ",") {
		entry := strings.TrimSpace(raw)
		if entry == "" {
			continue
		}
		pieces := strings.SplitN(entry, ":", 2)
		if len(pieces) != 2 {
			// Skip malformed pairs, let parseNetworkPairs handle the error
			continue
		}
		if strings.TrimSpace(pieces[1]) != "default" {
			continue
		}
		if seenDefault {
			return fmt.Errorf("invalid network mapping: Pod network ('default') can only be mapped once. Found duplicate mapping to 'default' in '%s'. Use 'source:ignored' for additional sources that don't need network access", entry)
		}
		seenDefault = true
	}
	return nil
}
// parseNetworkPairs parses network pairs in format "source1:namespace/target1,source2:namespace/target2"
// If namespace is omitted, the provided defaultNamespace will be used
// Special target values: "default" for pod networking, "ignored" to ignore the source network
//
// This is a convenience wrapper around parseNetworkPairsWithInsecure with
// TLS verification enabled (insecureSkipTLS=false).
func parseNetworkPairs(ctx context.Context, pairStr, defaultNamespace string, configFlags *genericclioptions.ConfigFlags, sourceProvider, inventoryURL string) ([]forkliftv1beta1.NetworkPair, error) {
	return parseNetworkPairsWithInsecure(ctx, pairStr, defaultNamespace, configFlags, sourceProvider, inventoryURL, false)
}
// parseNetworkPairsWithInsecure parses network pairs with optional insecure TLS skip verification.
//
// Input format: "source1:namespace/target1,source2:target2,source3:default,source4:ignored".
// Each source network name is resolved to inventory ID(s) via the provider
// inventory; a NetworkPair is emitted per matching source resource.
// Returns (nil, nil) for an empty input string.
//
// Validation performed here:
//   - pod network ("default") may only appear once as a target (pre-check)
//   - each resolved source network ID may only be mapped once
func parseNetworkPairsWithInsecure(ctx context.Context, pairStr, defaultNamespace string, configFlags *genericclioptions.ConfigFlags, sourceProvider, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.NetworkPair, error) {
	if pairStr == "" {
		return nil, nil
	}
	// Validate target constraints before processing (pod network can only be mapped once)
	if err := validateNetworkPairsTargets(pairStr); err != nil {
		return nil, err
	}
	var pairs []forkliftv1beta1.NetworkPair
	pairList := strings.Split(pairStr, ",")
	// Track source network IDs to detect duplicates
	sourceIDsSeen := make(map[string]string) // ID -> source name (for error messages)
	// NOTE: the loop variable was previously also named pairStr, shadowing
	// the function parameter; renamed to pair for clarity.
	for _, pair := range pairList {
		pair = strings.TrimSpace(pair)
		if pair == "" {
			continue
		}
		parts := strings.SplitN(pair, ":", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid network pair format '%s': expected 'source:target-namespace/target-network', 'source:target-network', 'source:default', or 'source:ignored'", pair)
		}
		sourceName := strings.TrimSpace(parts[0])
		targetPart := strings.TrimSpace(parts[1])
		// Resolve source network name to ID
		sourceNetworkRefs, err := resolveNetworkNameToIDWithInsecure(ctx, configFlags, sourceProvider, defaultNamespace, inventoryURL, sourceName, insecureSkipTLS)
		if err != nil {
			return nil, fmt.Errorf("failed to resolve source network '%s': %v", sourceName, err)
		}
		// Check for duplicate source network IDs
		for _, sourceRef := range sourceNetworkRefs {
			if existingName, exists := sourceIDsSeen[sourceRef.ID]; exists {
				return nil, fmt.Errorf("invalid network mapping: Source network ID '%s' is mapped multiple times (via '%s' and '%s'). Each source network can only be mapped once", sourceRef.ID, existingName, sourceName)
			}
			sourceIDsSeen[sourceRef.ID] = sourceName
		}
		// Parse target part which can be just a name or namespace/name
		var targetNamespace, targetName, targetType string
		if strings.Contains(targetPart, "/") {
			targetParts := strings.SplitN(targetPart, "/", 2)
			targetNamespace = strings.TrimSpace(targetParts[0])
			targetName = strings.TrimSpace(targetParts[1])
			targetType = "multus"
		} else {
			// Special handling for 'default' and 'ignored' types
			switch targetPart {
			case "default":
				targetType = "pod"
			case "ignored":
				targetType = "ignored"
			default:
				// Use the target part as network name and default namespace
				targetName = targetPart
				targetNamespace = defaultNamespace
				targetType = "multus"
			}
		}
		destinationNetwork := forkliftv1beta1.DestinationNetwork{
			Type: targetType,
		}
		if targetName != "" {
			destinationNetwork.Name = targetName
		}
		// Always set namespace for multus networks, use plan namespace if empty
		if targetType == "multus" {
			if targetNamespace != "" {
				destinationNetwork.Namespace = targetNamespace
			} else {
				destinationNetwork.Namespace = defaultNamespace
			}
		}
		// Create a pair for each matching source network resource
		for _, sourceNetworkRef := range sourceNetworkRefs {
			pairs = append(pairs, forkliftv1beta1.NetworkPair{
				Source:      sourceNetworkRef,
				Destination: destinationNetwork,
			})
		}
	}
	return pairs, nil
}
// createNetworkMappingWithInsecure creates a new network mapping with optional insecure TLS skip verification.
//
// It resolves the given network pairs against the source provider's
// inventory, builds a typed forkliftv1beta1.NetworkMap, and creates it in
// the cluster via the dynamic client. Prints "networkmap/<name> created"
// on success. Returns an error if the client cannot be built, the pairs
// fail to parse/resolve, or the API create call fails.
func createNetworkMappingWithInsecure(configFlags *genericclioptions.ConfigFlags, name, namespace, sourceProvider, targetProvider, networkPairs, inventoryURL string, insecureSkipTLS bool) error {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Parse provider references to extract names and namespaces
	sourceProviderName, sourceProviderNamespace := parseProviderReference(sourceProvider, namespace)
	targetProviderName, targetProviderNamespace := parseProviderReference(targetProvider, namespace)
	// Parse network pairs if provided
	var mappingPairs []forkliftv1beta1.NetworkPair
	if networkPairs != "" {
		// NOTE(review): the raw sourceProvider ("ns/name" form allowed) is
		// forwarded here rather than the parsed sourceProviderName — verify
		// the downstream provider lookup accepts the namespaced form.
		mappingPairs, err = parseNetworkPairsWithInsecure(context.TODO(), networkPairs, namespace, configFlags, sourceProvider, inventoryURL, insecureSkipTLS)
		if err != nil {
			return fmt.Errorf("failed to parse network pairs: %v", err)
		}
	}
	// Create a typed NetworkMap
	networkMap := &forkliftv1beta1.NetworkMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: forkliftv1beta1.NetworkMapSpec{
			Provider: provider.Pair{
				Source: corev1.ObjectReference{
					Name:      sourceProviderName,
					Namespace: sourceProviderNamespace,
				},
				Destination: corev1.ObjectReference{
					Name:      targetProviderName,
					Namespace: targetProviderNamespace,
				},
			},
			Map: mappingPairs,
		},
	}
	// Convert to unstructured
	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(networkMap)
	if err != nil {
		return fmt.Errorf("failed to convert to unstructured: %v", err)
	}
	mapping := &unstructured.Unstructured{Object: unstructuredObj}
	// The converter does not set apiVersion/kind, so set the GVK explicitly.
	mapping.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   client.Group,
		Version: client.Version,
		Kind:    "NetworkMap",
	})
	_, err = dynamicClient.Resource(client.NetworkMapGVR).Namespace(namespace).Create(context.TODO(), mapping, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("failed to create network mapping: %v", err)
	}
	fmt.Printf("networkmap/%s created\n", name)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/network_helpers.go | Go | package mapping
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// resolveOpenShiftNetworkNameToIDWithInsecure resolves a network name to
// NetworkAttachmentDefinition references for an OpenShift provider, with
// optional insecure TLS skip verification.
//
// Accepted forms: "default" (returns a single pod-type ref), "namespace/name",
// or a bare "name" (assumed to live in the "default" namespace). An empty
// name and a name with no inventory match both return an error.
func resolveOpenShiftNetworkNameToIDWithInsecure(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, networkName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	if networkName == "" {
		return nil, fmt.Errorf("network name cannot be empty")
	}
	// "default" is the pod network — no inventory lookup needed.
	if networkName == "default" {
		return []ref.Ref{{
			Type: "pod",
		}}, nil
	}
	// Split "namespace/name"; a bare name is assumed to be in "default".
	var wantNamespace, wantName string
	if strings.Contains(networkName, "/") {
		segments := strings.SplitN(networkName, "/", 2)
		wantNamespace = strings.TrimSpace(segments[0])
		wantName = strings.TrimSpace(segments[1])
	} else {
		wantNamespace = "default"
		wantName = strings.TrimSpace(networkName)
	}
	// Fetch NetworkAttachmentDefinitions from OpenShift
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networkattachmentdefinitions?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Collect every NAD whose metadata name+namespace match the request.
	var matchingRefs []ref.Ref
	for _, item := range networksArray {
		network, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		// Walk object.metadata with guard clauses instead of nested ifs.
		obj, hasObj := network["object"]
		if !hasObj {
			continue
		}
		objMap, ok := obj.(map[string]interface{})
		if !ok {
			continue
		}
		metadata, hasMeta := objMap["metadata"]
		if !hasMeta {
			continue
		}
		metadataMap, ok := metadata.(map[string]interface{})
		if !ok {
			continue
		}
		name, _ := metadataMap["name"].(string)
		ns, _ := metadataMap["namespace"].(string)
		uid, _ := metadataMap["uid"].(string)
		if name != wantName || ns != wantNamespace {
			continue
		}
		matchingRefs = append(matchingRefs, ref.Ref{
			ID:        uid,
			Name:      name,
			Namespace: ns,
			Type:      "multus",
		})
	}
	if len(matchingRefs) == 0 {
		return nil, fmt.Errorf("network '%s' in namespace '%s' not found in OpenShift provider inventory", wantName, wantNamespace)
	}
	return matchingRefs, nil
}
// resolveVirtualizationNetworkNameToIDWithInsecure resolves a network name
// to inventory ID refs for virtualization providers (VMware, oVirt,
// OpenStack), with optional insecure TLS skip verification. All inventory
// entries whose "name" equals networkName are returned (ID only); no match
// is an error.
func resolveVirtualizationNetworkNameToIDWithInsecure(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, networkName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Fetch networks from virtualization providers
	inventoryData, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	entries, ok := inventoryData.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Collect every inventory entry with a matching name.
	var matches []ref.Ref
	for _, entry := range entries {
		networkMap, ok := entry.(map[string]interface{})
		if !ok {
			continue
		}
		name, _ := networkMap["name"].(string)
		if name != networkName {
			continue
		}
		id, _ := networkMap["id"].(string)
		matches = append(matches, ref.Ref{ID: id})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("network '%s' not found in virtualization provider inventory", networkName)
	}
	return matches, nil
}
// resolveNetworkNameToIDWithInsecure resolves a network name to ref.Ref
// entries by querying the named provider's inventory, dispatching on the
// provider's spec.type, with optional insecure TLS skip verification.
func resolveNetworkNameToIDWithInsecure(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL, networkName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Look up the source provider object first.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider '%s': %v", providerName, err)
	}
	// Dispatch on spec.type; a missing type falls through to the default.
	providerType, _, err := unstructured.NestedString(provider.Object, "spec", "type")
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	switch providerType {
	case "openshift":
		return resolveOpenShiftNetworkNameToIDWithInsecure(ctx, configFlags, inventoryURL, provider, networkName, insecureSkipTLS)
	case "ec2":
		return resolveEC2NetworkNameToIDWithInsecure(ctx, configFlags, inventoryURL, provider, networkName, insecureSkipTLS)
	default:
		// vsphere, ovirt, openstack, ova, and any unrecognized type all use
		// the generic virtualization inventory lookup.
		return resolveVirtualizationNetworkNameToIDWithInsecure(ctx, configFlags, inventoryURL, provider, networkName, insecureSkipTLS)
	}
}
// resolveEC2NetworkNameToIDWithInsecure resolves network name for EC2 provider with optional insecure TLS skip verification.
//
// networkName may be either a raw AWS network ID (SubnetId/VpcId) or the
// value of the resource's "Name" tag; all matches are returned as ID-only
// refs. No match is an error.
func resolveEC2NetworkNameToIDWithInsecure(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, networkName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Fetch networks (VPCs and Subnets) from EC2
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	// Extract objects from EC2 envelope
	networksInventory = inventory.ExtractEC2Objects(networksInventory)
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Search for networks matching the name (from Tags) or ID
	var matchingRefs []ref.Ref
	for _, item := range networksArray {
		network, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		// Get network ID (either VpcId or SubnetId); SubnetId wins when both
		// are present (a subnet entry also carries its parent VpcId).
		var networkID string
		if subnetID, ok := network["SubnetId"].(string); ok && subnetID != "" {
			networkID = subnetID
		} else if vpcID, ok := network["VpcId"].(string); ok && vpcID != "" {
			networkID = vpcID
		}
		// Match by ID
		if networkID == networkName {
			matchingRefs = append(matchingRefs, ref.Ref{
				ID: networkID,
			})
			continue
		}
		// Match by Name tag. Tags are assumed to be AWS-style
		// [{"Key": ..., "Value": ...}] entries — TODO confirm inventory shape.
		// NOTE(review): a tag match on an entry with neither SubnetId nor
		// VpcId would append a ref with an empty ID — verify that the
		// inventory always populates one of the two.
		if tags, exists := network["Tags"]; exists {
			if tagsArray, ok := tags.([]interface{}); ok {
				for _, tagInterface := range tagsArray {
					if tag, ok := tagInterface.(map[string]interface{}); ok {
						if key, keyOk := tag["Key"].(string); keyOk && key == "Name" {
							if value, valueOk := tag["Value"].(string); valueOk && value == networkName {
								matchingRefs = append(matchingRefs, ref.Ref{
									ID: networkID,
								})
								break
							}
						}
					}
				}
			}
		}
	}
	if len(matchingRefs) == 0 {
		return nil, fmt.Errorf("network '%s' not found in EC2 provider inventory", networkName)
	}
	return matchingRefs, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/offload_secrets.go | Go | package mapping
import (
"context"
"fmt"
"os"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createOffloadSecret creates an Opaque Secret holding offload plugin
// credentials (vSphere + storage array) in the given namespace. The secret
// gets a generated name with prefix "<baseName>-offload-" and labels tying
// it back to the mapping. A CA cert value starting with '@' is treated as a
// file path and its contents are inlined. Returns the created Secret.
func createOffloadSecret(configFlags *genericclioptions.ConfigFlags, namespace, baseName string, opts StorageCreateOptions) (*corev1.Secret, error) {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}
	// A leading '@' means the CA cert flag points at a file on disk.
	caCert := opts.OffloadCACert
	if strings.HasPrefix(caCert, "@") {
		certPath := strings.TrimPrefix(caCert, "@")
		content, readErr := os.ReadFile(certPath)
		if readErr != nil {
			return nil, fmt.Errorf("failed to read CA certificate file %s: %v", certPath, readErr)
		}
		caCert = string(content)
	}
	// Raw values only — the Kubernetes API base64-encodes Data on write.
	data := map[string][]byte{}
	put := func(key, value string) {
		if value != "" {
			data[key] = []byte(value)
		}
	}
	// vSphere credentials
	put("user", opts.OffloadVSphereUsername)
	put("password", opts.OffloadVSpherePassword)
	put("url", opts.OffloadVSphereURL)
	// Storage array credentials
	put("storageUser", opts.OffloadStorageUsername)
	put("storagePassword", opts.OffloadStoragePassword)
	put("storageEndpoint", opts.OffloadStorageEndpoint)
	// Optional TLS fields
	if opts.OffloadInsecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	put("cacert", caCert)
	if len(data) == 0 {
		return nil, fmt.Errorf("no offload secret fields provided")
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-offload-", baseName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForResourceType": "offload",
				"createdForMapping":      baseName,
			},
		},
		Data: data,
		Type: corev1.SecretTypeOpaque,
	}
	return k8sClient.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
}
// validateOffloadSecretFields validates inline offload-secret flags.
// If no offload field is set at all, there is nothing to validate. If any
// field is set, all six credential fields (vSphere user/password/url and
// storage user/password/endpoint) become required; the error lists every
// missing flag. The TLS fields (CA cert, insecure-skip) are optional but
// still trigger the required-field check on their own.
func validateOffloadSecretFields(opts StorageCreateOptions) error {
	anySet := opts.OffloadVSphereUsername != "" ||
		opts.OffloadVSpherePassword != "" ||
		opts.OffloadVSphereURL != "" ||
		opts.OffloadStorageUsername != "" ||
		opts.OffloadStoragePassword != "" ||
		opts.OffloadStorageEndpoint != "" ||
		opts.OffloadCACert != "" ||
		opts.OffloadInsecureSkipTLS
	if !anySet {
		return nil
	}
	// Required value -> flag name, in the order flags are reported.
	required := []struct {
		value string
		flag  string
	}{
		{opts.OffloadVSphereUsername, "--offload-vsphere-username"},
		{opts.OffloadVSpherePassword, "--offload-vsphere-password"},
		{opts.OffloadVSphereURL, "--offload-vsphere-url"},
		{opts.OffloadStorageUsername, "--offload-storage-username"},
		{opts.OffloadStoragePassword, "--offload-storage-password"},
		{opts.OffloadStorageEndpoint, "--offload-storage-endpoint"},
	}
	var missing []string
	for _, field := range required {
		if field.value == "" {
			missing = append(missing, field.flag)
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("when creating offload secrets inline, all required fields must be provided. Missing: %s", strings.Join(missing, ", "))
	}
	return nil
}
// needsOffloadSecret reports whether an offload secret must be created:
// true only when no existing secret name was supplied AND at least one
// inline offload credential/TLS field is set.
func needsOffloadSecret(opts StorageCreateOptions) bool {
	if opts.DefaultOffloadSecret != "" {
		// Caller referenced an existing secret — never create a new one.
		return false
	}
	return opts.OffloadVSphereUsername != "" ||
		opts.OffloadVSpherePassword != "" ||
		opts.OffloadVSphereURL != "" ||
		opts.OffloadStorageUsername != "" ||
		opts.OffloadStoragePassword != "" ||
		opts.OffloadStorageEndpoint != "" ||
		opts.OffloadCACert != "" ||
		opts.OffloadInsecureSkipTLS
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/storage.go | Go | package mapping
import (
"context"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/provider"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateVolumeMode checks that mode is a valid Kubernetes PV volume mode
// ("Filesystem" or "Block", case-sensitive).
func validateVolumeMode(mode string) error {
	if mode == "Filesystem" || mode == "Block" {
		return nil
	}
	return fmt.Errorf("must be one of: Filesystem, Block")
}
// validateAccessMode checks that mode is a supported PV access mode
// ("ReadWriteOnce", "ReadWriteMany", or "ReadOnlyMany", case-sensitive).
func validateAccessMode(mode string) error {
	if mode == "ReadWriteOnce" || mode == "ReadWriteMany" || mode == "ReadOnlyMany" {
		return nil
	}
	return fmt.Errorf("must be one of: ReadWriteOnce, ReadWriteMany, ReadOnlyMany")
}
// validateOffloadPlugin checks the offload plugin name; "vsphere" is the
// only supported value (case-sensitive).
func validateOffloadPlugin(plugin string) error {
	if plugin == "vsphere" {
		return nil
	}
	return fmt.Errorf("must be one of: vsphere")
}
// validateOffloadVendor checks that vendor is one of the supported storage
// vendors (case-sensitive set membership).
func validateOffloadVendor(vendor string) error {
	supported := map[string]struct{}{
		"flashsystem": {}, "vantara": {}, "ontap": {}, "primera3par": {},
		"pureFlashArray": {}, "powerflex": {}, "powermax": {}, "powerstore": {}, "infinibox": {},
	}
	if _, ok := supported[vendor]; ok {
		return nil
	}
	return fmt.Errorf("must be one of: flashsystem, vantara, ontap, primera3par, pureFlashArray, powerflex, powermax, powerstore, infinibox")
}
// StoragePairOptions holds per-mapping defaults applied to each storage
// pair unless overridden by inline "key=value" options in the pair string.
type StoragePairOptions struct {
	// DefaultVolumeMode is the fallback PV volume mode (Filesystem/Block).
	DefaultVolumeMode string
	// DefaultAccessMode is the fallback PV access mode (e.g. ReadWriteOnce).
	DefaultAccessMode string
	// DefaultOffloadPlugin is the fallback offload plugin (e.g. vsphere).
	DefaultOffloadPlugin string
	// DefaultOffloadSecret is the name of an existing offload secret to use.
	DefaultOffloadSecret string
	// DefaultOffloadVendor is the fallback storage vendor for offload.
	DefaultOffloadVendor string
}
// parseStoragePairsWithOptions parses storage pairs, packaging the default
// VolumeMode/AccessMode/Offload settings into a StoragePairOptions struct
// and delegating to parseStoragePairsInternal.
func parseStoragePairsWithOptions(ctx context.Context, pairStr, defaultNamespace string, configFlags *genericclioptions.ConfigFlags, sourceProvider, inventoryURL string, defaultVolumeMode, defaultAccessMode, defaultOffloadPlugin, defaultOffloadSecret, defaultOffloadVendor string, insecureSkipTLS bool) ([]forkliftv1beta1.StoragePair, error) {
	opts := StoragePairOptions{
		DefaultVolumeMode:    defaultVolumeMode,
		DefaultAccessMode:    defaultAccessMode,
		DefaultOffloadPlugin: defaultOffloadPlugin,
		DefaultOffloadSecret: defaultOffloadSecret,
		DefaultOffloadVendor: defaultOffloadVendor,
	}
	return parseStoragePairsInternal(pairStr, defaultNamespace, configFlags, sourceProvider, inventoryURL, &opts, insecureSkipTLS)
}
// parseStoragePairsInternal is the internal implementation that handles the parsing logic
// for a comma-separated list of storage mapping pairs.
//
// Each pair uses the enhanced format:
//
//	source:storage-class[;volumeMode=...][;accessMode=...][;offloadPlugin=...][;offloadSecret=...][;offloadVendor=...]
//
// The target may also be written as "namespace/storage-class"; the namespace part is
// ignored because storage classes are cluster-scoped. Per-pair options override the
// mapping-wide defaults carried in *options. Each source name is resolved against the
// provider inventory and may match several storage resources, yielding one StoragePair
// per match. An empty pairStr yields (nil, nil).
//
// NOTE(review): resolveStorageNameToID is called with context.TODO() even though callers
// hold a context — consider plumbing a ctx parameter through this function.
func parseStoragePairsInternal(pairStr, defaultNamespace string, configFlags *genericclioptions.ConfigFlags, sourceProvider, inventoryURL string, options *StoragePairOptions, insecureSkipTLS bool) ([]forkliftv1beta1.StoragePair, error) {
	if pairStr == "" {
		return nil, nil
	}
	var pairs []forkliftv1beta1.StoragePair
	pairList := strings.Split(pairStr, ",")
	// NOTE: the loop variable deliberately shadows the pairStr parameter; from here on
	// pairStr refers to a single pair, not the whole comma-separated list.
	for _, pairStr := range pairList {
		pairStr = strings.TrimSpace(pairStr)
		if pairStr == "" {
			continue
		}
		// Parse the enhanced format: "source:storage-class;volumeMode=Block;accessMode=ReadWriteOnce;offloadPlugin=vsphere;offloadSecret=secret-name;offloadVendor=vantara"
		pairParts := strings.Split(pairStr, ";")
		if len(pairParts) == 0 {
			continue
		}
		// First part should be source:storage-class
		mainPart := strings.TrimSpace(pairParts[0])
		parts := strings.SplitN(mainPart, ":", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid storage pair format '%s': expected 'source:storage-class' or 'source:namespace/storage-class'", mainPart)
		}
		sourceName := strings.TrimSpace(parts[0])
		targetPart := strings.TrimSpace(parts[1])
		// Parse target part which can be namespace/storage-class or just storage-class
		// Note: namespace is ignored since storage classes are cluster-scoped
		var targetStorageClass string
		if strings.Contains(targetPart, "/") {
			targetParts := strings.SplitN(targetPart, "/", 2)
			// Ignore the namespace part for storage classes since they are cluster-scoped
			targetStorageClass = strings.TrimSpace(targetParts[1])
		} else {
			// Use the target part as storage class
			targetStorageClass = targetPart
		}
		if targetStorageClass == "" {
			return nil, fmt.Errorf("invalid target format '%s': storage class must be specified", targetPart)
		}
		// Start from the mapping-wide defaults; per-pair options below override them.
		volumeMode := options.DefaultVolumeMode
		accessMode := options.DefaultAccessMode
		offloadPlugin := options.DefaultOffloadPlugin
		offloadSecret := options.DefaultOffloadSecret
		offloadVendor := options.DefaultOffloadVendor
		for i := 1; i < len(pairParts); i++ {
			optionPart := strings.TrimSpace(pairParts[i])
			if optionPart == "" {
				continue
			}
			optionParts := strings.SplitN(optionPart, "=", 2)
			if len(optionParts) != 2 {
				return nil, fmt.Errorf("invalid option format '%s': expected 'key=value'", optionPart)
			}
			key := strings.TrimSpace(optionParts[0])
			value := strings.TrimSpace(optionParts[1])
			switch key {
			case "volumeMode":
				if err := validateVolumeMode(value); err != nil {
					return nil, fmt.Errorf("invalid volumeMode '%s': %v", value, err)
				}
				volumeMode = value
			case "accessMode":
				if err := validateAccessMode(value); err != nil {
					return nil, fmt.Errorf("invalid accessMode '%s': %v", value, err)
				}
				accessMode = value
			case "offloadPlugin":
				if err := validateOffloadPlugin(value); err != nil {
					return nil, fmt.Errorf("invalid offloadPlugin '%s': %v", value, err)
				}
				offloadPlugin = value
			case "offloadSecret":
				// No validation helper exists for secret names; the value is taken as-is.
				offloadSecret = value
			case "offloadVendor":
				if err := validateOffloadVendor(value); err != nil {
					return nil, fmt.Errorf("invalid offloadVendor '%s': %v", value, err)
				}
				offloadVendor = value
			default:
				return nil, fmt.Errorf("unknown option '%s' in storage pair", key)
			}
		}
		// Offload plugin and vendor only make sense as a pair; reject half-specified config.
		if (offloadPlugin != "" && offloadVendor == "") || (offloadPlugin == "" && offloadVendor != "") {
			return nil, fmt.Errorf("both offloadPlugin and offloadVendor must be specified together for storage pair '%s'", sourceName)
		}
		// Resolve source storage name to ID; one name may match multiple inventory resources.
		sourceStorageRefs, err := resolveStorageNameToID(context.TODO(), configFlags, sourceProvider, defaultNamespace, inventoryURL, sourceName, insecureSkipTLS)
		if err != nil {
			return nil, fmt.Errorf("failed to resolve source storage '%s': %v", sourceName, err)
		}
		// Create a pair for each matching source storage resource
		for _, sourceStorageRef := range sourceStorageRefs {
			destination := forkliftv1beta1.DestinationStorage{
				StorageClass: targetStorageClass,
			}
			// Set volume mode if specified
			if volumeMode != "" {
				destination.VolumeMode = corev1.PersistentVolumeMode(volumeMode)
			}
			// Set access mode if specified
			if accessMode != "" {
				destination.AccessMode = corev1.PersistentVolumeAccessMode(accessMode)
			}
			pair := forkliftv1beta1.StoragePair{
				Source:      sourceStorageRef,
				Destination: destination,
			}
			// Attach offload plugin config only when fully specified (plugin + vendor).
			if offloadPlugin != "" && offloadVendor != "" {
				offloadPluginConfig := &forkliftv1beta1.OffloadPlugin{}
				switch offloadPlugin {
				case "vsphere":
					offloadPluginConfig.VSphereXcopyPluginConfig = &forkliftv1beta1.VSphereXcopyPluginConfig{
						SecretRef:            offloadSecret,
						StorageVendorProduct: forkliftv1beta1.StorageVendorProduct(offloadVendor),
					}
				default:
					return nil, fmt.Errorf("unknown offload plugin '%s' for storage pair '%s': supported plugins are: vsphere", offloadPlugin, sourceName)
				}
				pair.OffloadPlugin = offloadPluginConfig
			}
			pairs = append(pairs, pair)
		}
	}
	return pairs, nil
}
// createStorageMappingWithOptions creates a new storage mapping with additional options for
// VolumeMode, AccessMode, and OffloadPlugin.
//
// Provider references may be plain names or "namespace/name" (resolved by
// parseProviderReference). storagePairs, when non-empty, uses the pair syntax understood
// by parseStoragePairsWithOptions, with the default* values applied to pairs that do not
// override them. Returns an error if the pairs cannot be parsed or the StorageMap
// resource cannot be created.
func createStorageMappingWithOptions(ctx context.Context, configFlags *genericclioptions.ConfigFlags, name, namespace, sourceProvider, targetProvider, storagePairs, inventoryURL string, defaultVolumeMode, defaultAccessMode, defaultOffloadPlugin, defaultOffloadSecret, defaultOffloadVendor string, insecureSkipTLS bool) error {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Parse provider references to extract names and namespaces
	sourceProviderName, sourceProviderNamespace := parseProviderReference(sourceProvider, namespace)
	targetProviderName, targetProviderNamespace := parseProviderReference(targetProvider, namespace)
	// Parse storage pairs if provided
	var mappingPairs []forkliftv1beta1.StoragePair
	if storagePairs != "" {
		mappingPairs, err = parseStoragePairsWithOptions(ctx, storagePairs, namespace, configFlags, sourceProvider, inventoryURL, defaultVolumeMode, defaultAccessMode, defaultOffloadPlugin, defaultOffloadSecret, defaultOffloadVendor, insecureSkipTLS)
		if err != nil {
			return fmt.Errorf("failed to parse storage pairs: %v", err)
		}
	}
	// Build a typed StorageMap first, then convert for the dynamic client.
	storageMap := &forkliftv1beta1.StorageMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: forkliftv1beta1.StorageMapSpec{
			Provider: provider.Pair{
				Source: corev1.ObjectReference{
					Name:      sourceProviderName,
					Namespace: sourceProviderNamespace,
				},
				Destination: corev1.ObjectReference{
					Name:      targetProviderName,
					Namespace: targetProviderNamespace,
				},
			},
			Map: mappingPairs,
		},
	}
	// Convert to unstructured
	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(storageMap)
	if err != nil {
		return fmt.Errorf("failed to convert to unstructured: %v", err)
	}
	mapping := &unstructured.Unstructured{Object: unstructuredObj}
	mapping.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   client.Group,
		Version: client.Version,
		Kind:    "StorageMap",
	})
	// Fix: use the caller's ctx instead of context.TODO() so cancellation and deadlines
	// propagate to the API call (the ctx parameter was previously ignored here).
	_, err = dynamicClient.Resource(client.StorageMapGVR).Namespace(namespace).Create(ctx, mapping, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("failed to create storage mapping: %v", err)
	}
	fmt.Printf("storagemap/%s created\n", name)
	return nil
}
// createStorageMappingWithOptionsAndSecret creates a new storage mapping with offload secret creation support.
// When the options call for an offload secret, the secret is created first and wired in
// as the default offload secret; if the mapping creation then fails, the secret is
// rolled back (best effort) before the error is returned.
func createStorageMappingWithOptionsAndSecret(ctx context.Context, opts StorageCreateOptions) error {
	// Reject inconsistent offload secret fields up front.
	if err := validateOffloadSecretFields(opts); err != nil {
		return err
	}

	var secret *corev1.Secret
	if needsOffloadSecret(opts) {
		fmt.Printf("Creating offload secret for storage mapping '%s'\n", opts.Name)
		var err error
		secret, err = createOffloadSecret(opts.ConfigFlags, opts.Namespace, opts.Name, opts)
		if err != nil {
			return fmt.Errorf("failed to create offload secret: %v", err)
		}
		// The freshly created secret becomes the default offload secret for all pairs.
		opts.DefaultOffloadSecret = secret.Name
		fmt.Printf("Created offload secret '%s' for storage mapping authentication\n", secret.Name)
	}

	mappingErr := createStorageMappingWithOptions(ctx, opts.ConfigFlags, opts.Name, opts.Namespace,
		opts.SourceProvider, opts.TargetProvider, opts.StoragePairs, opts.InventoryURL,
		opts.DefaultVolumeMode, opts.DefaultAccessMode, opts.DefaultOffloadPlugin,
		opts.DefaultOffloadSecret, opts.DefaultOffloadVendor, opts.InventoryInsecureSkipTLS)
	if mappingErr == nil {
		return nil
	}
	// Mapping creation failed: remove the secret we created for it, if any.
	if secret != nil {
		if delErr := cleanupOffloadSecret(opts.ConfigFlags, opts.Namespace, secret.Name); delErr != nil {
			fmt.Printf("Warning: failed to clean up offload secret '%s': %v\n", secret.Name, delErr)
		}
	}
	return mappingErr
}
// cleanupOffloadSecret removes a created offload secret on failure.
// It is best-effort cleanup; the caller decides whether a deletion error is fatal.
func cleanupOffloadSecret(configFlags *genericclioptions.ConfigFlags, namespace, secretName string) error {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get kubernetes client: %v", err)
	}
	secrets := clientset.CoreV1().Secrets(namespace)
	return secrets.Delete(context.Background(), secretName, metav1.DeleteOptions{})
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/mapping/storage_helpers.go | Go | package mapping
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// resolveVSphereStorageNameToID resolves storage name for VMware vSphere provider.
// It returns one ref per datastore whose name matches storageName exactly.
func resolveVSphereStorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Datastores are the vSphere storage resources in the inventory.
	inv, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "datastores?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	items, ok := inv.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	// Collect every datastore whose name matches.
	var matches []ref.Ref
	for _, raw := range items {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		entryName, _ := entry["name"].(string)
		if entryName != storageName {
			continue
		}
		entryID, _ := entry["id"].(string)
		matches = append(matches, ref.Ref{Name: entryName, ID: entryID})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("datastore '%s' not found in vSphere provider inventory", storageName)
	}
	return matches, nil
}
// resolveOvirtStorageNameToID resolves storage name for oVirt provider.
// It returns one ref per storage domain whose name matches storageName exactly.
func resolveOvirtStorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Storage domains are the oVirt storage resources in the inventory.
	inv, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storagedomains?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	items, ok := inv.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	// Collect every storage domain whose name matches.
	var matches []ref.Ref
	for _, raw := range items {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		entryName, _ := entry["name"].(string)
		if entryName != storageName {
			continue
		}
		entryID, _ := entry["id"].(string)
		matches = append(matches, ref.Ref{Name: entryName, ID: entryID})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("storage domain '%s' not found in oVirt provider inventory", storageName)
	}
	return matches, nil
}
// resolveOpenStackStorageNameToID resolves storage name for OpenStack provider.
// The sentinel name '__DEFAULT__' maps to a ref of type "default" without touching the
// inventory; otherwise one ref per matching volume type is returned.
func resolveOpenStackStorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Special case: the default volume type needs no inventory lookup.
	if storageName == "__DEFAULT__" {
		return []ref.Ref{{Type: "default"}}, nil
	}
	// Volume types are the OpenStack storage resources in the inventory.
	inv, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "volumetypes?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	items, ok := inv.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	// Collect every volume type whose name matches.
	var matches []ref.Ref
	for _, raw := range items {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		entryName, _ := entry["name"].(string)
		if entryName != storageName {
			continue
		}
		entryID, _ := entry["id"].(string)
		matches = append(matches, ref.Ref{Name: entryName, ID: entryID})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("storage type '%s' not found in OpenStack provider inventory", storageName)
	}
	return matches, nil
}
// resolveOVAStorageNameToID resolves storage name for OVA provider.
// It returns one ref per storage entry whose name matches storageName exactly.
func resolveOVAStorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// OVA exposes its storage through the generic "storages" endpoint.
	inv, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	items, ok := inv.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	// Collect every storage entry whose name matches.
	var matches []ref.Ref
	for _, raw := range items {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		entryName, _ := entry["name"].(string)
		if entryName != storageName {
			continue
		}
		entryID, _ := entry["id"].(string)
		matches = append(matches, ref.Ref{Name: entryName, ID: entryID})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("storage '%s' not found in OVA provider inventory", storageName)
	}
	return matches, nil
}
// resolveStorageNameToID resolves a storage name to its ref.Ref by querying the provider inventory.
//
// The provider's spec.type selects a provider-specific resolver; unknown types fall back
// to the generic "storages" inventory endpoint. A single name may resolve to multiple
// refs when the inventory holds several resources with that name.
func resolveStorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	// Get source provider
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider '%s': %v", providerName, err)
	}
	// Check provider type to determine which helper to use.
	// NOTE(review): the "found" result of NestedString is discarded; a provider with no
	// spec.type silently falls through to the generic default branch — confirm intended.
	providerType, _, err := unstructured.NestedString(provider.Object, "spec", "type")
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	switch providerType {
	case "openshift":
		// For OpenShift source providers, only include the name in the source reference
		// Storage classes are cluster-scoped resources, so we don't need to resolve the ID
		return []ref.Ref{{
			Name: storageName,
		}}, nil
	case "ec2":
		return resolveEC2StorageNameToID(ctx, configFlags, inventoryURL, provider, storageName, insecureSkipTLS)
	case "vsphere":
		return resolveVSphereStorageNameToID(ctx, configFlags, inventoryURL, provider, storageName, insecureSkipTLS)
	case "ovirt":
		return resolveOvirtStorageNameToID(ctx, configFlags, inventoryURL, provider, storageName, insecureSkipTLS)
	case "openstack":
		return resolveOpenStackStorageNameToID(ctx, configFlags, inventoryURL, provider, storageName, insecureSkipTLS)
	case "ova":
		return resolveOVAStorageNameToID(ctx, configFlags, inventoryURL, provider, storageName, insecureSkipTLS)
	default:
		// Default to generic storage endpoint for unknown providers
		storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
		}
		storageArray, ok := storageInventory.([]interface{})
		if !ok {
			return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
		}
		// Search for all storages matching the name
		var matchingRefs []ref.Ref
		for _, item := range storageArray {
			storage, ok := item.(map[string]interface{})
			if !ok {
				continue
			}
			name, _ := storage["name"].(string)
			id, _ := storage["id"].(string)
			if name == storageName {
				// NOTE(review): unlike the provider-specific resolvers, only the ID is
				// recorded here (Name left empty) — confirm this asymmetry is intended.
				matchingRefs = append(matchingRefs, ref.Ref{
					ID: id,
				})
			}
		}
		if len(matchingRefs) == 0 {
			return nil, fmt.Errorf("storage '%s' not found in provider '%s' inventory", storageName, providerName)
		}
		return matchingRefs, nil
	}
}
// resolveEC2StorageNameToID resolves storage name for EC2 provider.
// EBS volume types (gp2, gp3, io1, io2, st1, sc1, standard) are matched
// case-insensitively and duplicate types are collapsed to a single ref.
func resolveEC2StorageNameToID(ctx context.Context, configFlags *genericclioptions.ConfigFlags, inventoryURL string, provider *unstructured.Unstructured, storageName string, insecureSkipTLS bool) ([]ref.Ref, error) {
	inv, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	// EC2 wraps its inventory in an envelope; unwrap to the raw object list.
	items, ok := inventory.ExtractEC2Objects(inv).([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	wanted := strings.ToLower(storageName)
	seen := map[string]struct{}{}
	var matches []ref.Ref
	for _, raw := range items {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		// EC2 storage entries carry the EBS volume type in the "type" field.
		volType, _ := entry["type"].(string)
		if strings.ToLower(volType) != wanted {
			continue
		}
		// Skip volume types we have already emitted.
		if _, dup := seen[volType]; dup {
			continue
		}
		seen[volType] = struct{}{}
		matches = append(matches, ref.Ref{Name: volType})
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("EBS volume type '%s' not found in EC2 provider inventory", storageName)
	}
	return matches, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/factory.go | Go | package network
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/provider"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers"
ec2Fetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/ec2"
hypervFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/hyperv"
openshiftFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/openshift"
openstackFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/openstack"
ovaFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/ova"
ovirtFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/ovirt"
vsphereFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers/vsphere"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
ec2Mapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/ec2"
hypervMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/hyperv"
openshiftMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/openshift"
openstackMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/openstack"
ovaMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/ova"
ovirtMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/ovirt"
vsphereMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/vsphere"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// NetworkMapperInterface defines the interface for network mapping operations:
// discovering source networks, discovering target networks, and building the
// source-to-target pairs for a NetworkMap.
type NetworkMapperInterface interface {
	// GetSourceNetworks extracts network information from the source provider for the specified VMs.
	GetSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error)
	// GetTargetNetworks extracts available network information from the target provider.
	GetTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error)
	// CreateNetworkPairs creates network mapping pairs based on source networks, target
	// networks, and an optional default target network name.
	CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, defaultTargetNetwork string, namespace string) ([]forkliftv1beta1.NetworkPair, error)
}
// NetworkMapperOptions contains common options for network mapping.
type NetworkMapperOptions struct {
	// Name is the base object name; the generated map is named "<Name>-network-map".
	Name string
	// Namespace is where the NetworkMap resource is created.
	Namespace string
	// TargetNamespace is where VMs will be created (plan.spec.targetNamespace);
	// used to filter candidate target network attachment definitions.
	TargetNamespace string
	// SourceProvider / SourceProviderNamespace identify the migration source provider.
	SourceProvider          string
	SourceProviderNamespace string
	// TargetProvider / TargetProviderNamespace identify the migration target provider.
	TargetProvider          string
	TargetProviderNamespace string
	// ConfigFlags carries the kubectl CLI connection configuration.
	ConfigFlags *genericclioptions.ConfigFlags
	// InventoryURL is the endpoint used for provider inventory queries.
	InventoryURL string
	// InventoryInsecureSkipTLS disables TLS verification for inventory calls.
	InventoryInsecureSkipTLS bool
	// PlanVMNames lists the VMs whose source networks should be mapped.
	PlanVMNames []string
	// DefaultTargetNetwork optionally names a fallback target network; the special
	// value "default" skips fetching target networks entirely.
	DefaultTargetNetwork string
}
// GetSourceNetworkFetcher returns the appropriate source network fetcher based on provider type.
func GetSourceNetworkFetcher(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string, insecureSkipTLS bool) (fetchers.SourceNetworkFetcher, error) {
	// Look up the provider CRD so its declared type can be inspected.
	prov, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider: %v", err)
	}
	// GetProviderType() only reads the CRD spec (no HTTPS calls); insecureSkipTLS is
	// passed along purely for consistency with the other provider-client call sites.
	pType, err := inventory.NewProviderClientWithInsecure(configFlags, prov, "", insecureSkipTLS).GetProviderType()
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetSourceNetworkFetcher - Provider: %s, Type: %s", providerName, pType)
	// Dispatch on the declared provider type.
	switch pType {
	case "ec2":
		klog.V(4).Infof("DEBUG: Using EC2 source network fetcher for %s", providerName)
		return ec2Fetcher.NewEC2NetworkFetcher(), nil
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack source network fetcher for %s", providerName)
		return openstackFetcher.NewOpenStackNetworkFetcher(), nil
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using VSphere source network fetcher for %s", providerName)
		return vsphereFetcher.NewVSphereNetworkFetcher(), nil
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift source network fetcher for %s", providerName)
		return openshiftFetcher.NewOpenShiftNetworkFetcher(), nil
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA source network fetcher for %s", providerName)
		return ovaFetcher.NewOVANetworkFetcher(), nil
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt source network fetcher for %s", providerName)
		return ovirtFetcher.NewOvirtNetworkFetcher(), nil
	case "hyperv":
		klog.V(4).Infof("DEBUG: Using HyperV source network fetcher for %s", providerName)
		return hypervFetcher.NewHyperVNetworkFetcher(), nil
	default:
		return nil, fmt.Errorf("unsupported source provider type: %s", pType)
	}
}
// GetTargetNetworkFetcher returns the appropriate target network fetcher based on provider type.
// Note that "ec2" is not a supported migration TARGET here, unlike in the source dispatch.
func GetTargetNetworkFetcher(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string, insecureSkipTLS bool) (fetchers.TargetNetworkFetcher, error) {
	// Look up the provider CRD so its declared type can be inspected.
	prov, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider: %v", err)
	}
	// GetProviderType() only reads the CRD spec (no HTTPS calls); insecureSkipTLS is
	// passed along purely for consistency with the other provider-client call sites.
	pType, err := inventory.NewProviderClientWithInsecure(configFlags, prov, "", insecureSkipTLS).GetProviderType()
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetTargetNetworkFetcher - Provider: %s, Type: %s", providerName, pType)
	// Dispatch on the declared provider type.
	switch pType {
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack target network fetcher for %s", providerName)
		return openstackFetcher.NewOpenStackNetworkFetcher(), nil
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using VSphere target network fetcher for %s", providerName)
		return vsphereFetcher.NewVSphereNetworkFetcher(), nil
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift target network fetcher for %s", providerName)
		return openshiftFetcher.NewOpenShiftNetworkFetcher(), nil
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA target network fetcher for %s", providerName)
		return ovaFetcher.NewOVANetworkFetcher(), nil
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt target network fetcher for %s", providerName)
		return ovirtFetcher.NewOvirtNetworkFetcher(), nil
	case "hyperv":
		klog.V(4).Infof("DEBUG: Using HyperV target network fetcher for %s", providerName)
		return hypervFetcher.NewHyperVNetworkFetcher(), nil
	default:
		return nil, fmt.Errorf("unsupported target provider type: %s", pType)
	}
}
// GetNetworkMapper returns the appropriate network mapper based on source provider type.
// It also returns the resolved source and target provider types for the caller's use.
func GetNetworkMapper(ctx context.Context, configFlags *genericclioptions.ConfigFlags, sourceProviderName, sourceProviderNamespace, targetProviderName, targetProviderNamespace string, insecureSkipTLS bool) (mapper.NetworkMapper, string, string, error) {
	// Resolve the source provider's declared type.
	srcProv, err := inventory.GetProviderByName(ctx, configFlags, sourceProviderName, sourceProviderNamespace)
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get source provider: %v", err)
	}
	// GetProviderType() only reads the CRD spec (no HTTPS calls); insecureSkipTLS is
	// passed along purely for consistency with the other provider-client call sites.
	srcType, err := inventory.NewProviderClientWithInsecure(configFlags, srcProv, "", insecureSkipTLS).GetProviderType()
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get source provider type: %v", err)
	}
	// Resolve the target provider's declared type.
	dstProv, err := inventory.GetProviderByName(ctx, configFlags, targetProviderName, targetProviderNamespace)
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get target provider: %v", err)
	}
	dstType, err := inventory.NewProviderClientWithInsecure(configFlags, dstProv, "", insecureSkipTLS).GetProviderType()
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get target provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetNetworkMapper - Source provider: %s (type: %s), Target provider: %s (type: %s)",
		sourceProviderName, srcType, targetProviderName, dstType)
	// The mapper is selected by the SOURCE provider type only.
	switch srcType {
	case "ec2":
		klog.V(4).Infof("DEBUG: Using EC2 network mapper for source %s", sourceProviderName)
		return ec2Mapper.NewEC2NetworkMapper(), srcType, dstType, nil
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack network mapper for source %s", sourceProviderName)
		return openstackMapper.NewOpenStackNetworkMapper(), srcType, dstType, nil
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using vSphere network mapper for source %s", sourceProviderName)
		return vsphereMapper.NewVSphereNetworkMapper(), srcType, dstType, nil
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift network mapper for source %s", sourceProviderName)
		return openshiftMapper.NewOpenShiftNetworkMapper(), srcType, dstType, nil
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA network mapper for source %s", sourceProviderName)
		return ovaMapper.NewOVANetworkMapper(), srcType, dstType, nil
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt network mapper for source %s", sourceProviderName)
		return ovirtMapper.NewOvirtNetworkMapper(), srcType, dstType, nil
	case "hyperv":
		klog.V(4).Infof("DEBUG: Using HyperV network mapper for source %s", sourceProviderName)
		return hypervMapper.NewHyperVNetworkMapper(), srcType, dstType, nil
	default:
		return nil, "", "", fmt.Errorf("unsupported source provider type: %s", srcType)
	}
}
// CreateNetworkMap creates a network map using the new fetcher-based architecture.
//
// It discovers the source networks referenced by the plan VMs, optionally fetches and
// filters the target provider's networks, maps them with a provider-specific mapper,
// and creates the resulting NetworkMap resource. Returns the created map's name.
func CreateNetworkMap(ctx context.Context, opts NetworkMapperOptions) (string, error) {
	klog.V(4).Infof("DEBUG: Creating network map - Source: %s, Target: %s, DefaultTargetNetwork: '%s'",
		opts.SourceProvider, opts.TargetProvider, opts.DefaultTargetNetwork)
	// Get source network fetcher using the provider's namespace
	sourceProviderNamespace := client.GetProviderNamespace(opts.SourceProviderNamespace, opts.Namespace)
	sourceFetcher, err := GetSourceNetworkFetcher(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get source network fetcher: %v", err)
	}
	klog.V(4).Infof("DEBUG: Source fetcher created for provider: %s", opts.SourceProvider)
	// Get target network fetcher using the provider's namespace
	targetProviderNamespace := client.GetProviderNamespace(opts.TargetProviderNamespace, opts.Namespace)
	targetFetcher, err := GetTargetNetworkFetcher(ctx, opts.ConfigFlags, opts.TargetProvider, targetProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get target network fetcher: %v", err)
	}
	klog.V(4).Infof("DEBUG: Target fetcher created for provider: %s", opts.TargetProvider)
	// Fetch source networks
	sourceNetworks, err := sourceFetcher.FetchSourceNetworks(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.InventoryURL, opts.PlanVMNames, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to fetch source networks: %v", err)
	}
	klog.V(4).Infof("DEBUG: Fetched %d source networks", len(sourceNetworks))
	// Fetch target networks unless the caller asked for the built-in "default" network.
	// Fix: the original condition `A == "" || (A != "default" && A != "")` is logically
	// equivalent to `A != "default"`; the simplified form removes the redundant clauses.
	var targetNetworks []forkliftv1beta1.DestinationNetwork
	if opts.DefaultTargetNetwork != "default" {
		klog.V(4).Infof("DEBUG: Fetching target networks from target provider: %s", opts.TargetProvider)
		targetNetworks, err = targetFetcher.FetchTargetNetworks(ctx, opts.ConfigFlags, opts.TargetProvider, targetProviderNamespace, opts.InventoryURL, opts.InventoryInsecureSkipTLS)
		if err != nil {
			return "", fmt.Errorf("failed to fetch target networks: %v", err)
		}
		klog.V(4).Infof("DEBUG: Fetched %d target networks", len(targetNetworks))
		// Filter target networks to only include NADs in target namespace or "default" namespace
		targetNetworks = filterTargetNetworksByNamespace(targetNetworks, opts.TargetNamespace)
		klog.V(4).Infof("DEBUG: After filtering: %d target networks in namespace %s or default", len(targetNetworks), opts.TargetNamespace)
	} else {
		klog.V(4).Infof("DEBUG: Skipping target network fetch due to DefaultTargetNetwork='%s'", opts.DefaultTargetNetwork)
	}
	// Get provider-specific network mapper
	networkMapper, sourceProviderType, targetProviderType, err := GetNetworkMapper(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.TargetProvider, targetProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get network mapper: %v", err)
	}
	// Create network pairs using provider-specific mapping logic
	mappingOpts := mapper.NetworkMappingOptions{
		DefaultTargetNetwork: opts.DefaultTargetNetwork,
		Namespace:            opts.Namespace,
		SourceProviderType:   sourceProviderType,
		TargetProviderType:   targetProviderType,
	}
	networkPairs, err := networkMapper.CreateNetworkPairs(sourceNetworks, targetNetworks, mappingOpts)
	if err != nil {
		return "", fmt.Errorf("failed to create network pairs: %v", err)
	}
	// Create the network map using the existing infrastructure
	return createNetworkMap(opts, networkPairs)
}
// createNetworkMap builds a NetworkMap resource from the given network pairs
// and creates it in the cluster via the dynamic client, returning the name of
// the created map ("<plan-name>-network-map").
func createNetworkMap(opts NetworkMapperOptions, networkPairs []forkliftv1beta1.NetworkPair) (string, error) {
	// A NetworkMap must carry at least one pair; fall back to a
	// pod-to-pod placeholder entry when nothing was mapped.
	if len(networkPairs) == 0 {
		klog.V(4).Infof("DEBUG: No network pairs found, creating dummy pair")
		networkPairs = append(networkPairs, forkliftv1beta1.NetworkPair{
			Source:      ref.Ref{Type: "pod"},
			Destination: forkliftv1beta1.DestinationNetwork{Type: "pod"},
		})
	}

	mapName := opts.Name + "-network-map"

	// Provider references for both ends of the mapping.
	sourceRef := corev1.ObjectReference{
		Kind:       "Provider",
		APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
		Name:       opts.SourceProvider,
		Namespace:  client.GetProviderNamespace(opts.SourceProviderNamespace, opts.Namespace),
	}
	destRef := corev1.ObjectReference{
		Kind:       "Provider",
		APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
		Name:       opts.TargetProvider,
		Namespace:  client.GetProviderNamespace(opts.TargetProviderNamespace, opts.Namespace),
	}

	networkMap := &forkliftv1beta1.NetworkMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      mapName,
			Namespace: opts.Namespace,
		},
		Spec: forkliftv1beta1.NetworkMapSpec{
			Provider: provider.Pair{
				Source:      sourceRef,
				Destination: destRef,
			},
			Map: networkPairs,
		},
	}
	networkMap.Kind = "NetworkMap"
	networkMap.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()

	// The dynamic client operates on unstructured objects, so convert first.
	objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(networkMap)
	if err != nil {
		return "", fmt.Errorf("failed to convert NetworkMap to Unstructured: %v", err)
	}

	dynClient, err := client.GetDynamicClient(opts.ConfigFlags)
	if err != nil {
		return "", fmt.Errorf("failed to get client: %v", err)
	}
	_, err = dynClient.Resource(client.NetworkMapGVR).Namespace(opts.Namespace).Create(
		context.TODO(), &unstructured.Unstructured{Object: objMap}, metav1.CreateOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to create network map: %v", err)
	}
	klog.V(4).Infof("DEBUG: Created network map '%s' with %d network pairs", mapName, len(networkPairs))
	return mapName, nil
}
// filterTargetNetworksByNamespace filters target networks to only include NADs
// that are in the target namespace or the "default" namespace.
// This ensures VMs can only use NADs that are accessible from their namespace.
func filterTargetNetworksByNamespace(networks []forkliftv1beta1.DestinationNetwork, targetNamespace string) []forkliftv1beta1.DestinationNetwork {
	var kept []forkliftv1beta1.DestinationNetwork
	for _, net := range networks {
		switch {
		case net.Type != "multus":
			// Non-multus entries (pod networking, etc.) are namespace-agnostic.
			kept = append(kept, net)
		case net.Namespace == targetNamespace || net.Namespace == "default":
			// Multus NADs are only usable from their own or the default namespace.
			kept = append(kept, net)
			klog.V(4).Infof("DEBUG: Keeping NAD %s/%s (matches target namespace or default)", net.Namespace, net.Name)
		default:
			klog.V(4).Infof("DEBUG: Filtering out NAD %s/%s (not in target namespace %s or default)", net.Namespace, net.Name, targetNamespace)
		}
	}
	return kept
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/factory_test.go | Go | package network
import (
"testing"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
openshiftMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/openshift"
openstackMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/openstack"
ovaMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/ova"
ovirtMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/ovirt"
vsphereMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper/vsphere"
)
func TestGetNetworkMapper_ProviderRouting(t *testing.T) {
	// Note: These tests would need to be adapted to work with the actual inventory.GetProviderByName
	// function. For now, we'll test the mapper type selection logic conceptually.
	cases := []struct {
		name               string
		sourceProviderType string
		targetProviderType string
		expectedMapperType string
	}{
		{"OpenShift source -> OpenShift mapper", "openshift", "openshift", "*openshift.OpenShiftNetworkMapper"},
		{"vSphere source -> vSphere mapper", "vsphere", "openshift", "*vsphere.VSphereNetworkMapper"},
		{"oVirt source -> oVirt mapper", "ovirt", "openshift", "*ovirt.OvirtNetworkMapper"},
		{"OpenStack source -> OpenStack mapper", "openstack", "openshift", "*openstack.OpenStackNetworkMapper"},
		{"OVA source -> OVA mapper", "ova", "openshift", "*ova.OVANetworkMapper"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Select a mapper the same way the factory routes on source type.
			var m mapper.NetworkMapper
			switch tc.sourceProviderType {
			case "openshift":
				m = openshiftMapper.NewOpenShiftNetworkMapper()
			case "vsphere":
				m = vsphereMapper.NewVSphereNetworkMapper()
			case "ovirt":
				m = ovirtMapper.NewOvirtNetworkMapper()
			case "openstack":
				m = openstackMapper.NewOpenStackNetworkMapper()
			case "ova":
				m = ovaMapper.NewOVANetworkMapper()
			}
			if m == nil {
				t.Errorf("Expected mapper for provider type %s, got nil", tc.sourceProviderType)
			}
			// Interface conformance check (compile-time style assertion).
			var _ mapper.NetworkMapper = m
		})
	}
}
// TestNetworkMapperOptions_ProviderTypeFields verifies that the provider-type
// fields on NetworkMappingOptions round-trip unchanged after construction.
func TestNetworkMapperOptions_ProviderTypeFields(t *testing.T) {
	opts := mapper.NetworkMappingOptions{
		DefaultTargetNetwork: "test-network",
		Namespace:            "test-ns",
		SourceProviderType:   "openshift",
		TargetProviderType:   "openshift",
	}
	if got := opts.SourceProviderType; got != "openshift" {
		t.Errorf("SourceProviderType: got %s, want openshift", got)
	}
	if got := opts.TargetProviderType; got != "openshift" {
		t.Errorf("TargetProviderType: got %s, want openshift", got)
	}
}
// Test that all mapper types implement the NetworkMapper interface
func TestNetworkMapperInterface_AllImplementations(t *testing.T) {
	all := []mapper.NetworkMapper{
		openshiftMapper.NewOpenShiftNetworkMapper(),
		vsphereMapper.NewVSphereNetworkMapper(),
		ovirtMapper.NewOvirtNetworkMapper(),
		openstackMapper.NewOpenStackNetworkMapper(),
		ovaMapper.NewOVANetworkMapper(),
	}
	for idx, impl := range all {
		if impl == nil {
			t.Errorf("Mapper %d is nil", idx)
		}
		// Interface conformance check (compile-time style assertion).
		var _ mapper.NetworkMapper = impl
	}
}
// Test network mapper creation functions
//
// Verifies every provider-specific constructor returns a non-nil mapper.
func TestNetworkMapperCreation(t *testing.T) {
	tests := []struct {
		name           string
		createMapper   func() mapper.NetworkMapper
		expectedNotNil bool
	}{
		{
			name:           "OpenShift mapper creation",
			createMapper:   func() mapper.NetworkMapper { return openshiftMapper.NewOpenShiftNetworkMapper() },
			expectedNotNil: true,
		},
		{
			name:           "vSphere mapper creation",
			createMapper:   func() mapper.NetworkMapper { return vsphereMapper.NewVSphereNetworkMapper() },
			expectedNotNil: true,
		},
		{
			name:           "oVirt mapper creation",
			createMapper:   func() mapper.NetworkMapper { return ovirtMapper.NewOvirtNetworkMapper() },
			expectedNotNil: true,
		},
		{
			name:           "OpenStack mapper creation",
			createMapper:   func() mapper.NetworkMapper { return openstackMapper.NewOpenStackNetworkMapper() },
			expectedNotNil: true,
		},
		{
			name:           "OVA mapper creation",
			createMapper:   func() mapper.NetworkMapper { return ovaMapper.NewOVANetworkMapper() },
			expectedNotNil: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Named "got" rather than "mapper" so the local does not shadow
			// the imported mapper package inside the subtest closure.
			got := tt.createMapper()
			if tt.expectedNotNil && got == nil {
				t.Errorf("Expected non-nil mapper, got nil")
			}
			if !tt.expectedNotNil && got != nil {
				t.Errorf("Expected nil mapper, got non-nil")
			}
		})
	}
}
// Test filterTargetNetworksByNamespace function
//
// Table-driven cases for the NAD filtering rule: multus networks are kept
// only when they live in the target namespace or "default"; non-multus
// entries (pod networking) pass through regardless of namespace.
func TestFilterTargetNetworksByNamespace(t *testing.T) {
	tests := []struct {
		name            string
		networks        []forkliftv1beta1.DestinationNetwork
		targetNamespace string
		expectedCount   int
		expectedNames   []string
	}{
		{
			name: "Keep networks in target namespace and default",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "target-ns"},
				{Type: "multus", Name: "net2", Namespace: "default"},
				{Type: "multus", Name: "net3", Namespace: "other-ns"},
			},
			targetNamespace: "target-ns",
			expectedCount:   2,
			expectedNames:   []string{"net1", "net2"},
		},
		{
			name: "Keep only default namespace when target has no NADs",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "other-ns"},
				{Type: "multus", Name: "net2", Namespace: "default"},
			},
			targetNamespace: "target-ns",
			expectedCount:   1,
			expectedNames:   []string{"net2"},
		},
		{
			name: "Keep non-multus networks regardless of namespace",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "pod"},
				{Type: "multus", Name: "net1", Namespace: "other-ns"},
				{Type: "multus", Name: "net2", Namespace: "target-ns"},
			},
			targetNamespace: "target-ns",
			expectedCount:   2,
			expectedNames:   []string{"", "net2"}, // pod network has empty name
		},
		{
			name: "Filter out all multus networks when none match",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "ns1"},
				{Type: "multus", Name: "net2", Namespace: "ns2"},
			},
			targetNamespace: "target-ns",
			expectedCount:   0,
			expectedNames:   []string{},
		},
		{
			name:            "Empty networks list",
			networks:        []forkliftv1beta1.DestinationNetwork{},
			targetNamespace: "target-ns",
			expectedCount:   0,
			expectedNames:   []string{},
		},
		{
			name: "All networks in target namespace",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "target-ns"},
				{Type: "multus", Name: "net2", Namespace: "target-ns"},
			},
			targetNamespace: "target-ns",
			expectedCount:   2,
			expectedNames:   []string{"net1", "net2"},
		},
		{
			name: "All networks in default namespace",
			networks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "default"},
				{Type: "multus", Name: "net2", Namespace: "default"},
			},
			targetNamespace: "target-ns",
			expectedCount:   2,
			expectedNames:   []string{"net1", "net2"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := filterTargetNetworksByNamespace(tt.networks, tt.targetNamespace)
			if len(result) != tt.expectedCount {
				t.Errorf("filterTargetNetworksByNamespace() returned %d networks, expected %d", len(result), tt.expectedCount)
			}
			// Verify the expected names are present
			// (only indices present in result are compared; a shorter
			// result is already reported by the count check above).
			for i, expected := range tt.expectedNames {
				if i < len(result) && result[i].Name != expected {
					t.Errorf("filterTargetNetworksByNamespace() network[%d].Name = %s, expected %s", i, result[i].Name, expected)
				}
			}
		})
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/fetchers/ec2/fetcher.go | Go | package ec2
import (
"context"
"fmt"
"sort"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/fetchers"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// EC2NetworkFetcher implements network fetching for EC2 providers
type EC2NetworkFetcher struct{}

// NewEC2NetworkFetcher creates a new EC2 network fetcher
// returned as the generic fetchers.NetworkFetcher interface.
func NewEC2NetworkFetcher() fetchers.NetworkFetcher {
	return new(EC2NetworkFetcher)
}
// FetchSourceNetworks fetches networks (VPCs and Subnets) from EC2 provider
//
// The inventory exposes VPCs and Subnets in one "networks" collection;
// subnets take precedence: when any subnet exists only subnets are returned
// (sorted by CIDR block), otherwise VPCs are returned (sorted by id).
// The plan-VM-names parameter is unused for EC2 (hence the blank identifier).
func (f *EC2NetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, _ []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("DEBUG: EC2 - Fetching source networks from provider: %s", providerName)
	// Get provider
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get EC2 provider: %v", err)
	}
	// Fetch EC2 networks (VPCs and Subnets)
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch EC2 networks inventory: %v", err)
	}
	// Extract objects from EC2 envelope
	networksInventory = inventory.ExtractEC2Objects(networksInventory)
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Separate VPCs and Subnets
	var sourceNetworks []ref.Ref
	subnets := make([]map[string]interface{}, 0)
	vpcs := make([]map[string]interface{}, 0)
	for _, item := range networksArray {
		network, ok := item.(map[string]interface{})
		if !ok {
			klog.V(4).Infof("DEBUG: EC2 - Skipping network item with unexpected type: %T", item)
			continue
		}
		// Check if it's a subnet (has non-empty SubnetId) or VPC (non-empty VpcId, no subnet)
		if subnetID, ok := network["SubnetId"].(string); ok && subnetID != "" {
			subnets = append(subnets, network)
		} else if vpcID, ok := network["VpcId"].(string); ok && vpcID != "" {
			vpcs = append(vpcs, network)
		}
	}
	// If we have subnets, return them sorted by CIDR
	if len(subnets) > 0 {
		sort.Slice(subnets, func(i, j int) bool {
			// Missing CIDRs compare as empty strings, so they sort first.
			cidrI, okI := subnets[i]["CidrBlock"].(string)
			cidrJ, okJ := subnets[j]["CidrBlock"].(string)
			if !okI || !okJ {
				klog.V(4).Infof("DEBUG: EC2 - Missing or invalid CidrBlock during subnet sort (i:%v, j:%v)", okI, okJ)
			}
			return cidrI < cidrJ
		})
		for _, subnet := range subnets {
			// Use id from top level (provided by inventory server)
			if subnetID, ok := subnet["id"].(string); ok {
				sourceNetworks = append(sourceNetworks, ref.Ref{
					ID: subnetID,
				})
			}
		}
	} else if len(vpcs) > 0 {
		// If no subnets, return VPCs sorted by ID
		sort.Slice(vpcs, func(i, j int) bool {
			vpcI, okI := vpcs[i]["id"].(string)
			vpcJ, okJ := vpcs[j]["id"].(string)
			if !okI || !okJ {
				klog.V(4).Infof("DEBUG: EC2 - Missing or invalid id during VPC sort (i:%v, j:%v)", okI, okJ)
			}
			return vpcI < vpcJ
		})
		for _, vpc := range vpcs {
			// Use id from top level (provided by inventory server)
			if vpcID, ok := vpc["id"].(string); ok {
				sourceNetworks = append(sourceNetworks, ref.Ref{
					ID: vpcID,
				})
			}
		}
	}
	klog.V(4).Infof("DEBUG: EC2 - Found %d source networks (%d subnets, %d VPCs)", len(sourceNetworks), len(subnets), len(vpcs))
	return sourceNetworks, nil
}
// FetchTargetNetworks fetches target networks from EC2 provider (not typically used as EC2 is usually source)
func (f *EC2NetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("DEBUG: EC2 - Fetching target networks (EC2 is typically not a migration target)")
	// Implemented only for interface completeness: report an empty
	// (non-nil) network list since EC2 never acts as a target.
	return make([]forkliftv1beta1.DestinationNetwork, 0), nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/fetchers/hyperv/fetcher.go | Go | package hyperv
import (
"context"
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// HyperVNetworkFetcher implements network fetching for HyperV providers
type HyperVNetworkFetcher struct{}

// NewHyperVNetworkFetcher creates a new HyperV network fetcher
func NewHyperVNetworkFetcher() *HyperVNetworkFetcher {
	return new(HyperVNetworkFetcher)
}
// FetchSourceNetworks extracts network references from HyperV VMs
//
// It cross-references two inventory collections: the provider's networks
// (to build an id -> network lookup) and its VMs (to collect the
// networkUUIDs used by the NICs of the plan's VMs). Only networks actually
// referenced by a plan VM are returned; UUIDs with no inventory entry are
// logged and skipped. Result order follows Go map iteration and is
// therefore not deterministic.
func (f *HyperVNetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("HyperV fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %w", err)
	}
	// Fetch networks inventory first to create ID-to-network mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %w", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create ID-to-network mapping
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			if networkID, ok := network["id"].(string); ok {
				networkIDToNetwork[networkID] = network
			}
		}
	}
	klog.V(4).Infof("Available network mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %w", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs
	// (planVMSet gives O(1) membership checks while scanning all VMs).
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract network UUIDs from VM NICs (HyperV VMs have nics array with networkUUID field)
		nics, err := query.GetValueByPathString(vm, "nics")
		if err == nil && nics != nil {
			if nicsArray, ok := nics.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d NICs", vmName, len(nicsArray))
				for _, nicItem := range nicsArray {
					if nicMap, ok := nicItem.(map[string]interface{}); ok {
						if networkUUID, ok := nicMap["networkUUID"].(string); ok {
							klog.V(4).Infof("Found network UUID: %s", networkUUID)
							networkIDSet[networkUUID] = true
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no NICs or failed to extract: err=%v", vmName, err)
		}
	}
	klog.V(4).Infof("Final networkIDSet: %v", networkIDSet)
	// If no networks found from VMs, return empty list
	if len(networkIDSet) == 0 {
		klog.V(4).Infof("No networks found from VMs")
		return []ref.Ref{}, nil
	}
	// Build source networks list using the collected IDs
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		} else {
			// Referenced by a NIC but absent from the network inventory;
			// skip rather than emit a dangling reference.
			klog.V(4).Infof("Network ID %s referenced by VM NICs not found in network inventory", networkID)
		}
	}
	klog.V(4).Infof("HyperV fetcher - found %d source networks", len(sourceNetworks))
	return sourceNetworks, nil
}
// FetchTargetNetworks is not supported for HyperV as target - only OpenShift is supported as target
func (f *HyperVNetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	// Log first for debuggability, then signal the unsupported operation.
	klog.V(4).Infof("HyperV provider does not support target network fetching - only OpenShift is supported as target")
	err := fmt.Errorf("HyperV provider does not support target network fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/fetchers/interfaces.go | Go | package fetchers
import (
"context"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// SourceNetworkFetcher interface for extracting network information from source VMs
type SourceNetworkFetcher interface {
	// FetchSourceNetworks extracts network references from VMs to be migrated.
	// planVMNames limits extraction to the named VMs (providers may ignore it);
	// insecureSkipTLS disables TLS verification when talking to the inventory.
	FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error)
}

// TargetNetworkFetcher interface for extracting available target networks
type TargetNetworkFetcher interface {
	// FetchTargetNetworks extracts available destination networks from target provider.
	FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error)
}

// NetworkFetcher combines both source and target fetching for providers that can act as both
type NetworkFetcher interface {
	SourceNetworkFetcher
	TargetNetworkFetcher
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/fetchers/openshift/fetcher.go | Go | package openshift
import "context"
import (
"fmt"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// OpenShiftNetworkFetcher implements network fetching for OpenShift providers
type OpenShiftNetworkFetcher struct{}

// NewOpenShiftNetworkFetcher creates a new OpenShift network fetcher
func NewOpenShiftNetworkFetcher() *OpenShiftNetworkFetcher {
	return new(OpenShiftNetworkFetcher)
}
// FetchSourceNetworks extracts network references from OpenShift VMs
//
// NetworkAttachmentDefinitions (NADs) are fetched first to build name->id
// and id->NAD lookups; then the VM inventory is scanned for the plan's VMs,
// resolving each network name in spec.template.spec.networks to a NAD id
// (exact match first, then substring-based fuzzy match). Result order
// follows Go map iteration and is therefore not deterministic.
func (f *OpenShiftNetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OpenShift fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch networks inventory (NADs in OpenShift) first to create name-to-ID mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networkattachmentdefinitions?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create name-to-ID and ID-to-network mappings for NADs
	// (NADs missing either "id" or "name" are skipped).
	networkNameToID := make(map[string]string)
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			// Use the actual UUID as the ID
			if networkID, ok := network["id"].(string); ok {
				if networkName, ok := network["name"].(string); ok {
					// Map network name to the actual UUID
					networkNameToID[networkName] = networkID
					networkIDToNetwork[networkID] = network
				}
			}
		}
	}
	klog.V(4).Infof("Available NAD mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs
	// (planVMSet gives O(1) membership checks while scanning all VMs).
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract network names from VM spec.template.spec.networks (OpenShift VMs)
		networks, err := query.GetValueByPathString(vm, "object.spec.template.spec.networks")
		if err == nil && networks != nil {
			if networksArray, ok := networks.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d networks", vmName, len(networksArray))
				for _, networkItem := range networksArray {
					if networkMap, ok := networkItem.(map[string]interface{}); ok {
						// For OpenShift VMs, networks are typically referenced by NAD name
						if networkName, ok := networkMap["name"].(string); ok {
							klog.V(4).Infof("Found network name: %s", networkName)
							if networkID, exists := networkNameToID[networkName]; exists {
								klog.V(4).Infof("Found exact NAD match: %s -> %s", networkName, networkID)
								networkIDSet[networkID] = true
							} else {
								// Try fuzzy matching if exact match fails
								// (substring match either way; first hit wins,
								// and map iteration order makes it arbitrary
								// when several NAD names would match).
								for availableName, availableID := range networkNameToID {
									if strings.Contains(networkName, availableName) || strings.Contains(availableName, networkName) {
										klog.V(4).Infof("Found fuzzy NAD match: %s -> %s (via %s)", networkName, availableID, availableName)
										networkIDSet[availableID] = true
										break
									}
								}
							}
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no networks or failed to extract: err=%v", vmName, err)
		}
	}
	klog.V(4).Infof("Final networkIDSet: %v", networkIDSet)
	// If no networks found from VMs, return empty list
	if len(networkIDSet) == 0 {
		klog.V(4).Infof("No networks found from VMs")
		return []ref.Ref{}, nil
	}
	// Build source networks list using the collected IDs
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			if namespace, ok := networkItem["namespace"].(string); ok {
				sourceNetwork.Namespace = namespace
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		}
	}
	klog.V(4).Infof("OpenShift fetcher - found %d source networks", len(sourceNetworks))
	return sourceNetworks, nil
}
// FetchTargetNetworks extracts available destination networks from target provider
//
// Every NetworkAttachmentDefinition in the target provider's inventory is
// returned as a "multus" DestinationNetwork; NADs without a namespace fall
// back to the given (plan) namespace.
func (f *OpenShiftNetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("OpenShift fetcher - extracting target networks for provider: %s", providerName)
	// Get the target provider
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get target provider: %v", err)
	}
	// Get provider type for target provider to determine network format
	// (currently only logged; NADs are fetched unconditionally below).
	providerClient := inventory.NewProviderClientWithInsecure(configFlags, provider, inventoryURL, insecureSkipTLS)
	providerType, err := providerClient.GetProviderType()
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	klog.V(4).Infof("Target provider name: %s", providerName)
	klog.V(4).Infof("Target provider type detected: %s", providerType)
	// For OpenShift targets, always fetch NADs
	klog.V(4).Infof("Fetching NetworkAttachmentDefinitions for OpenShift target")
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networkattachmentdefinitions?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch target networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for target networks inventory")
	}
	// Build target networks list
	for _, item := range networksArray {
		networkItem, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		networkName := ""
		if name, ok := networkItem["name"].(string); ok {
			networkName = name
		}
		networkNamespace := ""
		if ns, ok := networkItem["namespace"].(string); ok {
			networkNamespace = ns
		}
		// For OpenShift targets, create Multus network reference
		klog.V(4).Infof("Creating multus network reference for: %s/%s", networkNamespace, networkName)
		destNetwork := forkliftv1beta1.DestinationNetwork{
			Type: "multus",
			Name: networkName,
		}
		// Always set namespace, use plan namespace if empty
		if networkNamespace != "" {
			destNetwork.Namespace = networkNamespace
		} else {
			destNetwork.Namespace = namespace
		}
		targetNetworks = append(targetNetworks, destNetwork)
	}
	klog.V(4).Infof("Available target networks count: %d", len(targetNetworks))
	return targetNetworks, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/fetchers/openstack/fetcher.go | Go | package openstack
import "context"
import (
"fmt"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// OpenStackNetworkFetcher implements network fetching for OpenStack providers
type OpenStackNetworkFetcher struct{}

// NewOpenStackNetworkFetcher creates a new OpenStack network fetcher
func NewOpenStackNetworkFetcher() *OpenStackNetworkFetcher {
	return new(OpenStackNetworkFetcher)
}
// FetchSourceNetworks extracts network references from OpenStack VMs.
//
// Resolution happens in three passes:
//  1. Fetch the provider's networks inventory and build name->ID and
//     ID->network lookup tables.
//  2. Fetch the VMs inventory and, for every VM listed in planVMNames,
//     walk its "addresses" map; the map keys are used as network names and
//     resolved to IDs, first exactly and then by substring ("fuzzy") match.
//  3. Convert the collected IDs into ref.Ref values (ID plus name when the
//     inventory provides one).
//
// VMs without address data are skipped; when none of the plan VMs yield a
// network, an empty slice (not an error) is returned. Inventory fetch
// failures and unexpected payload shapes are returned as errors.
func (f *OpenStackNetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OpenStack fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object; it carries the connection details needed by
	// the inventory client calls below.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch networks inventory first to create name-to-ID mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create name-to-ID and ID-to-network mappings. Entries without a string
	// "id" are dropped; a missing "name" only skips the name index.
	networkNameToID := make(map[string]string)
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			if networkID, ok := network["id"].(string); ok {
				networkIDToNetwork[networkID] = network
				if networkName, ok := network["name"].(string); ok {
					networkNameToID[networkName] = networkID
				}
			}
		}
	}
	klog.V(4).Infof("Available network mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof(" %s -> %s", id, name)
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs (convert names to IDs
	// immediately). planVMSet gives O(1) membership checks by VM name.
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// "addresses" is assumed to be keyed by network name (OpenStack
		// server convention — TODO confirm); only the keys are used here.
		addresses, err := query.GetValueByPathString(vm, "addresses")
		if err != nil || addresses == nil {
			klog.V(4).Infof("VM %s has no addresses or failed to extract: err=%v", vmName, err)
			continue
		}
		if addressesMap, ok := addresses.(map[string]interface{}); ok {
			for networkName := range addressesMap {
				klog.V(4).Infof("Found network name: %s", networkName)
				if networkID, exists := networkNameToID[networkName]; exists {
					klog.V(4).Infof("Found exact network match: %s -> %s", networkName, networkID)
					networkIDSet[networkID] = true
				} else {
					// Fall back to substring matching in both directions.
					// NOTE(review): map iteration order is randomized in Go,
					// so when several names fuzzy-match, which one wins is
					// nondeterministic — confirm this is acceptable.
					for availableName, availableID := range networkNameToID {
						if strings.Contains(networkName, availableName) || strings.Contains(availableName, networkName) {
							klog.V(4).Infof("Found fuzzy network match: %s -> %s (via %s)", networkName, availableID, availableName)
							networkIDSet[availableID] = true
							break
						}
					}
				}
			}
		}
	}
	// No usable network data on any plan VM: report success with an empty
	// result rather than an error.
	if len(networkIDSet) == 0 {
		klog.V(4).Infof("No networks found from VMs - VMs have incomplete data (missing addresses field)")
		return []ref.Ref{}, nil
	}
	// Build the source network references from the collected IDs.
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		}
	}
	klog.V(4).Infof("OpenStack fetcher - found %d source networks", len(sourceNetworks))
	return sourceNetworks, nil
}
// FetchTargetNetworks always fails for OpenStack: it cannot act as a
// migration target (only OpenShift can), so there are no target networks
// to enumerate.
func (f *OpenStackNetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("OpenStack provider does not support target network fetching - only OpenShift is supported as target")
	err := fmt.Errorf("OpenStack provider does not support target network fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package ova
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// OVANetworkFetcher retrieves network information from OVA providers.
type OVANetworkFetcher struct{}

// NewOVANetworkFetcher returns a ready-to-use OVA network fetcher.
func NewOVANetworkFetcher() *OVANetworkFetcher {
	return new(OVANetworkFetcher)
}
// FetchSourceNetworks extracts network references from OVA VMs.
//
// Two inventory calls are made: "networks" to build an ID->network table,
// and "vms" to find which network IDs the plan VMs use. Each OVA VM
// carries a direct "networks" array whose entries expose the identifier
// under the capital-"ID" key. The collected IDs are then converted into
// ref.Ref values (ID plus name when available).
//
// VMs without network data are skipped; when nothing is found an empty
// slice (not an error) is returned. Inventory fetch failures and
// unexpected payload shapes are returned as errors.
func (f *OVANetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OVA fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object; it carries the connection details needed by
	// the inventory client calls below.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch networks inventory first to create ID-to-network mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create ID-to-network mapping; entries without a string "id" are dropped.
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			if networkID, ok := network["id"].(string); ok {
				networkIDToNetwork[networkID] = network
			}
		}
	}
	klog.V(4).Infof("Available network mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof(" %s -> %s", id, name)
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs. planVMSet gives O(1)
	// membership checks by VM name.
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract network IDs from VM networks (OVA VMs have direct networks array with ID field)
		networks, err := query.GetValueByPathString(vm, "networks")
		if err == nil && networks != nil {
			if networksArray, ok := networks.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d networks", vmName, len(networksArray))
				for _, networkItem := range networksArray {
					if networkMap, ok := networkItem.(map[string]interface{}); ok {
						// OVA uses capital "ID" field (unlike the other
						// providers, which use lowercase "id")
						if networkID, ok := networkMap["ID"].(string); ok {
							klog.V(4).Infof("Found network ID: %s", networkID)
							networkIDSet[networkID] = true
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no networks or failed to extract: err=%v", vmName, err)
		}
	}
	klog.V(4).Infof("Final networkIDSet: %v", networkIDSet)
	// If no networks found from VMs, return empty list (success, not error)
	if len(networkIDSet) == 0 {
		klog.V(4).Infof("No networks found from VMs")
		return []ref.Ref{}, nil
	}
	// Build source networks list using the collected IDs
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		}
	}
	klog.V(4).Infof("OVA fetcher - found %d source networks", len(sourceNetworks))
	return sourceNetworks, nil
}
// FetchTargetNetworks always fails for OVA: it cannot act as a migration
// target (only OpenShift can), so there are no target networks to enumerate.
func (f *OVANetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("OVA provider does not support target network fetching - only OpenShift is supported as target")
	err := fmt.Errorf("OVA provider does not support target network fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package ovirt
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// OvirtNetworkFetcher retrieves network information from oVirt providers.
type OvirtNetworkFetcher struct{}

// NewOvirtNetworkFetcher returns a ready-to-use oVirt network fetcher.
func NewOvirtNetworkFetcher() *OvirtNetworkFetcher {
	return new(OvirtNetworkFetcher)
}
// FetchSourceNetworks extracts network references from oVirt VMs.
//
// oVirt VMs do not reference networks directly: each VM NIC carries a NIC
// *profile* ID, and each profile points at a network. Three inventory
// calls are therefore made: "networks" (ID->network table), "nicprofiles"
// (profile ID -> network ID table), and "vms" (to walk each plan VM's
// nics). The collected network IDs are converted into ref.Ref values
// (ID plus name when available).
//
// VMs or NICs missing the expected fields are silently skipped; inventory
// fetch failures and unexpected payload shapes are returned as errors.
func (f *OvirtNetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("oVirt fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object; it carries the connection details needed by
	// the inventory client calls below.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch networks inventory first to create ID-to-network mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create ID-to-network mapping; entries without a string "id" are dropped.
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			if networkID, ok := network["id"].(string); ok {
				networkIDToNetwork[networkID] = network
			}
		}
	}
	klog.V(4).Infof("Available network mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof(" %s -> %s", id, name)
		}
	}
	// Fetch NIC profiles to map profile IDs to network IDs
	nicProfilesInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "nicprofiles?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch NIC profiles inventory: %v", err)
	}
	nicProfilesArray, ok := nicProfilesInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for NIC profiles inventory")
	}
	// Create profile ID to network ID mapping; the profile's "network"
	// field holds the network ID directly as a string.
	profileIDToNetworkID := make(map[string]string)
	for _, item := range nicProfilesArray {
		if profile, ok := item.(map[string]interface{}); ok {
			if profileID, ok := profile["id"].(string); ok {
				if networkID, ok := profile["network"].(string); ok {
					profileIDToNetworkID[profileID] = networkID
				}
			}
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs. planVMSet gives O(1)
	// membership checks by VM name.
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, vmItem := range vmsArray {
		if vm, ok := vmItem.(map[string]interface{}); ok {
			if vmName, ok := vm["name"].(string); ok && planVMSet[vmName] {
				klog.V(4).Infof("Processing VM: %s", vmName)
				// Extract profiles from VM nics
				if nics, ok := vm["nics"].([]interface{}); ok {
					for _, nicItem := range nics {
						if nic, ok := nicItem.(map[string]interface{}); ok {
							// Get profile ID from nic
							if profileID, ok := nic["profile"].(string); ok {
								klog.V(4).Infof("Found profile ID: %s", profileID)
								// Map profile ID to network ID via the
								// nicprofiles table built above
								if networkID, exists := profileIDToNetworkID[profileID]; exists {
									klog.V(4).Infof("Mapped profile %s to network %s", profileID, networkID)
									networkIDSet[networkID] = true
								}
							}
						}
					}
				}
			}
		}
	}
	klog.V(4).Infof("oVirt fetcher - found %d source networks", len(networkIDSet))
	// Create source network references for the networks used by VMs
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		}
	}
	return sourceNetworks, nil
}
// FetchTargetNetworks always fails for oVirt: it cannot act as a migration
// target (only OpenShift can), so there are no target networks to enumerate.
func (f *OvirtNetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("oVirt provider does not support target network fetching - only OpenShift is supported as target")
	err := fmt.Errorf("oVirt provider does not support target network fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package vsphere
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// VSphereNetworkFetcher retrieves network information from VSphere providers.
type VSphereNetworkFetcher struct{}

// NewVSphereNetworkFetcher returns a ready-to-use VSphere network fetcher.
func NewVSphereNetworkFetcher() *VSphereNetworkFetcher {
	return new(VSphereNetworkFetcher)
}
// FetchSourceNetworks extracts network references from VSphere VMs.
//
// Two inventory calls are made: "networks" to build an ID->network table,
// and "vms" to find which network IDs the plan VMs use. Each VSphere VM
// carries a direct "networks" array whose entries expose the identifier
// under the lowercase "id" key. The collected IDs are then converted into
// ref.Ref values (ID plus name when available).
//
// VMs without network data are skipped; when nothing is found an empty
// slice (not an error) is returned. Inventory fetch failures and
// unexpected payload shapes are returned as errors.
func (f *VSphereNetworkFetcher) FetchSourceNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("VSphere fetcher - extracting source networks for provider: %s", providerName)
	// Get the provider object; it carries the connection details needed by
	// the inventory client calls below.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch networks inventory first to create ID-to-network mapping
	networksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "networks?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch networks inventory: %v", err)
	}
	networksArray, ok := networksInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for networks inventory")
	}
	// Create ID-to-network mapping; entries without a string "id" are dropped.
	networkIDToNetwork := make(map[string]map[string]interface{})
	for _, item := range networksArray {
		if network, ok := item.(map[string]interface{}); ok {
			if networkID, ok := network["id"].(string); ok {
				networkIDToNetwork[networkID] = network
			}
		}
	}
	klog.V(4).Infof("Available network mappings:")
	for id, networkItem := range networkIDToNetwork {
		if name, ok := networkItem["name"].(string); ok {
			klog.V(4).Infof(" %s -> %s", id, name)
		}
	}
	// Fetch VMs inventory to get network references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}
	// Extract network IDs used by the plan VMs. planVMSet gives O(1)
	// membership checks by VM name.
	networkIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract network IDs from VM networks (VSphere VMs have direct networks array)
		networks, err := query.GetValueByPathString(vm, "networks")
		if err == nil && networks != nil {
			if networksArray, ok := networks.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d networks", vmName, len(networksArray))
				for _, networkItem := range networksArray {
					if networkMap, ok := networkItem.(map[string]interface{}); ok {
						if networkID, ok := networkMap["id"].(string); ok {
							klog.V(4).Infof("Found network ID: %s", networkID)
							networkIDSet[networkID] = true
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no networks or failed to extract: err=%v", vmName, err)
		}
	}
	klog.V(4).Infof("Final networkIDSet: %v", networkIDSet)
	// If no networks found from VMs, return empty list (success, not error)
	if len(networkIDSet) == 0 {
		klog.V(4).Infof("No networks found from VMs")
		return []ref.Ref{}, nil
	}
	// Build source networks list using the collected IDs
	var sourceNetworks []ref.Ref
	for networkID := range networkIDSet {
		if networkItem, exists := networkIDToNetwork[networkID]; exists {
			sourceNetwork := ref.Ref{
				ID: networkID,
			}
			if name, ok := networkItem["name"].(string); ok {
				sourceNetwork.Name = name
			}
			sourceNetworks = append(sourceNetworks, sourceNetwork)
		}
	}
	klog.V(4).Infof("VSphere fetcher - found %d source networks", len(sourceNetworks))
	return sourceNetworks, nil
}
// FetchTargetNetworks always fails for VSphere: it cannot act as a
// migration target (only OpenShift can), so there are no target networks
// to enumerate.
func (f *VSphereNetworkFetcher) FetchTargetNetworks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationNetwork, error) {
	klog.V(4).Infof("VSphere provider does not support target network fetching - only OpenShift is supported as target")
	err := fmt.Errorf("VSphere provider does not support target network fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package ec2
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// EC2NetworkMapper maps EC2 source networks onto OpenShift destinations.
type EC2NetworkMapper struct{}

// NewEC2NetworkMapper returns an EC2 implementation of mapper.NetworkMapper.
func NewEC2NetworkMapper() mapper.NetworkMapper {
	return new(EC2NetworkMapper)
}
// CreateNetworkPairs creates network mapping pairs for EC2 -> OpenShift
// migrations. The first source network (typically the public subnet / VPC)
// is mapped to the default destination — pod networking unless the user
// supplied one via opts.DefaultTargetNetwork — and every remaining source
// network is explicitly ignored.
func (m *EC2NetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	var networkPairs []forkliftv1beta1.NetworkPair

	klog.V(4).Infof("DEBUG: EC2 network mapper - Creating network pairs - %d source networks", len(sourceNetworks))
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return networkPairs, nil
	}

	// Resolve the destination used for the first (primary) source network.
	defaultDestination := forkliftv1beta1.DestinationNetwork{Type: "pod"}
	if opts.DefaultTargetNetwork != "" {
		// Trim whitespace for better UX before parsing the user's choice.
		defaultDestination = parseDefaultNetwork(strings.TrimSpace(opts.DefaultTargetNetwork), opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)
	} else {
		klog.V(4).Infof("DEBUG: Using default pod networking for EC2 migration")
	}

	for i, src := range sourceNetworks {
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if i == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first EC2 network %s to target %s/%s (%s)",
				src.ID, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting EC2 network %s to ignored", src.ID)
		}
		networkPairs = append(networkPairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	klog.V(4).Infof("DEBUG: EC2 network mapper - Created %d network pairs", len(networkPairs))
	return networkPairs, nil
}
// parseDefaultNetwork parses the default network string into a DestinationNetwork.
//
// Accepted forms:
//   - "" or "default" -> pod networking
//   - "ignored"       -> the network is explicitly left unmapped
//   - "name"          -> multus network "name" in the plan namespace
//   - "ns/name"       -> multus network "name" in namespace "ns"
//   - "/name"         -> multus network "name" in the plan namespace
//
// A spec whose name is empty after parsing (e.g. "ns/") falls back to pod
// networking. Note that strings.Cut splits at the first '/', so "a/b/c"
// yields namespace "a" and name "b/c".
func parseDefaultNetwork(defaultNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	// Handle special cases
	if defaultNetwork == "default" || defaultNetwork == "" {
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	}
	// Treat "ignored" the same way the other mappers do: an explicit
	// request to leave the network unmapped, not a multus network
	// literally named "ignored".
	if defaultNetwork == "ignored" {
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	// Parse namespace/name format
	var targetNamespace, targetName string
	if ns, name, found := strings.Cut(defaultNetwork, "/"); found {
		if ns == "" {
			// Format: /name (use provided namespace)
			targetNamespace = namespace
		} else {
			targetNamespace = ns
		}
		targetName = name
	} else {
		// Just a name, use provided namespace
		targetNamespace = namespace
		targetName = defaultNetwork
	}
	// If name is empty after parsing, fall back to pod networking
	if targetName == "" {
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	}
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Namespace: targetNamespace,
		Name:      targetName,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package hyperv
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// HyperVNetworkMapper maps HyperV source networks onto OpenShift destinations.
type HyperVNetworkMapper struct{}

// NewHyperVNetworkMapper returns a HyperV implementation of mapper.NetworkMapper.
func NewHyperVNetworkMapper() mapper.NetworkMapper {
	return new(HyperVNetworkMapper)
}
// CreateNetworkPairs builds the HyperV -> OpenShift network mapping using
// generic logic (no same-name matching): the first source network is mapped
// to the selected default target and every remaining source network is
// explicitly ignored.
func (m *HyperVNetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: HyperV network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))

	var pairs []forkliftv1beta1.NetworkPair
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return pairs, nil
	}

	// Pick the destination for the first (primary) source network.
	defaultDestination := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)

	for i, src := range sourceNetworks {
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if i == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	return pairs, nil
}
// findDefaultTargetNetwork selects the default destination network by
// priority: (1) an explicit user choice, (2) the first available multus
// network on the target, (3) pod networking as a last resort.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: an explicit user choice always wins.
	if opts.DefaultTargetNetwork != "" {
		dest := parseDefaultNetwork(opts.DefaultTargetNetwork, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			dest.Namespace, dest.Name, dest.Type)
		return dest
	}
	// Priority 2: the first multus network offered by the target.
	for i := range targetNetworks {
		if targetNetworks[i].Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			targetNetworks[i].Namespace, targetNetworks[i].Name)
		return targetNetworks[i]
	}
	// Priority 3: nothing suitable — fall back to pod networking.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork turns a default-network specification into a
// DestinationNetwork: "default" means pod networking, "ignored" means no
// mapping, "namespace/name" selects a multus network in an explicit
// namespace, and a bare name selects a multus network in the plan namespace.
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	// "namespace/name" form: exactly one '/' and a non-empty name. An empty
	// namespace segment ("/name") falls back to the plan namespace.
	if parts := strings.Split(defaultTargetNetwork, "/"); len(parts) == 2 && parts[1] != "" {
		ns := parts[0]
		if ns == "" {
			ns = namespace
		}
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Namespace: ns,
			Name:      parts[1],
		}
	}
	// Anything else is treated as a bare network name in the plan namespace.
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      defaultTargetNetwork,
		Namespace: namespace,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package mapper
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
)
// NetworkMappingOptions contains options for network mapping.
type NetworkMappingOptions struct {
	DefaultTargetNetwork string // user-chosen default target ("default", "ignored", "name" or "ns/name"); empty lets the mapper decide
	Namespace            string // plan namespace, used when a target network name carries no explicit namespace
	SourceProviderType   string // provider type of the migration source (e.g. "vsphere", "openshift")
	TargetProviderType   string // provider type of the migration target; mappers special-case "openshift"
}
// NetworkMapper defines the interface for network mapping operations.
// Implementations pair each source network with a destination on the
// target cluster (pod networking, a multus network, or "ignored").
type NetworkMapper interface {
	// CreateNetworkPairs maps sourceNetworks onto targetNetworks according
	// to opts and returns the resulting plan network pairs.
	CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
package openshift
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// OpenShiftNetworkMapper maps OpenShift source networks onto OpenShift destinations.
type OpenShiftNetworkMapper struct{}

// NewOpenShiftNetworkMapper returns an OpenShift implementation of mapper.NetworkMapper.
func NewOpenShiftNetworkMapper() mapper.NetworkMapper {
	return new(OpenShiftNetworkMapper)
}
// CreateNetworkPairs creates network mapping pairs with OpenShift-specific
// logic: for OCP-to-OCP migrations it first attempts all-or-nothing
// same-name matching; otherwise (or when matching fails) it delegates to
// the generic default behavior.
func (m *OpenShiftNetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: OpenShift network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))
	klog.V(4).Infof("DEBUG: Source provider type: %s, Target provider type: %s", opts.SourceProviderType, opts.TargetProviderType)

	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		var empty []forkliftv1beta1.NetworkPair
		return empty, nil
	}

	// Non-OCP targets never use same-name matching.
	if opts.TargetProviderType != "openshift" {
		return createDefaultNetworkPairs(sourceNetworks, targetNetworks, opts)
	}

	klog.V(4).Infof("DEBUG: OCP-to-OCP migration detected, attempting same-name matching")
	if !canMatchAllNetworksByName(sourceNetworks, targetNetworks) {
		klog.V(4).Infof("DEBUG: Not all networks can be matched by name, falling back to default behavior")
		return createDefaultNetworkPairs(sourceNetworks, targetNetworks, opts)
	}
	klog.V(4).Infof("DEBUG: All networks can be matched by name, using same-name mapping")
	return createSameNameNetworkPairs(sourceNetworks, targetNetworks)
}
// canMatchAllNetworksByName reports whether every source network can be
// uniquely matched to a target network by name. Only multus target
// networks with a non-empty name participate. Matching is all-or-nothing:
// duplicate source names, more sources than distinct target names, or any
// source without a same-named target all return false.
func canMatchAllNetworksByName(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork) bool {
	// Create a map of target network names for quick lookup (only multus networks can be matched by name)
	targetNames := make(map[string]bool)
	for _, target := range targetNetworks {
		if target.Type == "multus" && target.Name != "" {
			targetNames[target.Name] = true
		}
	}
	klog.V(4).Infof("DEBUG: Available target networks for name matching: %v", getTargetNetworkNames(targetNetworks))
	// Each source must map to its own target, so duplicate source names can
	// never be matched uniquely. Counting distinct names (rather than only
	// comparing slice lengths) also rejects duplicates that a plain length
	// check would let through.
	sourceNames := make(map[string]bool)
	for _, source := range sourceNetworks {
		sourceNames[source.Name] = true
	}
	if len(sourceNames) != len(sourceNetworks) {
		klog.V(4).Infof("DEBUG: Duplicate source network names present; cannot match uniquely by name")
		return false
	}
	// Uniqueness constraint: never more sources than available targets.
	if len(sourceNetworks) > len(targetNames) {
		klog.V(4).Infof("DEBUG: More source networks (%d) than available target networks (%d) for name matching", len(sourceNetworks), len(targetNames))
		return false
	}
	// Every source must have a same-named target.
	for _, source := range sourceNetworks {
		if !targetNames[source.Name] {
			klog.V(4).Infof("DEBUG: Source network '%s' has no matching target by name", source.Name)
			return false
		}
	}
	klog.V(4).Infof("DEBUG: All source networks can be matched by name")
	return true
}
// createSameNameNetworkPairs creates network pairs using same-name matching.
// Each source network is paired with the named multus target network of the
// same name; sources without a match are silently skipped (the caller is
// expected to have pre-validated with canMatchAllNetworksByName).
func createSameNameNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork) ([]forkliftv1beta1.NetworkPair, error) {
	// Index the named multus targets so each source is a single map lookup.
	index := make(map[string]forkliftv1beta1.DestinationNetwork, len(targetNetworks))
	for _, candidate := range targetNetworks {
		if candidate.Type == "multus" && candidate.Name != "" {
			index[candidate.Name] = candidate
		}
	}
	var pairs []forkliftv1beta1.NetworkPair
	for _, src := range sourceNetworks {
		dest, ok := index[src.Name]
		if !ok {
			// No same-name target available for this source; skip it.
			continue
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
		klog.V(4).Infof("DEBUG: Mapped source network %s -> %s/%s (same name)",
			src.Name, dest.Namespace, dest.Name)
	}
	klog.V(4).Infof("DEBUG: Created %d same-name network pairs", len(pairs))
	return pairs, nil
}
// createDefaultNetworkPairs creates network pairs using the default behavior
// (first source -> default target, every other source -> ignored).
func createDefaultNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	// Resolve the single destination that the first source network maps to.
	defaultDestination := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)
	var pairs []forkliftv1beta1.NetworkPair
	for idx, src := range sourceNetworks {
		// Only the first source network gets a real destination; all others
		// are explicitly marked ignored.
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if idx == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	klog.V(4).Infof("DEBUG: Created %d default network pairs", len(pairs))
	return pairs, nil
}
// findDefaultTargetNetwork finds the default target network using the original
// priority logic: explicit user choice > first multus target > pod networking.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: an explicit user-supplied default always wins.
	if userDefault := opts.DefaultTargetNetwork; userDefault != "" {
		parsed := parseDefaultNetwork(userDefault, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			parsed.Namespace, parsed.Name, parsed.Type)
		return parsed
	}
	// Priority 2: the first multus network among the targets.
	for _, candidate := range targetNetworks {
		if candidate.Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			candidate.Namespace, candidate.Name)
		return candidate
	}
	// Priority 3: pod networking when nothing better is available.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork parses a default network specification (from original mapper).
//
// Recognized forms:
//   - "default"        -> pod networking
//   - "ignored"        -> ignored network
//   - "namespace/name" -> multus network (empty namespace part falls back to
//     the plan namespace)
//   - anything else    -> multus network named by the whole string, in the
//     plan namespace
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	parts := strings.Split(defaultTargetNetwork, "/")
	if len(parts) != 2 {
		// Not exactly "namespace/name": treat the whole spec as a bare
		// network name living in the plan namespace.
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Name:      defaultTargetNetwork,
			Namespace: namespace,
		}
	}
	// "namespace/name" form; an empty namespace part means the plan namespace.
	targetNamespace := parts[0]
	if targetNamespace == "" {
		targetNamespace = namespace
	}
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      parts[1],
		Namespace: targetNamespace,
	}
}
// getTargetNetworkNames returns the names of all named multus target
// networks; used only for debug logging.
func getTargetNetworkNames(targetNetworks []forkliftv1beta1.DestinationNetwork) []string {
	var multusNames []string
	for _, candidate := range targetNetworks {
		// Only multus networks carry a usable name; pod/ignored entries do not.
		if candidate.Type != "multus" || candidate.Name == "" {
			continue
		}
		multusNames = append(multusNames, candidate.Name)
	}
	return multusNames
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/openshift/mapper_test.go | Go | package openshift
import (
"testing"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
networkmapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// TestOpenShiftNetworkMapper_CreateNetworkPairs_SameNameMatching exercises the
// OCP-to-OCP same-name matching path and the fallbacks to the default
// (first -> default, rest -> ignored) behavior.
// NOTE(review): expectedSameName documents each case's intent but is never
// asserted directly; the outcome is verified through the expected names/types.
func TestOpenShiftNetworkMapper_CreateNetworkPairs_SameNameMatching(t *testing.T) {
	tests := []struct {
		name                string
		sourceNetworks      []ref.Ref
		targetNetworks      []forkliftv1beta1.DestinationNetwork
		sourceProviderType  string
		targetProviderType  string
		expectedPairs       int      // expected number of returned pairs
		expectedSameName    bool     // whether same-name matching should trigger (informational)
		expectedTargetNames []string // per-pair destination names ("" for unnamed types)
		expectedTargetTypes []string // per-pair destination types
	}{
		{
			name: "OCP-to-OCP: All sources match by name",
			sourceNetworks: []ref.Ref{
				{Name: "management-net", ID: "net-1"},
				{Name: "storage-net", ID: "net-2"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "management-net", Namespace: "default"},
				{Type: "multus", Name: "storage-net", Namespace: "default"},
				{Type: "multus", Name: "backup-net", Namespace: "default"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       2,
			expectedSameName:    true,
			expectedTargetNames: []string{"management-net", "storage-net"},
			expectedTargetTypes: []string{"multus", "multus"},
		},
		{
			name: "OCP-to-OCP: Some sources don't match - fallback to default",
			sourceNetworks: []ref.Ref{
				{Name: "management-net", ID: "net-1"},
				{Name: "unknown-net", ID: "net-2"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "management-net", Namespace: "default"},
				{Type: "multus", Name: "storage-net", Namespace: "default"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       2,
			expectedSameName:    false,
			expectedTargetNames: []string{"management-net", ""},
			expectedTargetTypes: []string{"multus", "ignored"},
		},
		{
			name: "OCP-to-OCP: More sources than targets - fallback",
			sourceNetworks: []ref.Ref{
				{Name: "net1", ID: "net-1"},
				{Name: "net2", ID: "net-2"},
				{Name: "net3", ID: "net-3"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1", Namespace: "default"},
				{Type: "multus", Name: "net2", Namespace: "default"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       3,
			expectedSameName:    false,
			expectedTargetNames: []string{"net1", "", ""},
			expectedTargetTypes: []string{"multus", "ignored", "ignored"},
		},
		{
			name: "OCP-to-non-OCP: Use default behavior",
			sourceNetworks: []ref.Ref{
				{Name: "VM Network", ID: "net-1"},
				{Name: "Management Network", ID: "net-2"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "multus-net", Namespace: "default"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "vsphere",
			expectedPairs:       2,
			expectedSameName:    false,
			expectedTargetNames: []string{"multus-net", ""},
			expectedTargetTypes: []string{"multus", "ignored"},
		},
		{
			name: "OCP-to-OCP: No multus targets - fallback to pod",
			sourceNetworks: []ref.Ref{
				{Name: "management-net", ID: "net-1"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "pod"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       1,
			expectedSameName:    false,
			expectedTargetNames: []string{""},
			expectedTargetTypes: []string{"pod"},
		},
		{
			name:                "OCP-to-OCP: Empty sources",
			sourceNetworks:      []ref.Ref{},
			targetNetworks:      []forkliftv1beta1.DestinationNetwork{{Type: "multus", Name: "default"}},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       0,
			expectedSameName:    false,
			expectedTargetNames: []string{},
			expectedTargetTypes: []string{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			networkMapper := NewOpenShiftNetworkMapper()
			opts := networkmapper.NetworkMappingOptions{
				SourceProviderType: tt.sourceProviderType,
				TargetProviderType: tt.targetProviderType,
				Namespace:          "default",
			}
			pairs, err := networkMapper.CreateNetworkPairs(tt.sourceNetworks, tt.targetNetworks, opts)
			if err != nil {
				t.Fatalf("CreateNetworkPairs() error = %v", err)
			}
			if len(pairs) != tt.expectedPairs {
				t.Errorf("CreateNetworkPairs() got %d pairs, want %d", len(pairs), tt.expectedPairs)
			}
			// Verify target network names and types
			for i, pair := range pairs {
				if i < len(tt.expectedTargetNames) {
					if pair.Destination.Name != tt.expectedTargetNames[i] {
						t.Errorf("Pair %d: got target name %s, want %s", i, pair.Destination.Name, tt.expectedTargetNames[i])
					}
				}
				if i < len(tt.expectedTargetTypes) {
					if pair.Destination.Type != tt.expectedTargetTypes[i] {
						t.Errorf("Pair %d: got target type %s, want %s", i, pair.Destination.Type, tt.expectedTargetTypes[i])
					}
				}
			}
			// Verify source names are preserved
			for i, pair := range pairs {
				if i < len(tt.sourceNetworks) {
					if pair.Source.Name != tt.sourceNetworks[i].Name {
						t.Errorf("Pair %d: source name %s != expected %s", i, pair.Source.Name, tt.sourceNetworks[i].Name)
					}
				}
			}
		})
	}
}
// TestOpenShiftNetworkMapper_CreateNetworkPairs_DefaultNetworkSelection
// verifies the default-target priority chain (user-defined spec, then first
// multus network, then pod networking) by forcing the non-OCP target path and
// inspecting the destination of the first (and only) pair.
func TestOpenShiftNetworkMapper_CreateNetworkPairs_DefaultNetworkSelection(t *testing.T) {
	tests := []struct {
		name                 string
		targetNetworks       []forkliftv1beta1.DestinationNetwork
		defaultTargetNetwork string // user-supplied default spec; "" means auto-select
		expectedType         string
		expectedName         string
		expectedNamespace    string
	}{
		{
			name: "User-defined default network",
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "auto-selected", Namespace: "default"},
			},
			defaultTargetNetwork: "custom-namespace/custom-net",
			expectedType:         "multus",
			expectedName:         "custom-net",
			expectedNamespace:    "custom-namespace",
		},
		{
			name: "User-defined pod networking",
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "some-net", Namespace: "default"},
			},
			defaultTargetNetwork: "default",
			expectedType:         "pod",
			expectedName:         "",
			expectedNamespace:    "",
		},
		{
			name: "Auto-select first multus network",
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "first-multus", Namespace: "default"},
				{Type: "multus", Name: "second-multus", Namespace: "default"},
			},
			defaultTargetNetwork: "",
			expectedType:         "multus",
			expectedName:         "first-multus",
			expectedNamespace:    "default",
		},
		{
			name: "Fallback to pod networking",
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "pod"},
			},
			defaultTargetNetwork: "",
			expectedType:         "pod",
			expectedName:         "",
			expectedNamespace:    "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			networkMapper := NewOpenShiftNetworkMapper()
			sourceNetworks := []ref.Ref{{Name: "test-source", ID: "net-1"}}
			opts := networkmapper.NetworkMappingOptions{
				DefaultTargetNetwork: tt.defaultTargetNetwork,
				Namespace:            "default",
				SourceProviderType:   "openshift",
				TargetProviderType:   "vsphere", // Force default behavior
			}
			pairs, err := networkMapper.CreateNetworkPairs(sourceNetworks, tt.targetNetworks, opts)
			if err != nil {
				t.Fatalf("CreateNetworkPairs() error = %v", err)
			}
			if len(pairs) > 0 {
				dest := pairs[0].Destination
				if dest.Type != tt.expectedType {
					t.Errorf("Default network type: got %s, want %s", dest.Type, tt.expectedType)
				}
				if dest.Name != tt.expectedName {
					t.Errorf("Default network name: got %s, want %s", dest.Name, tt.expectedName)
				}
				if dest.Namespace != tt.expectedNamespace {
					t.Errorf("Default network namespace: got %s, want %s", dest.Namespace, tt.expectedNamespace)
				}
			}
		})
	}
}
// TestCanMatchAllNetworksByName covers the all-or-nothing name-match
// predicate: full matches, partial matches, source/target count imbalance,
// non-multus targets being ignored, and the empty-slice edge cases.
func TestCanMatchAllNetworksByName(t *testing.T) {
	tests := []struct {
		name           string
		sourceNetworks []ref.Ref
		targetNetworks []forkliftv1beta1.DestinationNetwork
		expected       bool
	}{
		{
			name: "All sources match with unique targets",
			sourceNetworks: []ref.Ref{
				{Name: "net1"},
				{Name: "net2"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1"},
				{Type: "multus", Name: "net2"},
				{Type: "multus", Name: "net3"},
			},
			expected: true,
		},
		{
			name: "Some sources don't match",
			sourceNetworks: []ref.Ref{
				{Name: "net1"},
				{Name: "unknown-net"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1"},
				{Type: "multus", Name: "net2"},
			},
			expected: false,
		},
		{
			name: "More sources than targets",
			sourceNetworks: []ref.Ref{
				{Name: "net1"},
				{Name: "net2"},
				{Name: "net3"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "net1"},
				{Type: "multus", Name: "net2"},
			},
			expected: false,
		},
		{
			name: "Non-multus targets ignored",
			sourceNetworks: []ref.Ref{
				{Name: "net1"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "pod"},
				{Type: "multus", Name: "net1"},
			},
			expected: true,
		},
		{
			// Vacuous truth: no sources means everything "matches".
			name:           "Empty sources",
			sourceNetworks: []ref.Ref{},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "any-net"},
			},
			expected: true,
		},
		{
			name: "Empty targets",
			sourceNetworks: []ref.Ref{
				{Name: "some-net"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{},
			expected:       false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := canMatchAllNetworksByName(tt.sourceNetworks, tt.targetNetworks)
			if result != tt.expected {
				t.Errorf("canMatchAllNetworksByName() = %v, want %v", result, tt.expected)
			}
		})
	}
}
// TestCreateSameNameNetworkPairs verifies that same-name pairing maps each
// source to its identically-named multus target, skips extra targets, and
// ignores non-multus (pod) targets entirely.
func TestCreateSameNameNetworkPairs(t *testing.T) {
	sourceNetworks := []ref.Ref{
		{Name: "management-net", ID: "net-1"},
		{Name: "storage-net", ID: "net-2"},
	}
	targetNetworks := []forkliftv1beta1.DestinationNetwork{
		{Type: "multus", Name: "management-net", Namespace: "default"},
		{Type: "multus", Name: "storage-net", Namespace: "default"},
		{Type: "multus", Name: "extra-net", Namespace: "default"},
		{Type: "pod"}, // Should be ignored
	}
	pairs, err := createSameNameNetworkPairs(sourceNetworks, targetNetworks)
	if err != nil {
		t.Fatalf("createSameNameNetworkPairs() error = %v", err)
	}
	if len(pairs) != 2 {
		t.Errorf("Expected 2 pairs, got %d", len(pairs))
	}
	// Verify mappings
	expectedMappings := map[string]string{
		"management-net": "management-net",
		"storage-net":    "storage-net",
	}
	for _, pair := range pairs {
		expectedTarget, exists := expectedMappings[pair.Source.Name]
		if !exists {
			t.Errorf("Unexpected source network: %s", pair.Source.Name)
			continue
		}
		if pair.Destination.Name != expectedTarget {
			t.Errorf("Source %s mapped to %s, expected %s",
				pair.Source.Name, pair.Destination.Name, expectedTarget)
		}
		if pair.Destination.Type != "multus" {
			t.Errorf("Expected multus type for network %s, got %s",
				pair.Source.Name, pair.Destination.Type)
		}
	}
}
// TestParseDefaultNetwork covers all recognized default-network spec forms:
// "default" (pod), "ignored", "namespace/name", "/name" (empty namespace
// falls back to the plan namespace), and a bare name.
func TestParseDefaultNetwork(t *testing.T) {
	tests := []struct {
		name                 string
		defaultTargetNetwork string
		namespace            string // plan namespace used as fallback
		expectedType         string
		expectedName         string
		expectedNamespace    string
	}{
		{
			name:                 "Default pod networking",
			defaultTargetNetwork: "default",
			namespace:            "test-ns",
			expectedType:         "pod",
			expectedName:         "",
			expectedNamespace:    "",
		},
		{
			name:                 "Ignored network",
			defaultTargetNetwork: "ignored",
			namespace:            "test-ns",
			expectedType:         "ignored",
			expectedName:         "",
			expectedNamespace:    "",
		},
		{
			name:                 "Namespace/name format",
			defaultTargetNetwork: "custom-ns/custom-net",
			namespace:            "test-ns",
			expectedType:         "multus",
			expectedName:         "custom-net",
			expectedNamespace:    "custom-ns",
		},
		{
			name:                 "Empty namespace in format",
			defaultTargetNetwork: "/custom-net",
			namespace:            "test-ns",
			expectedType:         "multus",
			expectedName:         "custom-net",
			expectedNamespace:    "test-ns",
		},
		{
			name:                 "Just name",
			defaultTargetNetwork: "custom-net",
			namespace:            "test-ns",
			expectedType:         "multus",
			expectedName:         "custom-net",
			expectedNamespace:    "test-ns",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := parseDefaultNetwork(tt.defaultTargetNetwork, tt.namespace)
			if result.Type != tt.expectedType {
				t.Errorf("Type: got %s, want %s", result.Type, tt.expectedType)
			}
			if result.Name != tt.expectedName {
				t.Errorf("Name: got %s, want %s", result.Name, tt.expectedName)
			}
			if result.Namespace != tt.expectedNamespace {
				t.Errorf("Namespace: got %s, want %s", result.Namespace, tt.expectedNamespace)
			}
		})
	}
}
// TestOpenShiftNetworkMapper_ImplementsInterface is a compile-time assertion
// that OpenShiftNetworkMapper satisfies the mapper.NetworkMapper interface.
func TestOpenShiftNetworkMapper_ImplementsInterface(t *testing.T) {
	var _ networkmapper.NetworkMapper = &OpenShiftNetworkMapper{}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/openstack/mapper.go | Go | package openstack
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// OpenStackNetworkMapper implements network mapping for OpenStack providers.
// It uses only the generic default behavior (no same-name matching).
type OpenStackNetworkMapper struct{}

// NewOpenStackNetworkMapper creates a new OpenStack network mapper
// behind the generic mapper.NetworkMapper interface.
func NewOpenStackNetworkMapper() mapper.NetworkMapper {
	return &OpenStackNetworkMapper{}
}
// CreateNetworkPairs creates network mapping pairs using generic logic (no
// same-name matching): the first source network is mapped to the resolved
// default target, every other source is explicitly ignored. Returns a nil
// slice when there are no source networks.
func (m *OpenStackNetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: OpenStack network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return nil, nil
	}
	// Resolve the one destination used for the first source network.
	defaultDestination := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)
	var pairs []forkliftv1beta1.NetworkPair
	for idx, src := range sourceNetworks {
		// First source -> default destination; all others -> "ignored".
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if idx == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	return pairs, nil
}
// findDefaultTargetNetwork finds the default target network using the original
// priority logic: explicit user choice > first multus target > pod networking.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: an explicit user-supplied default always wins.
	if userDefault := opts.DefaultTargetNetwork; userDefault != "" {
		parsed := parseDefaultNetwork(userDefault, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			parsed.Namespace, parsed.Name, parsed.Type)
		return parsed
	}
	// Priority 2: the first multus network among the targets.
	for _, candidate := range targetNetworks {
		if candidate.Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			candidate.Namespace, candidate.Name)
		return candidate
	}
	// Priority 3: pod networking when nothing better is available.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork parses a default network specification (from original mapper).
//
// Recognized forms: "default" (pod networking), "ignored", "namespace/name"
// (empty namespace part falls back to the plan namespace), or a bare name in
// the plan namespace.
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	parts := strings.Split(defaultTargetNetwork, "/")
	if len(parts) != 2 {
		// Not exactly "namespace/name": treat the whole spec as a bare
		// network name living in the plan namespace.
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Name:      defaultTargetNetwork,
			Namespace: namespace,
		}
	}
	// "namespace/name" form; an empty namespace part means the plan namespace.
	targetNamespace := parts[0]
	if targetNamespace == "" {
		targetNamespace = namespace
	}
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      parts[1],
		Namespace: targetNamespace,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/ova/mapper.go | Go | package ova
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// OVANetworkMapper implements network mapping for OVA providers.
// It uses only the generic default behavior (no same-name matching).
type OVANetworkMapper struct{}

// NewOVANetworkMapper creates a new OVA network mapper
// behind the generic mapper.NetworkMapper interface.
func NewOVANetworkMapper() mapper.NetworkMapper {
	return &OVANetworkMapper{}
}
// CreateNetworkPairs creates network mapping pairs using generic logic (no
// same-name matching): the first source network is mapped to the resolved
// default target, every other source is explicitly ignored. Returns a nil
// slice when there are no source networks.
func (m *OVANetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: OVA network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return nil, nil
	}
	// Resolve the one destination used for the first source network.
	defaultDestination := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)
	var pairs []forkliftv1beta1.NetworkPair
	for idx, src := range sourceNetworks {
		// First source -> default destination; all others -> "ignored".
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if idx == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	return pairs, nil
}
// findDefaultTargetNetwork finds the default target network using the original
// priority logic: explicit user choice > first multus target > pod networking.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: an explicit user-supplied default always wins.
	if userDefault := opts.DefaultTargetNetwork; userDefault != "" {
		parsed := parseDefaultNetwork(userDefault, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			parsed.Namespace, parsed.Name, parsed.Type)
		return parsed
	}
	// Priority 2: the first multus network among the targets.
	for _, candidate := range targetNetworks {
		if candidate.Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			candidate.Namespace, candidate.Name)
		return candidate
	}
	// Priority 3: pod networking when nothing better is available.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork parses a default network specification (from original mapper).
//
// Recognized forms: "default" (pod networking), "ignored", "namespace/name"
// (empty namespace part falls back to the plan namespace), or a bare name in
// the plan namespace.
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	parts := strings.Split(defaultTargetNetwork, "/")
	if len(parts) != 2 {
		// Not exactly "namespace/name": treat the whole spec as a bare
		// network name living in the plan namespace.
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Name:      defaultTargetNetwork,
			Namespace: namespace,
		}
	}
	// "namespace/name" form; an empty namespace part means the plan namespace.
	targetNamespace := parts[0]
	if targetNamespace == "" {
		targetNamespace = namespace
	}
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      parts[1],
		Namespace: targetNamespace,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/ovirt/mapper.go | Go | package ovirt
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// OvirtNetworkMapper implements network mapping for oVirt providers.
// It uses only the generic default behavior (no same-name matching).
type OvirtNetworkMapper struct{}

// NewOvirtNetworkMapper creates a new oVirt network mapper
// behind the generic mapper.NetworkMapper interface.
func NewOvirtNetworkMapper() mapper.NetworkMapper {
	return &OvirtNetworkMapper{}
}
// CreateNetworkPairs creates network mapping pairs using generic logic (no
// same-name matching): the first source network is mapped to the resolved
// default target, every other source is explicitly ignored. Returns a nil
// slice when there are no source networks.
func (m *OvirtNetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: oVirt network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return nil, nil
	}
	// Resolve the one destination used for the first source network.
	defaultDestination := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDestination.Namespace, defaultDestination.Name, defaultDestination.Type)
	var pairs []forkliftv1beta1.NetworkPair
	for idx, src := range sourceNetworks {
		// First source -> default destination; all others -> "ignored".
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if idx == 0 {
			dest = defaultDestination
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	return pairs, nil
}
// findDefaultTargetNetwork finds the default target network using the original
// priority logic: explicit user choice > first multus target > pod networking.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: an explicit user-supplied default always wins.
	if userDefault := opts.DefaultTargetNetwork; userDefault != "" {
		parsed := parseDefaultNetwork(userDefault, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			parsed.Namespace, parsed.Name, parsed.Type)
		return parsed
	}
	// Priority 2: the first multus network among the targets.
	for _, candidate := range targetNetworks {
		if candidate.Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			candidate.Namespace, candidate.Name)
		return candidate
	}
	// Priority 3: pod networking when nothing better is available.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork parses a default network specification (from original mapper).
//
// Recognized forms: "default" (pod networking), "ignored", "namespace/name"
// (empty namespace part falls back to the plan namespace), or a bare name in
// the plan namespace.
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}
	parts := strings.Split(defaultTargetNetwork, "/")
	if len(parts) != 2 {
		// Not exactly "namespace/name": treat the whole spec as a bare
		// network name living in the plan namespace.
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Name:      defaultTargetNetwork,
			Namespace: namespace,
		}
	}
	// "namespace/name" form; an empty namespace part means the plan namespace.
	targetNamespace := parts[0]
	if targetNamespace == "" {
		targetNamespace = namespace
	}
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      parts[1],
		Namespace: targetNamespace,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/vsphere/mapper.go | Go | package vsphere
import (
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// VSphereNetworkMapper implements network mapping for vSphere providers.
// It applies the generic default strategy (first source network mapped to a
// default target, all others ignored) rather than name-based matching.
type VSphereNetworkMapper struct{}

// NewVSphereNetworkMapper creates a new vSphere network mapper.
func NewVSphereNetworkMapper() mapper.NetworkMapper {
	return &VSphereNetworkMapper{}
}
// CreateNetworkPairs creates network mapping pairs using generic logic (no same-name matching).
// The first source network is mapped to the default target network chosen by
// findDefaultTargetNetwork; every remaining source network is mapped to an
// "ignored" destination. An empty source list yields an empty result.
func (m *VSphereNetworkMapper) CreateNetworkPairs(sourceNetworks []ref.Ref, targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) ([]forkliftv1beta1.NetworkPair, error) {
	klog.V(4).Infof("DEBUG: vSphere network mapper - Creating network pairs - %d source networks, %d target networks", len(sourceNetworks), len(targetNetworks))

	var pairs []forkliftv1beta1.NetworkPair
	if len(sourceNetworks) == 0 {
		klog.V(4).Infof("DEBUG: No source networks to map")
		return pairs, nil
	}

	// Choose the destination used for the very first source network.
	defaultDest := findDefaultTargetNetwork(targetNetworks, opts)
	klog.V(4).Infof("DEBUG: Selected default target network: %s/%s (%s)",
		defaultDest.Namespace, defaultDest.Name, defaultDest.Type)

	pairs = make([]forkliftv1beta1.NetworkPair, 0, len(sourceNetworks))
	for idx, src := range sourceNetworks {
		// Default: every source beyond the first is ignored.
		dest := forkliftv1beta1.DestinationNetwork{Type: "ignored"}
		if idx == 0 {
			dest = defaultDest
			klog.V(4).Infof("DEBUG: Mapping first source network %s to default target %s/%s (%s)",
				src.Name, dest.Namespace, dest.Name, dest.Type)
		} else {
			klog.V(4).Infof("DEBUG: Setting source network %s to ignored", src.Name)
		}
		pairs = append(pairs, forkliftv1beta1.NetworkPair{
			Source:      src,
			Destination: dest,
		})
	}
	return pairs, nil
}
// findDefaultTargetNetwork finds the default target network using the original
// priority logic:
//  1. a user-supplied default target network, if any;
//  2. the first available multus network on the target;
//  3. pod networking as the final fallback.
func findDefaultTargetNetwork(targetNetworks []forkliftv1beta1.DestinationNetwork, opts mapper.NetworkMappingOptions) forkliftv1beta1.DestinationNetwork {
	// Priority 1: honor an explicitly requested default.
	if userSpec := opts.DefaultTargetNetwork; userSpec != "" {
		dest := parseDefaultNetwork(userSpec, opts.Namespace)
		klog.V(4).Infof("DEBUG: Using user-defined default target network: %s/%s (%s)",
			dest.Namespace, dest.Name, dest.Type)
		return dest
	}

	// Priority 2: the first multus network wins.
	for _, candidate := range targetNetworks {
		if candidate.Type != "multus" {
			continue
		}
		klog.V(4).Infof("DEBUG: Using first available multus network as default: %s/%s",
			candidate.Namespace, candidate.Name)
		return candidate
	}

	// Priority 3: fall back to pod networking.
	klog.V(4).Infof("DEBUG: No user-defined or multus networks available, falling back to pod networking")
	return forkliftv1beta1.DestinationNetwork{Type: "pod"}
}
// parseDefaultNetwork parses a default network specification (from original mapper).
// "default" selects pod networking, "ignored" an ignored destination,
// "namespace/name" a multus network (plan namespace when the namespace part
// is empty), and any other value a multus network in the plan namespace.
func parseDefaultNetwork(defaultTargetNetwork, namespace string) forkliftv1beta1.DestinationNetwork {
	switch defaultTargetNetwork {
	case "default":
		// "default" means the pod network.
		return forkliftv1beta1.DestinationNetwork{Type: "pod"}
	case "ignored":
		return forkliftv1beta1.DestinationNetwork{Type: "ignored"}
	}

	// "namespace/name" selects a multus network in an explicit namespace.
	if parts := strings.Split(defaultTargetNetwork, "/"); len(parts) == 2 {
		ns := parts[0]
		if ns == "" {
			// Empty namespace part ("/name") falls back to the plan namespace.
			ns = namespace
		}
		return forkliftv1beta1.DestinationNetwork{
			Type:      "multus",
			Name:      parts[1],
			Namespace: ns,
		}
	}

	// A bare name is a multus network in the plan namespace.
	return forkliftv1beta1.DestinationNetwork{
		Type:      "multus",
		Name:      defaultTargetNetwork,
		Namespace: namespace,
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/network/mapper/vsphere/mapper_test.go | Go | package vsphere
import (
"testing"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
networkmapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network/mapper"
)
// TestVSphereNetworkMapper_CreateNetworkPairs_DefaultBehavior is a table test
// for the default mapping strategy: the first source network maps to the
// selected default target (user-defined > first multus > pod fallback) and
// every other source network is mapped to "ignored".
func TestVSphereNetworkMapper_CreateNetworkPairs_DefaultBehavior(t *testing.T) {
	tests := []struct {
		name                 string
		sourceNetworks       []ref.Ref
		targetNetworks       []forkliftv1beta1.DestinationNetwork
		defaultTargetNetwork string // user-supplied --default-target-network, empty for auto-selection
		namespace            string
		expectedPairs        int
		expectedFirstTarget  string
		expectedFirstType    string
		expectedOthersType   string // empty skips the "others are ignored" check
	}{
		{
			name: "First source maps to first multus, others to ignored",
			sourceNetworks: []ref.Ref{
				{Name: "VM Network", ID: "net-1"},
				{Name: "Management Network", ID: "net-2"},
				{Name: "Storage Network", ID: "net-3"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "multus-net", Namespace: "default"},
				{Type: "multus", Name: "another-net", Namespace: "default"},
			},
			namespace:           "default",
			expectedPairs:       3,
			expectedFirstTarget: "multus-net",
			expectedFirstType:   "multus",
			expectedOthersType:  "ignored",
		},
		{
			name: "User-defined default network",
			sourceNetworks: []ref.Ref{
				{Name: "VM Network", ID: "net-1"},
				{Name: "Management Network", ID: "net-2"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "multus", Name: "auto-selected", Namespace: "default"},
			},
			defaultTargetNetwork: "custom-ns/custom-net",
			namespace:            "default",
			expectedPairs:        2,
			expectedFirstTarget:  "custom-net",
			expectedFirstType:    "multus",
			expectedOthersType:   "ignored",
		},
		{
			name: "Fallback to pod networking",
			sourceNetworks: []ref.Ref{
				{Name: "VM Network", ID: "net-1"},
			},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{
				{Type: "pod"},
			},
			namespace:           "default",
			expectedPairs:       1,
			expectedFirstTarget: "",
			expectedFirstType:   "pod",
		},
		{
			name:           "Empty sources",
			sourceNetworks: []ref.Ref{},
			targetNetworks: []forkliftv1beta1.DestinationNetwork{{Type: "multus", Name: "any"}},
			namespace:      "default",
			expectedPairs:  0,
			expectedFirstTarget: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			networkMapper := NewVSphereNetworkMapper()
			opts := networkmapper.NetworkMappingOptions{
				DefaultTargetNetwork: tt.defaultTargetNetwork,
				Namespace:            tt.namespace,
			}
			pairs, err := networkMapper.CreateNetworkPairs(tt.sourceNetworks, tt.targetNetworks, opts)
			if err != nil {
				t.Fatalf("CreateNetworkPairs() error = %v", err)
			}
			if len(pairs) != tt.expectedPairs {
				t.Errorf("CreateNetworkPairs() got %d pairs, want %d", len(pairs), tt.expectedPairs)
			}
			// Verify first source mapping
			if len(pairs) > 0 {
				firstPair := pairs[0]
				if firstPair.Destination.Name != tt.expectedFirstTarget {
					t.Errorf("First pair target name: got %s, want %s",
						firstPair.Destination.Name, tt.expectedFirstTarget)
				}
				if firstPair.Destination.Type != tt.expectedFirstType {
					t.Errorf("First pair target type: got %s, want %s",
						firstPair.Destination.Type, tt.expectedFirstType)
				}
			}
			// Verify all other sources map to "ignored" (generic behavior)
			if tt.expectedOthersType != "" {
				for i := 1; i < len(pairs); i++ {
					if pairs[i].Destination.Type != tt.expectedOthersType {
						t.Errorf("Pair %d type: got %s, want %s",
							i, pairs[i].Destination.Type, tt.expectedOthersType)
					}
				}
			}
			// Verify source names are preserved
			for i, pair := range pairs {
				if i < len(tt.sourceNetworks) {
					if pair.Source.Name != tt.sourceNetworks[i].Name {
						t.Errorf("Pair %d: source name %s != expected %s",
							i, pair.Source.Name, tt.sourceNetworks[i].Name)
					}
				}
			}
		})
	}
}
// TestVSphereNetworkMapper_NoSameNameMatching verifies that the vSphere
// mapper never performs same-name matching: the first source maps to the
// first/default target and every other source is ignored, even when a target
// network shares a source network's name.
func TestVSphereNetworkMapper_NoSameNameMatching(t *testing.T) {
	m := NewVSphereNetworkMapper()

	sources := []ref.Ref{
		{Name: "identical-name", ID: "net-1"},
		{Name: "another-name", ID: "net-2"},
	}
	targets := []forkliftv1beta1.DestinationNetwork{
		{Type: "multus", Name: "identical-name", Namespace: "default"}, // same name as a source
		{Type: "multus", Name: "different-name", Namespace: "default"},
	}
	options := networkmapper.NetworkMappingOptions{
		Namespace:          "default",
		SourceProviderType: "vsphere",
		TargetProviderType: "openshift",
	}

	pairs, err := m.CreateNetworkPairs(sources, targets, options)
	if err != nil {
		t.Fatalf("CreateNetworkPairs() error = %v", err)
	}
	if got := len(pairs); got != 2 {
		t.Fatalf("Expected 2 pairs, got %d", got)
	}

	first, second := pairs[0].Destination, pairs[1].Destination

	// First source maps to the first target — by position, not by name.
	if first.Name != "identical-name" {
		t.Errorf("First pair: got target %s, want identical-name (first available)",
			first.Name)
	}
	if first.Type != "multus" {
		t.Errorf("First pair: got type %s, want multus", first.Type)
	}
	// Every subsequent source is ignored.
	if second.Type != "ignored" {
		t.Errorf("Second pair: got type %s, want ignored", second.Type)
	}
}
// TestVSphereNetworkMapper_ImplementsInterface is a compile-time assertion
// that *VSphereNetworkMapper satisfies the NetworkMapper interface.
func TestVSphereNetworkMapper_ImplementsInterface(t *testing.T) {
	var _ networkmapper.NetworkMapper = &VSphereNetworkMapper{}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/plan.go | Go | package plan
import (
"context"
"encoding/json"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/plan"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/provider"
mapping "github.com/yaacov/kubectl-mtv/pkg/cmd/create/mapping"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/network"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/defaultprovider"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// CreatePlanOptions encapsulates the parameters for the Create function.
type CreatePlanOptions struct {
	// Name and Namespace identify the Plan resource to create.
	Name                      string
	Namespace                 string
	// Provider references; each may be given as "name" or "namespace/name".
	// The *Namespace fields below are filled in by Create after parsing.
	SourceProvider            string
	TargetProvider            string
	SourceProviderNamespace   string // parsed from SourceProvider if it contains namespace/name pattern
	TargetProviderNamespace   string // parsed from TargetProvider if it contains namespace/name pattern
	// Existing map names; when empty, Create builds default maps (or maps
	// from NetworkPairs/StoragePairs below).
	NetworkMapping            string
	StorageMapping            string
	// Inventory service access used for VM validation and default mapping.
	InventoryURL              string
	InventoryInsecureSkipTLS  bool
	// Defaults applied when generating maps automatically.
	DefaultTargetNetwork      string
	DefaultTargetStorageClass string
	// PlanSpec is the spec the created Plan is built from (VM list, flags, …).
	PlanSpec                  forkliftv1beta1.PlanSpec
	ConfigFlags               *genericclioptions.ConfigFlags
	// Explicit mapping pairs, used instead of inventory-derived defaults.
	NetworkPairs              string
	StoragePairs              string
	// Storage enhancement options
	DefaultVolumeMode    string
	DefaultAccessMode    string
	DefaultOffloadPlugin string
	DefaultOffloadSecret string
	DefaultOffloadVendor string
	// Offload secret creation fields
	OffloadVSphereUsername string
	OffloadVSpherePassword string
	OffloadVSphereURL      string
	OffloadStorageUsername string
	OffloadStoragePassword string
	OffloadStorageEndpoint string
	OffloadCACert          string
	OffloadInsecureSkipTLS bool
}
// parseProviderName parses a provider name that might contain a
// "namespace/name" pattern and returns the name and namespace separately.
// If no namespace is specified, the default namespace is returned.
// Both parts are whitespace-trimmed.
func parseProviderName(providerName, defaultNamespace string) (name, namespace string) {
	if !strings.Contains(providerName, "/") {
		// Plain name: fall back to the supplied default namespace.
		return strings.TrimSpace(providerName), defaultNamespace
	}
	// Split only on the first "/" so the name part may itself contain one.
	parts := strings.SplitN(providerName, "/", 2)
	return strings.TrimSpace(parts[1]), strings.TrimSpace(parts[0])
}
// Create creates a new migration plan.
//
// Workflow:
//  1. Resolve source/target provider names (the target defaults to the first
//     OpenShift provider when unset) and validate the requested VMs against
//     the source provider inventory.
//  2. Create default network/storage maps when none were supplied, either
//     from explicit pairs or from inventory-based defaults. Storage mapping
//     is skipped for conversion-only migrations.
//  3. Create the Plan resource; any maps created in step 2 are deleted again
//     if this fails.
//  4. Patch back boolean spec fields that MTV force-defaults to true, and
//     make the plan the owner of maps created here.
//
// Fix: all plan API calls now use the caller-supplied ctx instead of
// context.TODO(), so cancellation and deadlines propagate.
func Create(ctx context.Context, opts CreatePlanOptions) error {
	c, err := client.GetDynamicClient(opts.ConfigFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	// Parse provider names to extract namespaces and names.
	sourceProviderName, sourceProviderNamespace := parseProviderName(opts.SourceProvider, opts.Namespace)
	opts.SourceProvider = sourceProviderName
	opts.SourceProviderNamespace = sourceProviderNamespace

	// If the plan already exists, return an error.
	_, err = c.Resource(client.PlansGVR).Namespace(opts.Namespace).Get(ctx, opts.Name, metav1.GetOptions{})
	if err == nil {
		return fmt.Errorf("plan '%s' already exists in namespace '%s'", opts.Name, opts.Namespace)
	} else if !errors.IsNotFound(err) {
		return fmt.Errorf("failed to check if plan exists: %v", err)
	}

	// If target provider is not provided, find the first OpenShift provider.
	if opts.TargetProvider == "" {
		defaultProvider, err := defaultprovider.GetDefaultOpenShiftProvider(opts.ConfigFlags, opts.Namespace)
		if err != nil {
			return fmt.Errorf("failed to get default target provider: %v", err)
		}
		opts.TargetProvider = defaultProvider
		fmt.Printf("No target provider specified, using default OpenShift provider: %s\n", opts.TargetProvider)
	}

	// Parse target provider name to extract namespace and name. This must
	// happen after the default-provider fallback above, which may set it.
	targetProviderName, targetProviderNamespace := parseProviderName(opts.TargetProvider, opts.Namespace)
	opts.TargetProvider = targetProviderName
	opts.TargetProviderNamespace = targetProviderNamespace

	// Validate that VMs exist in the source provider.
	if err := validateVMs(ctx, opts.ConfigFlags, &opts); err != nil {
		return fmt.Errorf("VM validation failed: %v", err)
	}

	// Track which maps we create for cleanup if needed.
	createdNetworkMap := false
	createdStorageMap := false

	// cleanupCreatedMaps best-effort deletes any maps this invocation created;
	// used on every failure path after map creation.
	cleanupCreatedMaps := func() {
		if createdNetworkMap {
			if delErr := deleteMap(opts.ConfigFlags, client.NetworkMapGVR, opts.NetworkMapping, opts.Namespace); delErr != nil {
				fmt.Printf("Warning: failed to delete network map: %v\n", delErr)
			}
		}
		if createdStorageMap {
			if delErr := deleteMap(opts.ConfigFlags, client.StorageMapGVR, opts.StorageMapping, opts.Namespace); delErr != nil {
				fmt.Printf("Warning: failed to delete storage map: %v\n", delErr)
			}
		}
	}

	// providerRef renders a provider reference for mapping creation,
	// namespace-qualified only when it differs from the plan namespace.
	providerRef := func(name, namespace string) string {
		if namespace != opts.Namespace {
			return fmt.Sprintf("%s/%s", namespace, name)
		}
		return name
	}

	// Extract VM names from the plan for default map generation.
	var planVMNames []string
	for _, planVM := range opts.PlanSpec.VMs {
		planVMNames = append(planVMNames, planVM.Name)
	}

	// If target namespace is not provided, use the plan's namespace.
	// This must happen before creating network/storage maps so they can use it.
	if opts.PlanSpec.TargetNamespace == "" {
		opts.PlanSpec.TargetNamespace = opts.Namespace
		fmt.Printf("No target namespace specified, using plan namespace: %s\n", opts.PlanSpec.TargetNamespace)
	}

	// If network map is not provided, create one: either from explicit pairs
	// or a default map derived from the inventory.
	if opts.NetworkMapping == "" {
		if opts.NetworkPairs != "" {
			networkMapName := fmt.Sprintf("%s-network", opts.Name)
			err := mapping.CreateNetwork(opts.ConfigFlags, networkMapName, opts.Namespace,
				providerRef(opts.SourceProvider, opts.SourceProviderNamespace),
				providerRef(opts.TargetProvider, opts.TargetProviderNamespace),
				opts.NetworkPairs, opts.InventoryURL)
			if err != nil {
				return fmt.Errorf("failed to create network map from pairs: %v", err)
			}
			opts.NetworkMapping = networkMapName
			createdNetworkMap = true
			fmt.Printf("Created network mapping '%s' from provided pairs\n", networkMapName)
		} else {
			networkMapName, err := network.CreateNetworkMap(ctx, network.NetworkMapperOptions{
				Name:                     opts.Name,
				Namespace:                opts.Namespace,
				TargetNamespace:          opts.PlanSpec.TargetNamespace,
				SourceProvider:           opts.SourceProvider,
				SourceProviderNamespace:  opts.SourceProviderNamespace,
				TargetProvider:           opts.TargetProvider,
				TargetProviderNamespace:  opts.TargetProviderNamespace,
				ConfigFlags:              opts.ConfigFlags,
				InventoryURL:             opts.InventoryURL,
				InventoryInsecureSkipTLS: opts.InventoryInsecureSkipTLS,
				PlanVMNames:              planVMNames,
				DefaultTargetNetwork:     opts.DefaultTargetNetwork,
			})
			if err != nil {
				return fmt.Errorf("failed to create default network map: %v", err)
			}
			opts.NetworkMapping = networkMapName
			createdNetworkMap = true
		}
	}

	// If storage map is not provided, create one the same way.
	// Skip storage mapping for conversion-only migrations.
	if opts.StorageMapping == "" && opts.PlanSpec.Type != forkliftv1beta1.MigrationOnlyConversion {
		if opts.StoragePairs != "" {
			storageMapName := fmt.Sprintf("%s-storage", opts.Name)
			err := mapping.CreateStorageWithOptions(mapping.StorageCreateOptions{
				ConfigFlags:              opts.ConfigFlags,
				Name:                     storageMapName,
				Namespace:                opts.Namespace,
				SourceProvider:           providerRef(opts.SourceProvider, opts.SourceProviderNamespace),
				TargetProvider:           providerRef(opts.TargetProvider, opts.TargetProviderNamespace),
				StoragePairs:             opts.StoragePairs,
				InventoryURL:             opts.InventoryURL,
				InventoryInsecureSkipTLS: opts.InventoryInsecureSkipTLS,
				DefaultVolumeMode:        opts.DefaultVolumeMode,
				DefaultAccessMode:        opts.DefaultAccessMode,
				DefaultOffloadPlugin:     opts.DefaultOffloadPlugin,
				DefaultOffloadSecret:     opts.DefaultOffloadSecret,
				DefaultOffloadVendor:     opts.DefaultOffloadVendor,
				// Offload secret creation options
				OffloadVSphereUsername: opts.OffloadVSphereUsername,
				OffloadVSpherePassword: opts.OffloadVSpherePassword,
				OffloadVSphereURL:      opts.OffloadVSphereURL,
				OffloadStorageUsername: opts.OffloadStorageUsername,
				OffloadStoragePassword: opts.OffloadStoragePassword,
				OffloadStorageEndpoint: opts.OffloadStorageEndpoint,
				OffloadCACert:          opts.OffloadCACert,
				OffloadInsecureSkipTLS: opts.OffloadInsecureSkipTLS,
			})
			if err != nil {
				// Roll back the network map if we created it above.
				cleanupCreatedMaps()
				return fmt.Errorf("failed to create storage map from pairs: %v", err)
			}
			opts.StorageMapping = storageMapName
			createdStorageMap = true
			fmt.Printf("Created storage mapping '%s' from provided pairs\n", storageMapName)
		} else {
			storageMapName, err := storage.CreateStorageMap(ctx, storage.StorageMapperOptions{
				Name:                      opts.Name,
				Namespace:                 opts.Namespace,
				SourceProvider:            opts.SourceProvider,
				SourceProviderNamespace:   opts.SourceProviderNamespace,
				TargetProvider:            opts.TargetProvider,
				TargetProviderNamespace:   opts.TargetProviderNamespace,
				ConfigFlags:               opts.ConfigFlags,
				InventoryURL:              opts.InventoryURL,
				PlanVMNames:               planVMNames,
				DefaultTargetStorageClass: opts.DefaultTargetStorageClass,
			})
			if err != nil {
				// Roll back the network map if we created it above.
				cleanupCreatedMaps()
				return fmt.Errorf("failed to create default storage map: %v", err)
			}
			opts.StorageMapping = storageMapName
			createdStorageMap = true
		}
	}

	// Create a new Plan object using the PlanSpec.
	planObj := &forkliftv1beta1.Plan{
		ObjectMeta: metav1.ObjectMeta{
			Name:      opts.Name,
			Namespace: opts.Namespace,
		},
		Spec: opts.PlanSpec,
	}

	// Set provider references.
	planObj.Spec.Provider = provider.Pair{
		Source: corev1.ObjectReference{
			Kind:       "Provider",
			APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
			Name:       opts.SourceProvider,
			Namespace:  opts.SourceProviderNamespace,
		},
		Destination: corev1.ObjectReference{
			Kind:       "Provider",
			APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
			Name:       opts.TargetProvider,
			Namespace:  opts.TargetProviderNamespace,
		},
	}

	// Set map references.
	planObj.Spec.Map = plan.Map{
		Network: corev1.ObjectReference{
			Kind:       "NetworkMap",
			APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
			Name:       opts.NetworkMapping,
			Namespace:  opts.Namespace,
		},
	}
	// Only set storage mapping for non-conversion migrations.
	if opts.PlanSpec.Type != forkliftv1beta1.MigrationOnlyConversion {
		planObj.Spec.Map.Storage = corev1.ObjectReference{
			Kind:       "StorageMap",
			APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
			Name:       opts.StorageMapping,
			Namespace:  opts.Namespace,
		}
	}

	planObj.Kind = "Plan"
	planObj.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()

	// Convert Plan object to Unstructured for the dynamic client.
	unstructuredPlan, err := runtime.DefaultUnstructuredConverter.ToUnstructured(planObj)
	if err != nil {
		cleanupCreatedMaps()
		return fmt.Errorf("failed to convert Plan to Unstructured: %v", err)
	}
	planUnstructured := &unstructured.Unstructured{Object: unstructuredPlan}

	// Create the plan in the specified namespace.
	createdPlan, err := c.Resource(client.PlansGVR).Namespace(opts.Namespace).Create(ctx, planUnstructured, metav1.CreateOptions{})
	if err != nil {
		cleanupCreatedMaps()
		return fmt.Errorf("failed to create plan: %v", err)
	}

	// MTV automatically sets several boolean spec fields to true on admission.
	// For each field the caller left false, merge-patch it back to false.
	// Failures are warnings only: the plan itself was already created.
	patchSpecFalse := func(jsonField, displayName string) {
		patch := map[string]interface{}{
			"spec": map[string]interface{}{
				jsonField: false,
			},
		}
		patchBytes, err := json.Marshal(patch)
		if err != nil {
			fmt.Printf("Warning: failed to marshal patch for %s: %v\n", displayName, err)
			return
		}
		_, err = c.Resource(client.PlansGVR).Namespace(opts.Namespace).Patch(
			ctx,
			createdPlan.GetName(),
			types.MergePatchType,
			patchBytes,
			metav1.PatchOptions{},
		)
		if err != nil {
			fmt.Printf("Warning: failed to patch plan for %s: %v\n", displayName, err)
		}
	}
	if !opts.PlanSpec.PVCNameTemplateUseGenerateName {
		patchSpecFalse("pvcNameTemplateUseGenerateName", "PVCNameTemplateUseGenerateName")
	}
	if !opts.PlanSpec.MigrateSharedDisks {
		patchSpecFalse("migrateSharedDisks", "MigrateSharedDisks")
	}
	if !opts.PlanSpec.UseCompatibilityMode {
		patchSpecFalse("useCompatibilityMode", "UseCompatibilityMode")
	}
	if !opts.PlanSpec.PreserveStaticIPs {
		patchSpecFalse("preserveStaticIPs", "PreserveStaticIPs")
	}
	if !opts.PlanSpec.RunPreflightInspection {
		patchSpecFalse("runPreflightInspection", "RunPreflightInspection")
	}

	// Set ownership of maps if we created them, so they are garbage-collected
	// together with the plan.
	if createdNetworkMap {
		if err := setMapOwnership(opts.ConfigFlags, createdPlan, client.NetworkMapGVR, opts.NetworkMapping, opts.Namespace); err != nil {
			fmt.Printf("Warning: failed to set ownership for network map: %v\n", err)
		}
	}
	if createdStorageMap {
		if err := setMapOwnership(opts.ConfigFlags, createdPlan, client.StorageMapGVR, opts.StorageMapping, opts.Namespace); err != nil {
			fmt.Printf("Warning: failed to set ownership for storage map: %v\n", err)
		}
	}

	fmt.Printf("plan/%s created\n", opts.Name)
	return nil
}
// validateVMs validates that all VMs in the VMList exist in the source provider,
// sets their IDs based on the names, and removes any that don't exist.
// Returns an error if no valid VMs remain.
//
// Mutates opts.PlanSpec.VMs in place. Resolution happens in two passes: VMs
// that already carry an ID are kept first (name backfilled from inventory),
// then name-only VMs are resolved to IDs (with a fallback that treats an
// unresolvable name as a possible ID). NOTE(review): because of the two
// passes, the resulting VM order may differ from the caller's original list.
func validateVMs(ctx context.Context, configFlags *genericclioptions.ConfigFlags, opts *CreatePlanOptions) error {
	// Fetch source provider using the parsed namespace
	sourceProvider, err := inventory.GetProviderByName(ctx, configFlags, opts.SourceProvider, opts.SourceProviderNamespace)
	if err != nil {
		return fmt.Errorf("failed to get source provider: %v", err)
	}
	// Fetch source VMs inventory
	sourceVMsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, opts.InventoryURL, sourceProvider, "vms", opts.InventoryInsecureSkipTLS)
	if err != nil {
		return fmt.Errorf("failed to fetch source VMs inventory: %v", err)
	}
	// Extract objects from EC2 envelope (EC2 responses wrap the VM list)
	providerType, found, err := unstructured.NestedString(sourceProvider.Object, "spec", "type")
	if err == nil && found && providerType == "ec2" {
		sourceVMsInventory = inventory.ExtractEC2Objects(sourceVMsInventory)
	}
	sourceVMsArray, ok := sourceVMsInventory.([]interface{})
	if !ok {
		return fmt.Errorf("unexpected data format: expected array for source VMs inventory")
	}
	// Create maps for VM names to VM IDs and VM IDs to VM names for lookup
	vmNameToIDMap := make(map[string]string)
	vmIDToNameMap := make(map[string]string)
	vmIDToNamespaceMap := make(map[string]string)
	for _, item := range sourceVMsArray {
		// Skip inventory entries missing the expected name/id string fields.
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok {
			continue
		}
		vmID, ok := vm["id"].(string)
		if !ok {
			continue
		}
		vmNamespace, ok := vm["namespace"].(string)
		if !ok {
			// If namespace is not available, set it to empty
			vmNamespace = ""
		}
		vmNameToIDMap[vmName] = vmID
		vmIDToNameMap[vmID] = vmName
		vmIDToNamespaceMap[vmID] = vmNamespace
	}
	// Process VMs: first those with IDs, then those with only names
	var validVMs []plan.VM
	// First process VMs that already have IDs
	for _, planVM := range opts.PlanSpec.VMs {
		if planVM.ID != "" {
			// Check if VM with this ID exists in inventory
			if vmName, exists := vmIDToNameMap[planVM.ID]; exists {
				// Backfill the name from inventory when it was not supplied
				if planVM.Name == "" {
					planVM.Name = vmName
				}
				validVMs = append(validVMs, planVM)
			} else {
				fmt.Printf("Warning: VM with ID '%s' not found in source provider, removing from plan\n", planVM.ID)
			}
		}
	}
	// Then process VMs that only have names (and need IDs)
	for _, planVM := range opts.PlanSpec.VMs {
		if planVM.ID == "" && planVM.Name != "" {
			vmID, exists := vmNameToIDMap[planVM.Name]
			if exists {
				planVM.ID = vmID
				validVMs = append(validVMs, planVM)
			} else {
				// Fallback: check if the provided name is actually a VM ID
				if vmName, existsAsID := vmIDToNameMap[planVM.Name]; existsAsID {
					// The provided "name" is actually an ID; swap the fields
					planVM.ID = planVM.Name
					planVM.Name = vmName
					validVMs = append(validVMs, planVM)
					fmt.Printf("Info: VM ID '%s' found in source provider (name: '%s')\n", planVM.ID, planVM.Name)
				} else {
					fmt.Printf("Warning: VM with name '%s' not found in source provider, removing from plan\n", planVM.Name)
				}
			}
		}
	}
	// Add namespaces to VMs that don't have them, if available
	for i, planVM := range validVMs {
		if vmNamespace, exists := vmIDToNamespaceMap[planVM.ID]; exists {
			validVMs[i].Namespace = vmNamespace
		}
	}
	// Update the VM list
	opts.PlanSpec.VMs = validVMs
	// Check if any VMs remain
	if len(opts.PlanSpec.VMs) == 0 {
		return fmt.Errorf("no valid VMs found in source provider matching the plan VMs")
	}
	return nil
}
// setMapOwnership sets the plan as the controlling owner of the given map,
// so the map is garbage-collected together with the plan.
func setMapOwnership(configFlags *genericclioptions.ConfigFlags, plan *unstructured.Unstructured, mapGVR schema.GroupVersionResource, mapName, namespace string) error {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	// Merge-patch the map's metadata with a controller owner reference
	// pointing back at the plan.
	patchBytes, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"ownerReferences": []metav1.OwnerReference{{
				APIVersion: plan.GetAPIVersion(),
				Kind:       plan.GetKind(),
				Name:       plan.GetName(),
				UID:        plan.GetUID(),
				Controller: boolPtr(true),
			}},
		},
	})
	if err != nil {
		return fmt.Errorf("failed to marshal patch data: %v", err)
	}

	if _, err = dynClient.Resource(mapGVR).Namespace(namespace).Patch(
		context.Background(),
		mapName,
		types.MergePatchType,
		patchBytes,
		metav1.PatchOptions{},
	); err != nil {
		return fmt.Errorf("failed to patch map with owner reference: %v", err)
	}
	return nil
}
// deleteMap removes the named mapping resource from the given namespace.
// A resource that is already gone (NotFound) is treated as success, so the
// call is safe to repeat.
func deleteMap(configFlags *genericclioptions.ConfigFlags, mapGVR schema.GroupVersionResource, mapName, namespace string) error {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	delErr := dynClient.Resource(mapGVR).Namespace(namespace).Delete(context.Background(), mapName, metav1.DeleteOptions{})
	if delErr == nil || errors.IsNotFound(delErr) {
		return nil
	}
	return fmt.Errorf("failed to delete map '%s': %v", mapName, delErr)
}
// boolPtr returns a pointer to a freshly allocated copy of b.
func boolPtr(b bool) *bool {
	v := b
	return &v
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/factory.go | Go | package storage
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/provider"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers"
ec2Fetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/ec2"
hypervFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/hyperv"
openshiftFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/openshift"
openstackFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/openstack"
ovaFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/ova"
ovirtFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/ovirt"
vsphereFetcher "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers/vsphere"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
ec2Mapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/ec2"
hypervMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/hyperv"
openshiftMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/openshift"
openstackMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/openstack"
ovaMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/ova"
ovirtMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/ovirt"
vsphereMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/vsphere"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// StorageMapperInterface defines the interface that all provider-specific storage mappers must implement.
//
// NOTE(review): this interface appears stale — CreateStorageMap in this file
// uses mapper.StorageMapper, whose CreateStoragePairs takes a
// mapper.StorageMappingOptions value instead of a bare storage-class string,
// and nothing visible here implements or consumes StorageMapperInterface.
// Confirm whether it can be removed.
type StorageMapperInterface interface {
	// GetSourceStorages extracts storage information from the source provider for the specified VMs
	GetSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error)
	// GetTargetStorages extracts available storage information from the target provider
	GetTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error)
	// CreateStoragePairs creates storage mapping pairs based on source storages, target storages, and optional default storage class
	CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, defaultTargetStorageClass string) ([]forkliftv1beta1.StoragePair, error)
}
// StorageMapperOptions contains common options for storage mapping.
type StorageMapperOptions struct {
	// Name is the plan name; the created map is named "<Name>-storage-map".
	Name string
	// Namespace is where the StorageMap resource is created.
	Namespace string
	// SourceProvider / TargetProvider name the providers referenced by the map.
	SourceProvider string
	// SourceProviderNamespace overrides Namespace for the source provider
	// lookup (resolved via client.GetProviderNamespace — presumably falls back
	// to Namespace when empty; confirm against that helper).
	SourceProviderNamespace string
	TargetProvider          string
	// TargetProviderNamespace mirrors SourceProviderNamespace for the target.
	TargetProviderNamespace string
	// ConfigFlags carries kubeconfig/context settings for API access.
	ConfigFlags *genericclioptions.ConfigFlags
	// InventoryURL is the inventory service endpoint used by the fetchers.
	InventoryURL string
	// InventoryInsecureSkipTLS skips TLS verification on inventory calls.
	InventoryInsecureSkipTLS bool
	// PlanVMNames lists the plan's VMs; forwarded to source storage fetchers.
	PlanVMNames []string
	// DefaultTargetStorageClass, when non-empty, bypasses target storage
	// discovery and maps all sources onto this class.
	DefaultTargetStorageClass string
}
// CreateStorageMap creates a storage map using the new fetcher-based architecture.
//
// Flow: resolve provider-specific source/target storage fetchers, fetch the
// storages on both sides, pair them via a provider-specific mapper, and create
// the resulting StorageMap resource. Returns the created map's name. When
// opts.DefaultTargetStorageClass is non-empty, target storage discovery is
// skipped entirely and the mapper receives an empty target list.
func CreateStorageMap(ctx context.Context, opts StorageMapperOptions) (string, error) {
	klog.V(4).Infof("DEBUG: Creating storage map - Source: %s, Target: %s, DefaultTargetStorageClass: '%s'",
		opts.SourceProvider, opts.TargetProvider, opts.DefaultTargetStorageClass)
	// Get source storage fetcher using the provider's namespace
	sourceProviderNamespace := client.GetProviderNamespace(opts.SourceProviderNamespace, opts.Namespace)
	sourceFetcher, err := GetSourceStorageFetcher(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get source storage fetcher: %v", err)
	}
	klog.V(4).Infof("DEBUG: Source storage fetcher created for provider: %s", opts.SourceProvider)
	// Get target storage fetcher using the provider's namespace
	targetProviderNamespace := client.GetProviderNamespace(opts.TargetProviderNamespace, opts.Namespace)
	targetFetcher, err := GetTargetStorageFetcher(ctx, opts.ConfigFlags, opts.TargetProvider, targetProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get target storage fetcher: %v", err)
	}
	klog.V(4).Infof("DEBUG: Target storage fetcher created for provider: %s", opts.TargetProvider)
	// Fetch source storages; opts.PlanVMNames is forwarded so fetchers can
	// scope the lookup to the plan's VMs (fetcher-specific behavior).
	sourceStorages, err := sourceFetcher.FetchSourceStorages(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.InventoryURL, opts.PlanVMNames, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to fetch source storages: %v", err)
	}
	klog.V(4).Infof("DEBUG: Fetched %d source storages", len(sourceStorages))
	// Fetch target storages — skipped when an explicit default storage class
	// is given, in which case targetStorages stays nil.
	var targetStorages []forkliftv1beta1.DestinationStorage
	if opts.DefaultTargetStorageClass == "" {
		klog.V(4).Infof("DEBUG: Fetching target storages from target provider: %s", opts.TargetProvider)
		targetStorages, err = targetFetcher.FetchTargetStorages(ctx, opts.ConfigFlags, opts.TargetProvider, targetProviderNamespace, opts.InventoryURL, opts.InventoryInsecureSkipTLS)
		if err != nil {
			return "", fmt.Errorf("failed to fetch target storages: %v", err)
		}
		klog.V(4).Infof("DEBUG: Fetched %d target storages", len(targetStorages))
	} else {
		klog.V(4).Infof("DEBUG: Skipping target storage fetch due to DefaultTargetStorageClass='%s'", opts.DefaultTargetStorageClass)
	}
	// Get provider-specific storage mapper (also yields both provider types)
	storageMapper, sourceProviderType, targetProviderType, err := GetStorageMapper(ctx, opts.ConfigFlags, opts.SourceProvider, sourceProviderNamespace, opts.TargetProvider, targetProviderNamespace, opts.InventoryInsecureSkipTLS)
	if err != nil {
		return "", fmt.Errorf("failed to get storage mapper: %v", err)
	}
	// Create storage pairs using provider-specific mapping logic
	mappingOpts := mapper.StorageMappingOptions{
		DefaultTargetStorageClass: opts.DefaultTargetStorageClass,
		SourceProviderType:        sourceProviderType,
		TargetProviderType:        targetProviderType,
	}
	storagePairs, err := storageMapper.CreateStoragePairs(sourceStorages, targetStorages, mappingOpts)
	if err != nil {
		return "", fmt.Errorf("failed to create storage pairs: %v", err)
	}
	// Create the storage map using the existing infrastructure
	return createStorageMap(opts, storagePairs)
}
// createStorageMap helper function to create the actual StorageMap resource
// from the computed storage pairs. It names the map "<opts.Name>-storage-map"
// and returns that name.
//
// NOTE(review): the create call below uses context.TODO() even though the
// exported CreateStorageMap has a ctx available — consider threading it
// through.
func createStorageMap(opts StorageMapperOptions, storagePairs []forkliftv1beta1.StoragePair) (string, error) {
	// If no storage pairs, create a dummy pair so the map still validates;
	// an empty destination StorageClass means the system default class.
	if len(storagePairs) == 0 {
		klog.V(4).Infof("DEBUG: No storage pairs found, creating dummy pair")
		storagePairs = []forkliftv1beta1.StoragePair{
			{
				Source: ref.Ref{
					Type: "default", // Use "default" type for dummy entry
				},
				Destination: forkliftv1beta1.DestinationStorage{
					// Empty StorageClass means system default
				},
			},
		}
	}
	// Create the storage map name
	storageMapName := opts.Name + "-storage-map"
	// Create StorageMap object referencing both providers
	storageMap := &forkliftv1beta1.StorageMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      storageMapName,
			Namespace: opts.Namespace,
		},
		Spec: forkliftv1beta1.StorageMapSpec{
			Provider: provider.Pair{
				Source: corev1.ObjectReference{
					Kind:       "Provider",
					APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
					Name:       opts.SourceProvider,
					Namespace:  client.GetProviderNamespace(opts.SourceProviderNamespace, opts.Namespace),
				},
				Destination: corev1.ObjectReference{
					Kind:       "Provider",
					APIVersion: forkliftv1beta1.SchemeGroupVersion.String(),
					Name:       opts.TargetProvider,
					Namespace:  client.GetProviderNamespace(opts.TargetProviderNamespace, opts.Namespace),
				},
			},
			Map: storagePairs,
		},
	}
	// TypeMeta is set explicitly so the unstructured conversion below carries
	// kind/apiVersion for the dynamic-client create.
	storageMap.Kind = "StorageMap"
	storageMap.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	// Convert to Unstructured
	unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(storageMap)
	if err != nil {
		return "", fmt.Errorf("failed to convert StorageMap to Unstructured: %v", err)
	}
	storageMapUnstructured := &unstructured.Unstructured{Object: unstructuredMap}
	// Create the storage map
	c, err := client.GetDynamicClient(opts.ConfigFlags)
	if err != nil {
		return "", fmt.Errorf("failed to get client: %v", err)
	}
	_, err = c.Resource(client.StorageMapGVR).Namespace(opts.Namespace).Create(context.TODO(), storageMapUnstructured, metav1.CreateOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to create storage map: %v", err)
	}
	klog.V(4).Infof("DEBUG: Created storage map '%s' with %d storage pairs", storageMapName, len(storagePairs))
	return storageMapName, nil
}
// GetSourceStorageFetcher returns the appropriate source storage fetcher based
// on the provider's declared type (read from its CRD spec).
func GetSourceStorageFetcher(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string, insecureSkipTLS bool) (fetchers.SourceStorageFetcher, error) {
	// Look up the provider object so its type can be inspected.
	prov, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider: %v", err)
	}
	// Note: GetProviderType() only reads from CRD spec (no HTTPS calls), but we pass insecureSkipTLS for consistency
	provClient := inventory.NewProviderClientWithInsecure(configFlags, prov, "", insecureSkipTLS)
	provType, err := provClient.GetProviderType()
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetSourceStorageFetcher - Provider: %s, Type: %s", providerName, provType)
	// Select the fetcher implementation that matches the provider type.
	var fetcher fetchers.SourceStorageFetcher
	switch provType {
	case "ec2":
		klog.V(4).Infof("DEBUG: Using EC2 source storage fetcher for %s", providerName)
		fetcher = ec2Fetcher.NewEC2StorageFetcher()
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack source storage fetcher for %s", providerName)
		fetcher = openstackFetcher.NewOpenStackStorageFetcher()
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using VSphere source storage fetcher for %s", providerName)
		fetcher = vsphereFetcher.NewVSphereStorageFetcher()
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift source storage fetcher for %s", providerName)
		fetcher = openshiftFetcher.NewOpenShiftStorageFetcher()
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA source storage fetcher for %s", providerName)
		fetcher = ovaFetcher.NewOVAStorageFetcher()
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt source storage fetcher for %s", providerName)
		fetcher = ovirtFetcher.NewOvirtStorageFetcher()
	case "hyperv":
		klog.V(4).Infof("DEBUG: Using HyperV source storage fetcher for %s", providerName)
		fetcher = hypervFetcher.NewHyperVStorageFetcher()
	default:
		return nil, fmt.Errorf("unsupported source provider type: %s", provType)
	}
	return fetcher, nil
}
// GetTargetStorageFetcher returns the appropriate target storage fetcher based
// on the provider's declared type (read from its CRD spec). The returned
// fetcher is later used to discover destination storages; note that several
// provider types (e.g. EC2, HyperV) are not realistic migration targets even
// though a fetcher is returned for interface completeness.
func GetTargetStorageFetcher(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace string, insecureSkipTLS bool) (fetchers.TargetStorageFetcher, error) {
	// Get the provider object to determine its type
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get provider: %v", err)
	}
	// Create a provider client to get the provider type
	// Note: GetProviderType() only reads from CRD spec (no HTTPS calls), but we pass insecureSkipTLS for consistency
	providerClient := inventory.NewProviderClientWithInsecure(configFlags, provider, "", insecureSkipTLS)
	providerType, err := providerClient.GetProviderType()
	if err != nil {
		return nil, fmt.Errorf("failed to get provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetTargetStorageFetcher - Provider: %s, Type: %s", providerName, providerType)
	// Return the appropriate fetcher based on provider type
	switch providerType {
	case "ec2":
		// Note: EC2 is typically used as a source provider for migrations to OpenShift/Kubernetes.
		// EC2 as a migration target is not a common use case, but we provide the fetcher for interface completeness.
		klog.V(4).Infof("DEBUG: Using EC2 target storage fetcher for %s (note: EC2 is typically a source, not target)", providerName)
		return ec2Fetcher.NewEC2StorageFetcher(), nil
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack target storage fetcher for %s", providerName)
		return openstackFetcher.NewOpenStackStorageFetcher(), nil
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using VSphere target storage fetcher for %s", providerName)
		return vsphereFetcher.NewVSphereStorageFetcher(), nil
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift target storage fetcher for %s", providerName)
		return openshiftFetcher.NewOpenShiftStorageFetcher(), nil
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA target storage fetcher for %s", providerName)
		return ovaFetcher.NewOVAStorageFetcher(), nil
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt target storage fetcher for %s", providerName)
		return ovirtFetcher.NewOvirtStorageFetcher(), nil
	case "hyperv":
		// HyperV's FetchTargetStorages itself rejects target usage at runtime.
		klog.V(4).Infof("DEBUG: Using HyperV target storage fetcher for %s", providerName)
		return hypervFetcher.NewHyperVStorageFetcher(), nil
	default:
		return nil, fmt.Errorf("unsupported target provider type: %s", providerType)
	}
}
// GetStorageMapper returns the appropriate storage mapper based on the SOURCE
// provider type, along with the resolved source and target provider type
// strings (in that order) so callers can pass them into
// mapper.StorageMappingOptions. Both provider objects are looked up so the
// target type can be reported even though only the source type selects the
// mapper implementation.
func GetStorageMapper(ctx context.Context, configFlags *genericclioptions.ConfigFlags, sourceProviderName, sourceProviderNamespace, targetProviderName, targetProviderNamespace string, insecureSkipTLS bool) (mapper.StorageMapper, string, string, error) {
	// Get source provider type
	sourceProvider, err := inventory.GetProviderByName(ctx, configFlags, sourceProviderName, sourceProviderNamespace)
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get source provider: %v", err)
	}
	// Note: GetProviderType() only reads from CRD spec (no HTTPS calls), but we pass insecureSkipTLS for consistency
	sourceProviderClient := inventory.NewProviderClientWithInsecure(configFlags, sourceProvider, "", insecureSkipTLS)
	sourceProviderType, err := sourceProviderClient.GetProviderType()
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get source provider type: %v", err)
	}
	// Get target provider type
	targetProvider, err := inventory.GetProviderByName(ctx, configFlags, targetProviderName, targetProviderNamespace)
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get target provider: %v", err)
	}
	// Note: GetProviderType() only reads from CRD spec (no HTTPS calls), but we pass insecureSkipTLS for consistency
	targetProviderClient := inventory.NewProviderClientWithInsecure(configFlags, targetProvider, "", insecureSkipTLS)
	targetProviderType, err := targetProviderClient.GetProviderType()
	if err != nil {
		return nil, "", "", fmt.Errorf("failed to get target provider type: %v", err)
	}
	klog.V(4).Infof("DEBUG: GetStorageMapper - Source provider: %s (type: %s), Target provider: %s (type: %s)",
		sourceProviderName, sourceProviderType, targetProviderName, targetProviderType)
	// Return the appropriate mapper based on source provider type
	switch sourceProviderType {
	case "ec2":
		klog.V(4).Infof("DEBUG: Using EC2 storage mapper for source %s", sourceProviderName)
		return ec2Mapper.NewEC2StorageMapper(), sourceProviderType, targetProviderType, nil
	case "openstack":
		klog.V(4).Infof("DEBUG: Using OpenStack storage mapper for source %s", sourceProviderName)
		return openstackMapper.NewOpenStackStorageMapper(), sourceProviderType, targetProviderType, nil
	case "vsphere":
		klog.V(4).Infof("DEBUG: Using vSphere storage mapper for source %s", sourceProviderName)
		return vsphereMapper.NewVSphereStorageMapper(), sourceProviderType, targetProviderType, nil
	case "openshift":
		klog.V(4).Infof("DEBUG: Using OpenShift storage mapper for source %s", sourceProviderName)
		return openshiftMapper.NewOpenShiftStorageMapper(), sourceProviderType, targetProviderType, nil
	case "ova":
		klog.V(4).Infof("DEBUG: Using OVA storage mapper for source %s", sourceProviderName)
		return ovaMapper.NewOVAStorageMapper(), sourceProviderType, targetProviderType, nil
	case "ovirt":
		klog.V(4).Infof("DEBUG: Using oVirt storage mapper for source %s", sourceProviderName)
		return ovirtMapper.NewOvirtStorageMapper(), sourceProviderType, targetProviderType, nil
	case "hyperv":
		klog.V(4).Infof("DEBUG: Using HyperV storage mapper for source %s", sourceProviderName)
		return hypervMapper.NewHyperVStorageMapper(), sourceProviderType, targetProviderType, nil
	default:
		return nil, "", "", fmt.Errorf("unsupported source provider type: %s", sourceProviderType)
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/factory_test.go | Go | package storage
import (
	"fmt"
	"testing"

	"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
	openshiftMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/openshift"
	openstackMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/openstack"
	ovaMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/ova"
	ovirtMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/ovirt"
	vsphereMapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper/vsphere"
)
// MockProviderClient simulates provider client behavior for testing.
//
// NOTE(review): this mock is not referenced by any test in this file — either
// wire it into a test (e.g. for exercising the factory functions) or remove it.
type MockProviderClient struct {
	ProviderType string // canned value returned by GetProviderType
}

// GetProviderType returns the canned provider type and never fails.
func (m *MockProviderClient) GetProviderType() (string, error) {
	return m.ProviderType, nil
}
// TestGetStorageMapper_ProviderRouting checks that each source provider type
// selects the matching provider-specific storage mapper implementation.
//
// Note: these tests exercise the mapper selection logic directly rather than
// GetStorageMapper itself, which would require a live cluster for
// inventory.GetProviderByName to resolve provider objects.
func TestGetStorageMapper_ProviderRouting(t *testing.T) {
	tests := []struct {
		name               string
		sourceProviderType string
		targetProviderType string
		expectedMapperType string
	}{
		{
			name:               "OpenShift source -> OpenShift mapper",
			sourceProviderType: "openshift",
			targetProviderType: "openshift",
			expectedMapperType: "*openshift.OpenShiftStorageMapper",
		},
		{
			name:               "vSphere source -> vSphere mapper",
			sourceProviderType: "vsphere",
			targetProviderType: "openshift",
			expectedMapperType: "*vsphere.VSphereStorageMapper",
		},
		{
			name:               "oVirt source -> oVirt mapper",
			sourceProviderType: "ovirt",
			targetProviderType: "openshift",
			expectedMapperType: "*ovirt.OvirtStorageMapper",
		},
		{
			name:               "OpenStack source -> OpenStack mapper",
			sourceProviderType: "openstack",
			targetProviderType: "openshift",
			expectedMapperType: "*openstack.OpenStackStorageMapper",
		},
		{
			name:               "OVA source -> OVA mapper",
			sourceProviderType: "ova",
			targetProviderType: "openshift",
			expectedMapperType: "*ova.OVAStorageMapper",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Build the mapper the same way the factory's switch does.
			var storageMapper mapper.StorageMapper
			switch tt.sourceProviderType {
			case "openshift":
				storageMapper = openshiftMapper.NewOpenShiftStorageMapper()
			case "vsphere":
				storageMapper = vsphereMapper.NewVSphereStorageMapper()
			case "ovirt":
				storageMapper = ovirtMapper.NewOvirtStorageMapper()
			case "openstack":
				storageMapper = openstackMapper.NewOpenStackStorageMapper()
			case "ova":
				storageMapper = ovaMapper.NewOVAStorageMapper()
			}
			if storageMapper == nil {
				t.Errorf("Expected mapper for provider type %s, got nil", tt.sourceProviderType)
				return
			}
			// Fix: expectedMapperType was previously declared with expected
			// values but never asserted; verify the concrete type matches.
			if got := fmt.Sprintf("%T", storageMapper); got != tt.expectedMapperType {
				t.Errorf("Mapper type for %s: got %s, want %s", tt.sourceProviderType, got, tt.expectedMapperType)
			}
		})
	}
}
// TestStorageMapperOptions_ProviderTypeFields checks that the provider-type
// fields on mapper.StorageMappingOptions can be set and read back.
func TestStorageMapperOptions_ProviderTypeFields(t *testing.T) {
	var options mapper.StorageMappingOptions
	options.DefaultTargetStorageClass = "test-storage"
	options.SourceProviderType = "openshift"
	options.TargetProviderType = "openshift"

	if got := options.SourceProviderType; got != "openshift" {
		t.Errorf("SourceProviderType: got %s, want openshift", got)
	}
	if got := options.TargetProviderType; got != "openshift" {
		t.Errorf("TargetProviderType: got %s, want openshift", got)
	}
}
// Test that all mapper types implement the StorageMapper interface
func TestStorageMapperInterface_AllImplementations(t *testing.T) {
	// Expressing each constructor as a func() mapper.StorageMapper value is
	// itself the interface-conformance check the original wrote with a
	// blank-identifier assignment.
	builders := []func() mapper.StorageMapper{
		func() mapper.StorageMapper { return openshiftMapper.NewOpenShiftStorageMapper() },
		func() mapper.StorageMapper { return vsphereMapper.NewVSphereStorageMapper() },
		func() mapper.StorageMapper { return ovirtMapper.NewOvirtStorageMapper() },
		func() mapper.StorageMapper { return openstackMapper.NewOpenStackStorageMapper() },
		func() mapper.StorageMapper { return ovaMapper.NewOVAStorageMapper() },
	}
	for i, build := range builders {
		if m := build(); m == nil {
			t.Errorf("Mapper %d is nil", i)
		}
	}
}
// Test storage mapper creation functions: every provider constructor should
// return a non-nil mapper.
//
// NOTE(review): expectedNotNil is true in every case, so the "expected nil"
// branch below is currently unreachable — consider dropping the field.
func TestStorageMapperCreation(t *testing.T) {
	tests := []struct {
		name           string
		createMapper   func() mapper.StorageMapper
		expectedNotNil bool
	}{
		{
			name:           "OpenShift mapper creation",
			createMapper:   func() mapper.StorageMapper { return openshiftMapper.NewOpenShiftStorageMapper() },
			expectedNotNil: true,
		},
		{
			name:           "vSphere mapper creation",
			createMapper:   func() mapper.StorageMapper { return vsphereMapper.NewVSphereStorageMapper() },
			expectedNotNil: true,
		},
		{
			name:           "oVirt mapper creation",
			createMapper:   func() mapper.StorageMapper { return ovirtMapper.NewOvirtStorageMapper() },
			expectedNotNil: true,
		},
		{
			name:           "OpenStack mapper creation",
			createMapper:   func() mapper.StorageMapper { return openstackMapper.NewOpenStackStorageMapper() },
			expectedNotNil: true,
		},
		{
			name:           "OVA mapper creation",
			createMapper:   func() mapper.StorageMapper { return ovaMapper.NewOVAStorageMapper() },
			expectedNotNil: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Note: this local shadows the imported "mapper" package inside
			// the subtest closure.
			mapper := tt.createMapper()
			if tt.expectedNotNil && mapper == nil {
				t.Errorf("Expected non-nil mapper, got nil")
			}
			if !tt.expectedNotNil && mapper != nil {
				t.Errorf("Expected nil mapper, got non-nil")
			}
		})
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/ec2/fetcher.go | Go | package ec2
import (
"context"
"fmt"
"sort"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/fetchers"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// EC2StorageFetcher implements storage fetching for EC2 providers, where
// source "storages" are EBS volume types (gp3, io2, ...) rather than named
// datastores.
type EC2StorageFetcher struct{}

// NewEC2StorageFetcher creates a new EC2 storage fetcher.
//
// NOTE(review): unlike the other providers' constructors this one returns the
// fetchers.StorageFetcher interface rather than the concrete pointer type —
// confirm the asymmetry is intentional.
func NewEC2StorageFetcher() fetchers.StorageFetcher {
	return &EC2StorageFetcher{}
}
// FetchSourceStorages fetches EBS volume types from an EC2 provider's
// inventory. Volume types are normalized to lowercase, de-duplicated, and
// sorted by a fixed priority (SSD types first); each unique type becomes one
// ref.Ref with Name set to the type string.
//
// NOTE(review): planVMNames is accepted but not used to scope the lookup —
// all storages in the inventory are considered.
func (f *EC2StorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("DEBUG: EC2 - Fetching source storage types from provider: %s", providerName)
	// Get provider
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get EC2 provider: %v", err)
	}
	// Fetch EC2 storage types (EBS volume types)
	storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch EC2 storage inventory: %v", err)
	}
	// Extract objects from EC2 envelope
	storageInventory = inventory.ExtractEC2Objects(storageInventory)
	storageArray, ok := storageInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	// Extract unique EBS volume types using a set for deduplication
	var sourceStorages []ref.Ref
	volumeTypeSet := make(map[string]struct{})
	for _, item := range storageArray {
		storage, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		// Get the EC2 volume type (e.g., "gp3", "io2", "st1")
		// Normalize to lowercase to handle inventory variations (GP3, Gp3, gp3)
		volumeType, ok := storage["type"].(string)
		if ok && volumeType != "" {
			volumeType = strings.ToLower(volumeType)
			volumeTypeSet[volumeType] = struct{}{}
		}
	}
	// Convert set to slice
	volumeTypes := make([]string, 0, len(volumeTypeSet))
	for vt := range volumeTypeSet {
		volumeTypes = append(volumeTypes, vt)
	}
	// Sort volume types for consistent ordering (prioritize SSD types)
	sort.Slice(volumeTypes, func(i, j int) bool {
		// Priority order: gp3, gp2, io2, io1, st1, sc1, standard
		priority := map[string]int{
			"gp3": 1, "gp2": 2, "io2": 3, "io1": 4,
			"st1": 5, "sc1": 6, "standard": 7,
		}
		pi, oki := priority[volumeTypes[i]]
		pj, okj := priority[volumeTypes[j]]
		// Unknown types sort after all known ones.
		if !oki {
			pi = 99
		}
		if !okj {
			pj = 99
		}
		return pi < pj
	})
	// Create refs for each volume type
	for _, volumeType := range volumeTypes {
		sourceStorages = append(sourceStorages, ref.Ref{
			Name: volumeType,
		})
	}
	klog.V(4).Infof("DEBUG: EC2 - Found %d source storage types: %v", len(sourceStorages), volumeTypes)
	return sourceStorages, nil
}
// FetchTargetStorages fetches target storage from EC2 provider (not typically used as EC2 is usually source)
func (f *EC2StorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("DEBUG: EC2 - Fetching target storage (EC2 is typically not a migration target)")
	// EC2 is implemented only for interface completeness as a target; report
	// an empty (non-nil) storage list so callers can iterate without checks.
	targets := make([]forkliftv1beta1.DestinationStorage, 0)
	return targets, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/hyperv/fetcher.go | Go | package hyperv
import (
"context"
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
// HyperVStorageFetcher implements storage fetching for HyperV providers.
// HyperV exposes all VM storage through a single SMB share, so the inventory
// typically reports one storage entry (see FetchSourceStorages).
type HyperVStorageFetcher struct{}

// NewHyperVStorageFetcher creates a new HyperV storage fetcher.
func NewHyperVStorageFetcher() *HyperVStorageFetcher {
	return &HyperVStorageFetcher{}
}
// FetchSourceStorages extracts storage references from HyperV provider.
// HyperV uses a single SMB share for all VM storage, so the storages endpoint
// typically returns one entry. We return all storages from the API; entries
// without an "id" field are skipped. The plan VM names parameter is ignored
// (blank identifier) since storage is shared across all VMs.
func (f *HyperVStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, _ []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("HyperV storage fetcher - extracting source storages for provider: %s", providerName)
	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %w", err)
	}
	// Fetch storage inventory - HyperV typically has a single SMB share
	storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %w", err)
	}
	storageArray, ok := storageInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}
	klog.V(4).Infof("HyperV storage fetcher - found %d storage entries", len(storageArray))
	// Build source storages list from all available storages
	var sourceStorages []ref.Ref
	for _, item := range storageArray {
		if storage, ok := item.(map[string]interface{}); ok {
			storageRef := ref.Ref{}
			if storageID, ok := storage["id"].(string); ok {
				storageRef.ID = storageID
			}
			if name, ok := storage["name"].(string); ok {
				storageRef.Name = name
			}
			// Only keep entries that carry an ID; name alone is not enough.
			if storageRef.ID != "" {
				sourceStorages = append(sourceStorages, storageRef)
				klog.V(4).Infof("  Storage: %s (ID: %s)", storageRef.Name, storageRef.ID)
			}
		}
	}
	klog.V(4).Infof("HyperV storage fetcher - returning %d source storages", len(sourceStorages))
	return sourceStorages, nil
}
// FetchTargetStorages is not supported for HyperV as target - only OpenShift
// is supported as migration target. This always logs and returns a non-nil
// error; callers should not route HyperV as a destination.
func (f *HyperVStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("HyperV provider does not support target storage fetching - only OpenShift is supported as target")
	return nil, fmt.Errorf("HyperV provider does not support target storage fetching - only OpenShift is supported as migration target")
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/interfaces.go | Go | package fetchers
import (
"context"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// SourceStorageFetcher interface for extracting storage information from
// source VMs. Implementations query the provider's inventory service;
// planVMNames may scope the lookup to specific VMs, but some implementations
// (e.g. HyperV, EC2) ignore it — behavior is fetcher-specific.
type SourceStorageFetcher interface {
	// FetchSourceStorages extracts storage references from VMs to be migrated
	FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error)
}
// TargetStorageFetcher interface for extracting available target storage.
// Implementations for providers that cannot act as a migration target may
// return an empty list or an error (implementation-specific).
type TargetStorageFetcher interface {
	// FetchTargetStorages extracts available destination storage from target provider
	FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error)
}
// StorageFetcher combines both source and target fetching for providers that
// can act as both sides of a migration; the factory returns fetchers through
// the narrower per-side interfaces above.
type StorageFetcher interface {
	SourceStorageFetcher
	TargetStorageFetcher
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/openshift/fetcher.go | Go | package openshift
import "context"
import (
"fmt"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// OpenShiftStorageFetcher implements storage fetching for OpenShift providers.
// It supports both directions: OpenShift can be a migration source (source
// storages are StorageClasses referenced by VM dataVolumeTemplates) and a
// migration target. Stateless; all data comes from the forklift inventory.
type OpenShiftStorageFetcher struct{}
// NewOpenShiftStorageFetcher creates a new, stateless OpenShift storage fetcher.
func NewOpenShiftStorageFetcher() *OpenShiftStorageFetcher {
	fetcher := new(OpenShiftStorageFetcher)
	return fetcher
}
// FetchSourceStorages extracts storage references from OpenShift VMs.
//
// It resolves the StorageClasses referenced - explicitly, or implicitly via
// the cluster default - by the dataVolumeTemplates of the VMs named in
// planVMNames, and returns them as ref.Ref entries (ID plus Name when known).
// When the plan VMs yield no storage references, the cluster-default
// StorageClass is used as a fallback; when none exists either, an empty
// (non-error) slice is returned.
//
// providerName/namespace select the provider object, inventoryURL locates the
// forklift inventory service, and insecureSkipTLS disables TLS verification
// for inventory calls.
func (f *OpenShiftStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OpenShift storage fetcher - extracting source storages for provider: %s", providerName)

	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}

	// Fetch storage inventory (StorageClasses in OpenShift) first to create ID-to-storage mapping
	storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storageclasses?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	storageArray, ok := storageInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}

	// Create ID-to-storage and name-to-ID mappings for StorageClasses
	storageIDToStorage := make(map[string]map[string]interface{})
	storageNameToID := make(map[string]string)
	for _, item := range storageArray {
		if storage, ok := item.(map[string]interface{}); ok {
			if storageID, ok := storage["id"].(string); ok {
				storageIDToStorage[storageID] = storage
				if storageName, ok := storage["name"].(string); ok {
					storageNameToID[storageName] = storageID
				}
			}
		}
	}
	klog.V(4).Infof("Available StorageClass mappings:")
	for id, storageItem := range storageIDToStorage {
		if name, ok := storageItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}

	// Fetch VMs inventory to get storage references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}

	// Extract storage IDs used by the plan VMs
	storageIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)

		// Extract storage references from VM spec (OpenShift VMs use dataVolumeTemplates and volumes)
		dataVolumeTemplates, err := query.GetValueByPathString(vm, "object.spec.dataVolumeTemplates")
		if err == nil && dataVolumeTemplates != nil {
			if dvtArray, ok := dataVolumeTemplates.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d dataVolumeTemplates", vmName, len(dvtArray))
				for _, dvtItem := range dvtArray {
					if dvtMap, ok := dvtItem.(map[string]interface{}); ok {
						// Look for storageClassName in spec.storageClassName
						storageClassName, err := query.GetValueByPathString(dvtMap, "spec.storageClassName")
						if err == nil && storageClassName != nil {
							if scName, ok := storageClassName.(string); ok {
								klog.V(4).Infof("Found explicit storageClassName: %s", scName)
								if storageID, exists := storageNameToID[scName]; exists {
									storageIDSet[storageID] = true
								}
							}
						} else {
							// No explicit storageClassName - check if this dataVolumeTemplate has storage requirements.
							// This indicates it uses the default storage class.
							storage, err := query.GetValueByPathString(dvtMap, "spec.storage")
							if err == nil && storage != nil {
								klog.V(4).Infof("Found dataVolumeTemplate with storage but no explicit storageClassName - using default storage class")
								if defaultID, found := findDefaultStorageClassID(storageIDToStorage); found {
									klog.V(4).Infof("Using default StorageClass for VM %s: %s", vmName, defaultID)
									storageIDSet[defaultID] = true
								}
							}
						}
					}
				}
			}
		}

		// Walk volumes purely for debug visibility; the usable storage class
		// information was already collected from dataVolumeTemplates above.
		volumes, err := query.GetValueByPathString(vm, "object.spec.template.spec.volumes")
		if err == nil && volumes != nil {
			if volumesArray, ok := volumes.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d volumes", vmName, len(volumesArray))
				for _, volumeItem := range volumesArray {
					if volumeMap, ok := volumeItem.(map[string]interface{}); ok {
						// Check if this volume references a DataVolume (which may have storage class info)
						dataVolume, err := query.GetValueByPathString(volumeMap, "dataVolume")
						if err == nil && dataVolume != nil {
							klog.V(4).Infof("Found volume with dataVolume reference in VM %s", vmName)
							// The actual storage class info is in the dataVolumeTemplates we already processed
						} else {
							klog.V(4).Infof("Found volume in VM %s", vmName)
						}
					}
				}
			}
		}
	}
	klog.V(4).Infof("Final storageIDSet: %v", storageIDSet)

	// If no storages found from VMs, still try to find a default storage class.
	// This handles cases where VMs exist but don't have explicit storage references.
	if len(storageIDSet) == 0 {
		klog.V(4).Infof("No explicit storage found from VMs, looking for default storage class")
		if defaultID, found := findDefaultStorageClassID(storageIDToStorage); found {
			klog.V(4).Infof("Found and using default StorageClass: %s", defaultID)
			storageIDSet[defaultID] = true
		}
	}

	// If still no storages found, return empty list
	if len(storageIDSet) == 0 {
		klog.V(4).Infof("No storages found from VMs")
		return []ref.Ref{}, nil
	}

	// Build source storages list using the collected IDs
	var sourceStorages []ref.Ref
	for storageID := range storageIDSet {
		if storageItem, exists := storageIDToStorage[storageID]; exists {
			sourceStorage := ref.Ref{
				ID: storageID,
			}
			if name, ok := storageItem["name"].(string); ok {
				sourceStorage.Name = name
			}
			sourceStorages = append(sourceStorages, sourceStorage)
		}
	}
	klog.V(4).Infof("OpenShift storage fetcher - found %d source storages", len(sourceStorages))
	return sourceStorages, nil
}

// findDefaultStorageClassID returns the ID of the first StorageClass in the
// mapping carrying the cluster-default annotation
// (storageclass.kubernetes.io/is-default-class == "true"). Go map iteration
// order is unspecified, so if multiple classes claim to be the default the
// choice is arbitrary - same semantics as the previous inline searches this
// helper replaces (deduplicated from two identical loops).
func findDefaultStorageClassID(storageIDToStorage map[string]map[string]interface{}) (string, bool) {
	for storageID, storageItem := range storageIDToStorage {
		isDefaultValue, err := query.GetValueByPathString(storageItem, "object.metadata.annotations.storageclass.kubernetes.io/is-default-class")
		if err == nil && isDefaultValue != nil {
			if isDefault, ok := isDefaultValue.(string); ok && isDefault == "true" {
				return storageID, true
			}
		}
	}
	return "", false
}
// FetchTargetStorages extracts available destination storages from the target provider.
//
// It lists the target cluster's StorageClasses via the forklift inventory and
// returns exactly ONE DestinationStorage - the "best" class selected by this
// priority (within each category the FIRST match in inventory order wins):
//  1. annotation storageclass.kubevirt.io/is-default-virt-class == "true"
//  2. annotation storageclass.kubernetes.io/is-default-class == "true"
//  3. name contains the substring "virtualization" (case-insensitive)
//  4. first storage class returned by the inventory
// An error is returned only when the inventory yields no storage classes.
func (f *OpenShiftStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("OpenShift storage fetcher - extracting target storages for provider: %s", providerName)

	// Get the target provider
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get target provider: %v", err)
	}

	// For OpenShift targets, always fetch StorageClasses
	klog.V(4).Infof("Fetching StorageClasses for OpenShift target")
	storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storageclasses?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch target storage inventory: %v", err)
	}
	storageArray, ok := storageInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for target storage inventory")
	}

	// Parse all storage classes and find the best one using priority logic.
	// Each candidate variable remembers only the FIRST class matching its
	// category; subsequent matches are deliberately ignored.
	var virtAnnotationStorage, k8sAnnotationStorage, virtualizationNameStorage, firstStorage map[string]interface{}
	for _, item := range storageArray {
		storageItem, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		// Set first storage if not already set
		if firstStorage == nil {
			firstStorage = storageItem
		}
		storageName := ""
		if name, ok := storageItem["name"].(string); ok {
			storageName = name
		}
		// Check for storage class name containing "virtualization"
		if virtualizationNameStorage == nil && strings.Contains(strings.ToLower(storageName), "virtualization") {
			klog.V(4).Infof("Found storage class with 'virtualization' in name: %s", storageName)
			virtualizationNameStorage = storageItem
		}
		// Check for annotations in the object.metadata.annotations
		if object, ok := storageItem["object"].(map[string]interface{}); ok {
			if metadata, ok := object["metadata"].(map[string]interface{}); ok {
				if annotations, ok := metadata["annotations"].(map[string]interface{}); ok {
					// Check for virt default annotation
					if virtAnnotationStorage == nil {
						if virtDefault, ok := annotations["storageclass.kubevirt.io/is-default-virt-class"].(string); ok && virtDefault == "true" {
							klog.V(4).Infof("Found storage class with virt default annotation: %s", storageName)
							virtAnnotationStorage = storageItem
						}
					}
					// Check for k8s default annotation
					if k8sAnnotationStorage == nil {
						if k8sDefault, ok := annotations["storageclass.kubernetes.io/is-default-class"].(string); ok && k8sDefault == "true" {
							klog.V(4).Infof("Found storage class with k8s default annotation: %s", storageName)
							k8sAnnotationStorage = storageItem
						}
					}
				}
			}
		}
	}

	// Priority: virt annotation > k8s annotation (only if no virt found) > name with "virtualization" > first available
	var selectedStorage map[string]interface{}
	var selectionReason string
	if virtAnnotationStorage != nil {
		selectedStorage = virtAnnotationStorage
		selectionReason = "virt default annotation"
	} else if k8sAnnotationStorage != nil {
		selectedStorage = k8sAnnotationStorage
		selectionReason = "k8s default annotation"
	} else if virtualizationNameStorage != nil {
		selectedStorage = virtualizationNameStorage
		selectionReason = "name contains 'virtualization'"
	} else if firstStorage != nil {
		selectedStorage = firstStorage
		selectionReason = "first available"
	} else {
		return nil, fmt.Errorf("no storage classes found")
	}
	storageName := ""
	if name, ok := selectedStorage["name"].(string); ok {
		storageName = name
	}
	klog.V(4).Infof("Selected storage class '%s' based on: %s", storageName, selectionReason)

	// Return only the selected storage class
	targetStorages := []forkliftv1beta1.DestinationStorage{
		{
			StorageClass: storageName,
		},
	}
	klog.V(4).Infof("Returning single target storage: %s", storageName)
	return targetStorages, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/openstack/fetcher.go | Go | package openstack
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// OpenStackStorageFetcher implements storage fetching for OpenStack providers.
// OpenStack can act only as a migration source; FetchTargetStorages always
// returns an error. Stateless; all data comes from the forklift inventory.
type OpenStackStorageFetcher struct{}
// NewOpenStackStorageFetcher creates a new, stateless OpenStack storage fetcher.
func NewOpenStackStorageFetcher() *OpenStackStorageFetcher {
	fetcher := new(OpenStackStorageFetcher)
	return fetcher
}
// FetchSourceStorages extracts storage references from OpenStack VMs.
//
// Resolution happens in three inventory passes:
//  1. volumetypes - builds ID->volumeType and name->ID lookup tables;
//  2. vms         - collects attachedVolumes[*].ID for each plan VM;
//  3. volumes     - maps each collected volume to its volume type (the
//     inventory may report the type by name OR by ID; both are handled).
// The distinct volume types become the returned ref.Ref entries (ID + name).
// When no volume types can be resolved, an empty, non-error slice is returned.
func (f *OpenStackStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OpenStack storage fetcher - extracting source storages for provider: %s", providerName)

	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}

	// Fetch volume types inventory first to create ID-to-volumeType mapping
	volumeTypesInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "volumetypes?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch volume types inventory: %v", err)
	}
	volumeTypesArray, ok := volumeTypesInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for volume types inventory")
	}
	volumeTypeIDToVolumeType := make(map[string]map[string]interface{})
	volumeTypeNameToID := make(map[string]string)
	for _, item := range volumeTypesArray {
		if volumeType, ok := item.(map[string]interface{}); ok {
			if volumeTypeID, ok := volumeType["id"].(string); ok {
				volumeTypeIDToVolumeType[volumeTypeID] = volumeType
				// Also create name-to-ID mapping for converting volume type names to IDs
				if volumeTypeName, ok := volumeType["name"].(string); ok {
					volumeTypeNameToID[volumeTypeName] = volumeTypeID
				}
			}
		}
	}
	// NOTE(review): the "DEBUG:" prefix here is inconsistent with the sibling
	// fetchers, which log the same information without it.
	klog.V(4).Infof("DEBUG: Available volume type mappings:")
	for id, volumeType := range volumeTypeIDToVolumeType {
		if name, ok := volumeType["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}

	// Fetch VMs inventory to get volume IDs from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}

	// Collect the volume IDs attached to the plan VMs
	volumeIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		volumeIDs, err := query.GetValueByPathString(vm, "attachedVolumes[*].ID")
		if err != nil || volumeIDs == nil {
			klog.V(4).Infof("VM %s has no attached volumes or failed to extract: err=%v", vmName, err)
			continue
		}
		if ids, ok := volumeIDs.([]interface{}); ok {
			for _, idItem := range ids {
				if volumeID, ok := idItem.(string); ok {
					klog.V(4).Infof("Found volume ID: %s", volumeID)
					volumeIDSet[volumeID] = true
				}
			}
		}
	}

	// Fetch volumes and map the collected volume IDs to their volume types
	volumesInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "volumes?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch volumes inventory: %v", err)
	}
	volumesArray, ok := volumesInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for volumes inventory")
	}
	volumeTypeIDSet := make(map[string]bool)
	for _, item := range volumesArray {
		volumeItem, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		volumeID, ok := volumeItem["id"].(string)
		if !ok {
			continue
		}
		// Only volumes attached to plan VMs are of interest
		if !volumeIDSet[volumeID] {
			continue
		}
		klog.V(4).Infof("Processing volume: %s", volumeID)
		volumeType, err := query.GetValueByPathString(volumeItem, "volumeType")
		if err == nil && volumeType != nil {
			if vtNameOrID, ok := volumeType.(string); ok {
				klog.V(4).Infof("Volume %s has volume type: %s", volumeID, vtNameOrID)
				// The inventory may report either the type's ID or its name;
				// try an ID match first, then fall back to name->ID translation.
				if _, exists := volumeTypeIDToVolumeType[vtNameOrID]; exists {
					volumeTypeIDSet[vtNameOrID] = true
					klog.V(4).Infof("Volume type is already an ID: %s", vtNameOrID)
				} else {
					if volumeTypeID, exists := volumeTypeNameToID[vtNameOrID]; exists {
						volumeTypeIDSet[volumeTypeID] = true
						klog.V(4).Infof("Converted volume type name %s to ID: %s", vtNameOrID, volumeTypeID)
					} else {
						klog.V(4).Infof("No volume type ID found for name: %s", vtNameOrID)
					}
				}
			}
		} else {
			klog.V(4).Infof("Volume %s has no volume type or failed to extract: err=%v", volumeID, err)
		}
	}
	klog.V(4).Infof("DEBUG: Final volumeTypeIDSet: %v", volumeTypeIDSet)

	// If no volume types found from VMs, return empty list
	if len(volumeTypeIDSet) == 0 {
		klog.V(4).Infof("No volume types found from VMs - VMs have incomplete data")
		return []ref.Ref{}, nil
	}

	// Build the result, attaching the human-readable name when known
	var sourceStorages []ref.Ref
	for volumeTypeID := range volumeTypeIDSet {
		if volumeTypeItem, exists := volumeTypeIDToVolumeType[volumeTypeID]; exists {
			sourceStorage := ref.Ref{
				ID: volumeTypeID,
			}
			if name, ok := volumeTypeItem["name"].(string); ok {
				sourceStorage.Name = name
			}
			sourceStorages = append(sourceStorages, sourceStorage)
		}
	}
	klog.V(4).Infof("OpenStack storage fetcher - found %d source storages", len(sourceStorages))
	return sourceStorages, nil
}
// FetchTargetStorages always fails for OpenStack: OpenStack can act only as a
// migration source, and only OpenShift providers are valid migration targets,
// so there are no destination storages to report.
func (f *OpenStackStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("OpenStack provider does not support target storage fetching - only OpenShift is supported as target")
	err := fmt.Errorf("OpenStack provider does not support target storage fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/ova/fetcher.go | Go | package ova
import (
"context"
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
// OVAStorageFetcher implements storage fetching for OVA providers.
// OVA files can act only as a migration source; FetchTargetStorages always
// returns an error. Stateless; all data comes from the forklift inventory.
type OVAStorageFetcher struct{}
// NewOVAStorageFetcher creates a new, stateless OVA storage fetcher.
func NewOVAStorageFetcher() *OVAStorageFetcher {
	fetcher := new(OVAStorageFetcher)
	return fetcher
}
// FetchSourceStorages extracts storage references from OVA VMs.
//
// Two inventory passes: "storages" builds an ID->storage lookup table, then
// the plan VMs' disks are scanned for the storages they reference. One
// ref.Ref (ID + name) is returned per distinct storage; when the plan VMs
// reference no known storage, an empty, non-error slice is returned.
func (f *OVAStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("OVA storage fetcher - extracting source storages for provider: %s", providerName)

	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}

	// Fetch storage inventory first to create ID-to-storage mapping
	storageInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storages?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage inventory: %v", err)
	}
	storageArray, ok := storageInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage inventory")
	}

	// Create ID-to-storage mapping
	storageIDToStorage := make(map[string]map[string]interface{})
	for _, item := range storageArray {
		if storage, ok := item.(map[string]interface{}); ok {
			if storageID, ok := storage["id"].(string); ok {
				storageIDToStorage[storageID] = storage
			}
		}
	}
	klog.V(4).Infof("Available storage mappings:")
	for id, storageItem := range storageIDToStorage {
		if name, ok := storageItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}

	// Fetch VMs inventory to get storage references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}

	// Extract storage IDs used by the plan VMs
	storageIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract storage IDs from VM disks (OVA VMs have direct disks array with ID field)
		disks, err := query.GetValueByPathString(vm, "disks")
		if err == nil && disks != nil {
			if disksArray, ok := disks.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d disks", vmName, len(disksArray))
				for _, diskItem := range disksArray {
					if diskMap, ok := diskItem.(map[string]interface{}); ok {
						// OVA uses capital "ID" field (unlike the lowercase "id" elsewhere)
						if storageID, ok := diskMap["ID"].(string); ok {
							klog.V(4).Infof("Found storage ID: %s", storageID)
							storageIDSet[storageID] = true
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no disks or failed to extract: err=%v", vmName, err)
		}
	}
	klog.V(4).Infof("Final storageIDSet: %v", storageIDSet)

	// If no storages found from VMs, return empty list
	if len(storageIDSet) == 0 {
		klog.V(4).Infof("No storages found from VMs")
		return []ref.Ref{}, nil
	}

	// Build source storages list using the collected IDs
	var sourceStorages []ref.Ref
	for storageID := range storageIDSet {
		if storageItem, exists := storageIDToStorage[storageID]; exists {
			sourceStorage := ref.Ref{
				ID: storageID,
			}
			if name, ok := storageItem["name"].(string); ok {
				sourceStorage.Name = name
			}
			sourceStorages = append(sourceStorages, sourceStorage)
		}
	}
	klog.V(4).Infof("OVA storage fetcher - found %d source storages", len(sourceStorages))
	return sourceStorages, nil
}
// FetchTargetStorages always fails for OVA: OVA files can act only as a
// migration source, and only OpenShift providers are valid migration targets,
// so there are no destination storages to report.
func (f *OVAStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("OVA provider does not support target storage fetching - only OpenShift is supported as target")
	err := fmt.Errorf("OVA provider does not support target storage fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/ovirt/fetcher.go | Go | package ovirt
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// OvirtStorageFetcher implements storage fetching for oVirt providers.
// oVirt can act only as a migration source; FetchTargetStorages always
// returns an error. Stateless; all data comes from the forklift inventory.
type OvirtStorageFetcher struct{}
// NewOvirtStorageFetcher creates a new, stateless oVirt storage fetcher.
func NewOvirtStorageFetcher() *OvirtStorageFetcher {
	fetcher := new(OvirtStorageFetcher)
	return fetcher
}
// FetchSourceStorages extracts storage references from oVirt VMs.
//
// It resolves the storage domains used by the plan VMs' disks in passes:
//  1. storagedomains - ID->domain lookup table (also provides names);
//  2. vms            - collect disk IDs from each plan VM's diskAttachments;
//  3. disks          - map the collected disk IDs to storage domain IDs.
// When the disks endpoint is unavailable, disk profiles are used as a coarser
// fallback; when those fail too, ALL known storage domains are returned as a
// last resort. Returns one ref.Ref per storage domain, with Name populated
// when the domain is known (consistent with the other provider fetchers).
func (f *OvirtStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("oVirt storage fetcher - extracting source storages for provider: %s", providerName)

	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}

	// Fetch storage domains inventory first to create ID-to-storage mapping
	storageDomainsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "storagedomains?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch storage domains inventory: %v", err)
	}
	storageDomainsArray, ok := storageDomainsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for storage domains inventory")
	}

	// Create ID-to-storage domain mapping
	storageDomainIDToStorageDomain := make(map[string]map[string]interface{})
	for _, item := range storageDomainsArray {
		if storageDomain, ok := item.(map[string]interface{}); ok {
			if storageDomainID, ok := storageDomain["id"].(string); ok {
				storageDomainIDToStorageDomain[storageDomainID] = storageDomain
			}
		}
	}
	klog.V(4).Infof("Available storage domain mappings:")
	for id, storageDomainItem := range storageDomainIDToStorageDomain {
		if name, ok := storageDomainItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}

	// Fetch VMs inventory to get disk references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}
	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}

	// Extract disk IDs used by the plan VMs
	diskIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}
	for _, vmItem := range vmsArray {
		if vm, ok := vmItem.(map[string]interface{}); ok {
			if vmName, ok := vm["name"].(string); ok && planVMSet[vmName] {
				klog.V(4).Infof("Processing VM: %s", vmName)
				// Extract disk IDs from VM diskAttachments
				if diskAttachments, ok := vm["diskAttachments"].([]interface{}); ok {
					for _, diskAttachmentItem := range diskAttachments {
						if diskAttachment, ok := diskAttachmentItem.(map[string]interface{}); ok {
							if diskID, ok := diskAttachment["disk"].(string); ok {
								klog.V(4).Infof("Found disk ID: %s", diskID)
								diskIDSet[diskID] = true
							}
						}
					}
				}
			}
		}
	}

	// Fetch disk details to get storage domain information
	storageDomainIDSet := make(map[string]bool)
	// Try to fetch disks inventory to get storage domain mappings
	disksInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "disks?detail=4", insecureSkipTLS)
	if err != nil {
		// If disks endpoint doesn't work, try disk profiles as fallback
		klog.V(4).Infof("Disks endpoint failed, trying disk profiles: %v", err)
		// Fetch disk profiles to map disks to storage domains
		diskProfilesInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "diskprofiles?detail=4", insecureSkipTLS)
		if err != nil {
			klog.V(4).Infof("Warning: Could not fetch disk profiles either: %v", err)
			// Return all storage domains as fallback
			for storageDomainID := range storageDomainIDToStorageDomain {
				storageDomainIDSet[storageDomainID] = true
			}
		} else {
			diskProfilesArray, ok := diskProfilesInventory.([]interface{})
			if ok {
				// Use all storage domains from disk profiles as fallback
				for _, item := range diskProfilesArray {
					if profile, ok := item.(map[string]interface{}); ok {
						if storageDomainID, ok := profile["storageDomain"].(string); ok {
							storageDomainIDSet[storageDomainID] = true
						}
					}
				}
			}
		}
	} else {
		disksArray, ok := disksInventory.([]interface{})
		if !ok {
			return nil, fmt.Errorf("unexpected data format: expected array for disks inventory")
		}
		// Map disk IDs to storage domains
		for _, diskItem := range disksArray {
			if disk, ok := diskItem.(map[string]interface{}); ok {
				if diskID, ok := disk["id"].(string); ok {
					// Check if this disk is used by our VMs
					if diskIDSet[diskID] {
						// Extract storage domain from disk
						if storageDomainID, ok := disk["storageDomain"].(string); ok {
							klog.V(4).Infof("Disk %s uses storage domain: %s", diskID, storageDomainID)
							storageDomainIDSet[storageDomainID] = true
						}
					}
				}
			}
		}
	}

	// Create source storage references for the storage domains used by VMs.
	// Consistency fix: populate Name (when the domain is known) like the other
	// provider fetchers do, instead of emitting bare IDs. Unknown domain IDs
	// (possible via the disk-profile fallback) are still included ID-only.
	var sourceStorages []ref.Ref
	for storageDomainID := range storageDomainIDSet {
		sourceStorage := ref.Ref{
			ID: storageDomainID,
		}
		if storageDomainItem, exists := storageDomainIDToStorageDomain[storageDomainID]; exists {
			if name, ok := storageDomainItem["name"].(string); ok {
				sourceStorage.Name = name
			}
		}
		sourceStorages = append(sourceStorages, sourceStorage)
	}
	klog.V(4).Infof("oVirt storage fetcher - found %d source storages", len(sourceStorages))
	return sourceStorages, nil
}
// FetchTargetStorages always fails for oVirt: oVirt can act only as a
// migration source, and only OpenShift providers are valid migration targets,
// so there are no destination storages to report.
func (f *OvirtStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("oVirt provider does not support target storage fetching - only OpenShift is supported as target")
	err := fmt.Errorf("oVirt provider does not support target storage fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/fetchers/vsphere/fetcher.go | Go | package vsphere
import "context"
import (
"fmt"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/query"
)
// VSphereStorageFetcher implements storage fetching for VSphere providers.
// Source storages are the vSphere datastores referenced by the plan VMs'
// disks. Stateless; all data comes from the forklift inventory.
type VSphereStorageFetcher struct{}
// NewVSphereStorageFetcher creates a new, stateless VSphere storage fetcher.
func NewVSphereStorageFetcher() *VSphereStorageFetcher {
	fetcher := new(VSphereStorageFetcher)
	return fetcher
}
// FetchSourceStorages extracts storage references from VSphere VMs.
//
// For the VMs named in planVMNames it collects the vSphere datastores that
// back their disks and returns them as ref.Ref values (datastore ID plus the
// name when the datastore inventory resolves it). It returns an empty
// (non-nil) slice when the plan VMs reference no datastores, and an error
// when the provider lookup or either inventory fetch fails.
func (f *VSphereStorageFetcher) FetchSourceStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, planVMNames []string, insecureSkipTLS bool) ([]ref.Ref, error) {
	klog.V(4).Infof("VSphere storage fetcher - extracting source storages for provider: %s", providerName)

	// Get the provider object
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get source provider: %v", err)
	}

	// Fetch datastores inventory first to create ID-to-datastore mapping.
	// NOTE(review): "detail=4" presumably requests fully expanded inventory
	// objects from the forklift inventory service — confirm against the API.
	datastoresInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "datastores?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch datastores inventory: %v", err)
	}

	datastoresArray, ok := datastoresInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for datastores inventory")
	}

	// Create ID-to-datastore mapping
	datastoreIDToDatastore := make(map[string]map[string]interface{})
	for _, item := range datastoresArray {
		if datastore, ok := item.(map[string]interface{}); ok {
			if datastoreID, ok := datastore["id"].(string); ok {
				datastoreIDToDatastore[datastoreID] = datastore
			}
		}
	}

	klog.V(4).Infof("Available datastore mappings:")
	for id, datastoreItem := range datastoreIDToDatastore {
		if name, ok := datastoreItem["name"].(string); ok {
			klog.V(4).Infof("  %s -> %s", id, name)
		}
	}

	// Fetch VMs inventory to get datastore references from VMs
	vmsInventory, err := client.FetchProviderInventoryWithInsecure(ctx, configFlags, inventoryURL, provider, "vms?detail=4", insecureSkipTLS)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch VMs inventory: %v", err)
	}

	vmsArray, ok := vmsInventory.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data format: expected array for VMs inventory")
	}

	// Extract datastore IDs used by the plan VMs.
	// planVMSet gives O(1) membership tests while scanning the inventory.
	datastoreIDSet := make(map[string]bool)
	planVMSet := make(map[string]bool)
	for _, vmName := range planVMNames {
		planVMSet[vmName] = true
	}

	for _, item := range vmsArray {
		vm, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		vmName, ok := vm["name"].(string)
		if !ok || !planVMSet[vmName] {
			continue
		}
		klog.V(4).Infof("Processing VM: %s", vmName)
		// Extract datastore IDs from VM disks (VSphere VMs have direct disks array)
		disks, err := query.GetValueByPathString(vm, "disks")
		if err == nil && disks != nil {
			if disksArray, ok := disks.([]interface{}); ok {
				klog.V(4).Infof("VM %s has %d disks", vmName, len(disksArray))
				for _, diskItem := range disksArray {
					if diskMap, ok := diskItem.(map[string]interface{}); ok {
						datastoreID, err := query.GetValueByPathString(diskMap, "datastore.id")
						if err == nil && datastoreID != nil {
							if dsID, ok := datastoreID.(string); ok {
								klog.V(4).Infof("Found datastore ID: %s", dsID)
								datastoreIDSet[dsID] = true
							}
						}
					}
				}
			}
		} else {
			klog.V(4).Infof("VM %s has no disks or failed to extract: err=%v", vmName, err)
		}
	}

	klog.V(4).Infof("Final datastoreIDSet: %v", datastoreIDSet)

	// If no datastores found from VMs, return empty list (not nil, not an error)
	if len(datastoreIDSet) == 0 {
		klog.V(4).Infof("No datastores found from VMs")
		return []ref.Ref{}, nil
	}

	// Build source storages list using the collected IDs; IDs with no entry
	// in the datastore inventory are silently dropped.
	var sourceStorages []ref.Ref
	for datastoreID := range datastoreIDSet {
		if datastoreItem, exists := datastoreIDToDatastore[datastoreID]; exists {
			sourceStorage := ref.Ref{
				ID: datastoreID,
			}
			if name, ok := datastoreItem["name"].(string); ok {
				sourceStorage.Name = name
			}
			sourceStorages = append(sourceStorages, sourceStorage)
		}
	}

	klog.V(4).Infof("VSphere storage fetcher - found %d source storages", len(sourceStorages))
	return sourceStorages, nil
}
// FetchTargetStorages always fails: VSphere can only act as a migration
// source, and OpenShift is the sole supported migration target.
func (f *VSphereStorageFetcher) FetchTargetStorages(ctx context.Context, configFlags *genericclioptions.ConfigFlags, providerName, namespace, inventoryURL string, insecureSkipTLS bool) ([]forkliftv1beta1.DestinationStorage, error) {
	klog.V(4).Infof("VSphere provider does not support target storage fetching - only OpenShift is supported as target")
	err := fmt.Errorf("VSphere provider does not support target storage fetching - only OpenShift is supported as migration target")
	return nil, err
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/ec2/mapper.go | Go | package ec2
import (
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// EC2StorageMapper implements storage mapping for EC2 providers.
type EC2StorageMapper struct{}

// NewEC2StorageMapper returns an EC2 storage mapper behind the generic
// mapper.StorageMapper interface.
func NewEC2StorageMapper() mapper.StorageMapper {
	return new(EC2StorageMapper)
}
// EC2VolumeTypeConfig defines configuration for each EC2 EBS volume type.
type EC2VolumeTypeConfig struct {
	// VolumeMode is the PV mode (Block or Filesystem) used for the mapped disk.
	VolumeMode corev1.PersistentVolumeMode
	// AccessMode is the PV access mode requested for the mapped disk.
	AccessMode corev1.PersistentVolumeAccessMode
}
// getVolumeTypeConfig returns the volume mode and access mode for an EC2
// volume type. SSD types (gp2/gp3/io1/io2) and unknown types map to Block
// mode; HDD/magnetic types (st1/sc1/standard) map to Filesystem mode.
// Access mode is always ReadWriteOnce.
func getVolumeTypeConfig(ec2Type string) EC2VolumeTypeConfig {
	// Block is the default (SSDs and anything unrecognized); only the
	// HDD-style types override it to Filesystem.
	volumeMode := corev1.PersistentVolumeBlock
	switch ec2Type {
	case "st1", "sc1", "standard":
		volumeMode = corev1.PersistentVolumeFilesystem
	}
	return EC2VolumeTypeConfig{
		VolumeMode: volumeMode,
		AccessMode: corev1.ReadWriteOnce,
	}
}
// findMatchingEBSStorageClass picks the best EBS-backed storage class for an
// EC2 volume type. Selection order:
//  1. keep only classes whose (lowercased) name looks EBS-related,
//  2. prefer an exact name match or a "<type>-*" prefix match,
//  3. otherwise a class whose name contains "default",
//  4. otherwise the first EBS-looking class.
// When nothing looks like EBS, the first non-empty class name (or the first
// entry) is returned; an empty target list yields "".
func findMatchingEBSStorageClass(ec2Type string, targetStorages []forkliftv1beta1.DestinationStorage) string {
	if len(targetStorages) == 0 {
		return ""
	}

	// Name-based EBS detection heuristic: the provisioner is not available
	// through this interface, so false positives are possible for non-EBS
	// classes with similar names.
	looksEBS := func(lowerName string) bool {
		for _, hint := range []string{"ebs", "gp2", "gp3", "io1", "io2", "st1", "sc1"} {
			if strings.Contains(lowerName, hint) {
				return true
			}
		}
		return false
	}

	var ebsClasses []forkliftv1beta1.DestinationStorage
	defaultEBSClass := ""
	for _, candidate := range targetStorages {
		lowerName := strings.ToLower(candidate.StorageClass)
		if !looksEBS(lowerName) {
			continue
		}
		ebsClasses = append(ebsClasses, candidate)
		// Name-based default detection (annotations are unavailable here);
		// the last matching class wins, as before.
		if strings.Contains(lowerName, "default") {
			defaultEBSClass = candidate.StorageClass
		}
	}

	if len(ebsClasses) == 0 {
		klog.V(4).Infof("DEBUG: EC2 storage mapper - No EBS storage classes found, using first available")
		for _, candidate := range targetStorages {
			if candidate.StorageClass != "" {
				return candidate.StorageClass
			}
		}
		// All names empty: fall back to the first entry regardless.
		return targetStorages[0].StorageClass
	}

	// Exact match (gp3 -> gp3) or suffixed close match (gp3 -> gp3-csi),
	// checked per candidate in order.
	wanted := strings.ToLower(ec2Type)
	for _, candidate := range ebsClasses {
		lowerName := strings.ToLower(candidate.StorageClass)
		if lowerName == wanted {
			klog.V(4).Infof("DEBUG: EC2 storage mapper - Found exact match: %s → %s", ec2Type, candidate.StorageClass)
			return candidate.StorageClass
		}
		if strings.HasPrefix(lowerName, wanted+"-") {
			klog.V(4).Infof("DEBUG: EC2 storage mapper - Found close match: %s → %s", ec2Type, candidate.StorageClass)
			return candidate.StorageClass
		}
	}

	if defaultEBSClass != "" {
		klog.V(4).Infof("DEBUG: EC2 storage mapper - Using default EBS class: %s → %s", ec2Type, defaultEBSClass)
		return defaultEBSClass
	}

	klog.V(4).Infof("DEBUG: EC2 storage mapper - Using first EBS class: %s → %s", ec2Type, ebsClasses[0].StorageClass)
	return ebsClasses[0].StorageClass
}
// CreateStoragePairs creates storage mapping pairs for EC2 → OpenShift
// migrations. Each source ref's Name is treated as an EBS volume type
// (gp3, io2, ...). The destination class is the user-supplied default when
// set, otherwise the best name-matched EBS class; sources with no resolvable
// class are skipped with a warning. Volume/access modes are derived from the
// volume type. A non-OpenShift target only triggers a warning.
func (m *EC2StorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	var pairs []forkliftv1beta1.StoragePair

	// EC2 storage mapping expects OpenShift as the migration target.
	if opts.TargetProviderType != "" && opts.TargetProviderType != "openshift" {
		klog.V(2).Infof("WARNING: EC2 storage mapper - Target provider type is '%s', not 'openshift'. EC2→%s migrations may not work as expected.",
			opts.TargetProviderType, opts.TargetProviderType)
	}

	klog.V(4).Infof("DEBUG: EC2 storage mapper - Creating storage pairs for %d source EBS types", len(sourceStorages))
	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return pairs, nil
	}

	for _, source := range sourceStorages {
		volumeType := source.Name
		cfg := getVolumeTypeConfig(volumeType)

		// A user-supplied default class overrides matching for every type.
		targetClass := opts.DefaultTargetStorageClass
		if targetClass != "" {
			klog.V(4).Infof("DEBUG: EC2 storage mapper - Using user-defined storage class '%s' for %s",
				targetClass, volumeType)
		} else {
			targetClass = findMatchingEBSStorageClass(volumeType, targetStorages)
			if targetClass == "" {
				klog.V(2).Infof("WARNING: EC2 storage mapper - No target storage class found for %s, skipping", volumeType)
				continue
			}
		}

		pairs = append(pairs, forkliftv1beta1.StoragePair{
			Source: source,
			Destination: forkliftv1beta1.DestinationStorage{
				StorageClass: targetClass,
				VolumeMode:   cfg.VolumeMode,
				AccessMode:   cfg.AccessMode,
			},
		})
		klog.V(4).Infof("DEBUG: EC2 storage mapper - Mapped %s → %s (mode: %s, access: %s)",
			volumeType, targetClass, cfg.VolumeMode, cfg.AccessMode)
	}

	klog.V(4).Infof("DEBUG: EC2 storage mapper - Created %d storage pairs", len(pairs))
	return pairs, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/hyperv/mapper.go | Go | package hyperv
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// HyperVStorageMapper implements storage mapping for HyperV providers.
type HyperVStorageMapper struct{}

// NewHyperVStorageMapper returns a HyperV storage mapper behind the generic
// mapper.StorageMapper interface.
func NewHyperVStorageMapper() mapper.StorageMapper {
	return new(HyperVStorageMapper)
}
// CreateStoragePairs maps every source storage to a single default target
// storage class (no same-name matching for HyperV). With no sources it
// returns an empty, non-nil slice.
func (m *HyperVStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	pairs := make([]forkliftv1beta1.StoragePair, 0, len(sourceStorages))
	klog.V(4).Infof("DEBUG: HyperV storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))
	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return pairs, nil
	}

	// Every source shares the one resolved default destination.
	destination := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", destination.StorageClass)
	for _, source := range sourceStorages {
		pairs = append(pairs, forkliftv1beta1.StoragePair{
			Source:      source,
			Destination: destination,
		})
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s", source.Name, destination.StorageClass)
	}

	klog.V(4).Infof("DEBUG: Created %d storage pairs", len(pairs))
	return pairs, nil
}
// findDefaultStorageClass resolves the destination storage class used for
// every source. Resolution order: an explicit user-supplied class wins;
// otherwise the first target storage (pre-ranked by FetchTargetStorages);
// otherwise an empty class, deferring to the cluster's system default.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	if userClass := opts.DefaultTargetStorageClass; userClass != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", userClass)
		return forkliftv1beta1.DestinationStorage{StorageClass: userClass}
	}
	if len(targetStorages) == 0 {
		klog.V(4).Infof("DEBUG: No storage classes available, using system default")
		return forkliftv1beta1.DestinationStorage{}
	}
	chosen := targetStorages[0]
	klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", chosen.StorageClass)
	return chosen
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/interfaces.go | Go | package mapper
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
)
// StorageMappingOptions contains options for storage mapping
type StorageMappingOptions struct {
	// DefaultTargetStorageClass, when non-empty, overrides automatic
	// target storage-class selection in the mappers.
	DefaultTargetStorageClass string
	// SourceProviderType is the source provider kind (e.g. "openshift").
	SourceProviderType string
	// TargetProviderType is the target provider kind; "openshift" enables
	// same-name matching in the OpenShift mapper.
	TargetProviderType string
}

// StorageMapper defines the interface for storage mapping operations
type StorageMapper interface {
	// CreateStoragePairs maps each source storage ref to a destination
	// storage for a migration plan.
	CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/openshift/mapper.go | Go | package openshift
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// OpenShiftStorageMapper implements storage mapping for OpenShift providers.
type OpenShiftStorageMapper struct{}

// NewOpenShiftStorageMapper returns an OpenShift storage mapper behind the
// generic mapper.StorageMapper interface.
func NewOpenShiftStorageMapper() mapper.StorageMapper {
	return new(OpenShiftStorageMapper)
}
// CreateStoragePairs creates storage pairs with OpenShift-specific logic.
// For OCP-to-OCP migrations it first attempts all-or-nothing same-name
// matching; if any source lacks a same-named target, or the target is not
// OpenShift, every source falls back to a single default storage class.
func (m *OpenShiftStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	klog.V(4).Infof("DEBUG: OpenShift storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))
	klog.V(4).Infof("DEBUG: Source provider type: %s, Target provider type: %s", opts.SourceProviderType, opts.TargetProviderType)
	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return nil, nil
	}

	// Same-name matching is all-or-nothing and only for OCP targets.
	if opts.TargetProviderType == "openshift" {
		klog.V(4).Infof("DEBUG: OCP-to-OCP migration detected, attempting same-name matching")
		if canMatchAllStoragesByName(sourceStorages, targetStorages) {
			klog.V(4).Infof("DEBUG: All storages can be matched by name, using same-name mapping")
			return createSameNameStoragePairs(sourceStorages, targetStorages)
		}
		klog.V(4).Infof("DEBUG: Not all storages can be matched by name, falling back to default behavior")
	}

	return createDefaultStoragePairs(sourceStorages, targetStorages, opts)
}
// canMatchAllStoragesByName reports whether every source storage name has an
// identically named target storage class (empty class names are ignored).
func canMatchAllStoragesByName(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage) bool {
	// Index non-empty target class names for O(1) lookups.
	available := make(map[string]bool, len(targetStorages))
	for _, target := range targetStorages {
		if target.StorageClass != "" {
			available[target.StorageClass] = true
		}
	}
	klog.V(4).Infof("DEBUG: Available target storage classes: %v", getTargetStorageNames(targetStorages))

	for _, source := range sourceStorages {
		if available[source.Name] {
			continue
		}
		klog.V(4).Infof("DEBUG: Source storage '%s' has no matching target by name", source.Name)
		return false
	}

	klog.V(4).Infof("DEBUG: All source storages can be matched by name")
	return true
}
// createSameNameStoragePairs pairs each source storage with the target
// storage class of the same name; sources with no same-named target are
// silently skipped (callers gate this with canMatchAllStoragesByName).
func createSameNameStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage) ([]forkliftv1beta1.StoragePair, error) {
	// Index targets by class name, ignoring empty names.
	byName := make(map[string]forkliftv1beta1.DestinationStorage, len(targetStorages))
	for _, target := range targetStorages {
		if target.StorageClass != "" {
			byName[target.StorageClass] = target
		}
	}

	var pairs []forkliftv1beta1.StoragePair
	for _, source := range sourceStorages {
		destination, found := byName[source.Name]
		if !found {
			continue
		}
		pairs = append(pairs, forkliftv1beta1.StoragePair{
			Source:      source,
			Destination: destination,
		})
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s (same name)", source.Name, destination.StorageClass)
	}

	klog.V(4).Infof("DEBUG: Created %d same-name storage pairs", len(pairs))
	return pairs, nil
}
// createDefaultStoragePairs maps every source storage to one resolved
// default destination storage class.
func createDefaultStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	destination := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", destination.StorageClass)

	var pairs []forkliftv1beta1.StoragePair
	for _, source := range sourceStorages {
		pairs = append(pairs, forkliftv1beta1.StoragePair{
			Source:      source,
			Destination: destination,
		})
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s (default)", source.Name, destination.StorageClass)
	}

	klog.V(4).Infof("DEBUG: Created %d default storage pairs", len(pairs))
	return pairs, nil
}
// findDefaultStorageClass resolves the destination storage class used for
// every source. Resolution order: an explicit user-supplied class wins;
// otherwise the first target storage (pre-ranked by FetchTargetStorages);
// otherwise an empty class, deferring to the cluster's system default.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	if userClass := opts.DefaultTargetStorageClass; userClass != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", userClass)
		return forkliftv1beta1.DestinationStorage{StorageClass: userClass}
	}
	if len(targetStorages) == 0 {
		klog.V(4).Infof("DEBUG: No storage classes available, using system default")
		return forkliftv1beta1.DestinationStorage{}
	}
	chosen := targetStorages[0]
	klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", chosen.StorageClass)
	return chosen
}
// getTargetStorageNames returns the non-empty target storage class names,
// used for debug logging.
func getTargetStorageNames(targetStorages []forkliftv1beta1.DestinationStorage) []string {
	var names []string
	for _, target := range targetStorages {
		if name := target.StorageClass; name != "" {
			names = append(names, name)
		}
	}
	return names
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/openshift/mapper_test.go | Go | package openshift
import (
"testing"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
storagemapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// TestOpenShiftStorageMapper_CreateStoragePairs_SameNameMatching covers the
// OCP-to-OCP mapping strategy: all-or-nothing same-name matching, with
// fallback to a single default target class when any source name is
// unmatched or when the target provider is not OpenShift.
//
// NOTE(review): the expectedSameName field is recorded per case but never
// asserted; the matching mode is verified indirectly via expectedTargetNames.
func TestOpenShiftStorageMapper_CreateStoragePairs_SameNameMatching(t *testing.T) {
	tests := []struct {
		name                string
		sourceStorages      []ref.Ref
		targetStorages      []forkliftv1beta1.DestinationStorage
		sourceProviderType  string
		targetProviderType  string
		expectedPairs       int
		expectedSameName    bool
		expectedTargetNames []string
	}{
		{
			name: "OCP-to-OCP: All sources match by name",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd", ID: "src-1"},
				{Name: "slow-hdd", ID: "src-2"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "fast-ssd"},
				{StorageClass: "slow-hdd"},
				{StorageClass: "nvme-storage"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       2,
			expectedSameName:    true,
			expectedTargetNames: []string{"fast-ssd", "slow-hdd"},
		},
		{
			name: "OCP-to-OCP: Some sources don't match by name - fallback to default",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd", ID: "src-1"},
				{Name: "unknown-storage", ID: "src-2"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "fast-ssd"},
				{StorageClass: "slow-hdd"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       2,
			expectedSameName:    false,
			expectedTargetNames: []string{"fast-ssd", "fast-ssd"}, // Both map to first (default)
		},
		{
			name: "OCP-to-non-OCP: Use default behavior",
			sourceStorages: []ref.Ref{
				{Name: "datastore1", ID: "src-1"},
				{Name: "datastore2", ID: "src-2"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "ocs-storagecluster-ceph-rbd"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "vsphere",
			expectedPairs:       2,
			expectedSameName:    false,
			expectedTargetNames: []string{"ocs-storagecluster-ceph-rbd", "ocs-storagecluster-ceph-rbd"},
		},
		{
			name:                "OCP-to-OCP: Empty sources",
			sourceStorages:      []ref.Ref{},
			targetStorages:      []forkliftv1beta1.DestinationStorage{{StorageClass: "default"}},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       0,
			expectedSameName:    false,
			expectedTargetNames: []string{},
		},
		{
			name: "OCP-to-OCP: Single source with match",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd", ID: "src-1"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "fast-ssd"},
			},
			sourceProviderType:  "openshift",
			targetProviderType:  "openshift",
			expectedPairs:       1,
			expectedSameName:    true,
			expectedTargetNames: []string{"fast-ssd"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			storageMapper := NewOpenShiftStorageMapper()
			opts := storagemapper.StorageMappingOptions{
				SourceProviderType: tt.sourceProviderType,
				TargetProviderType: tt.targetProviderType,
			}
			pairs, err := storageMapper.CreateStoragePairs(tt.sourceStorages, tt.targetStorages, opts)
			if err != nil {
				t.Fatalf("CreateStoragePairs() error = %v", err)
			}
			if len(pairs) != tt.expectedPairs {
				t.Errorf("CreateStoragePairs() got %d pairs, want %d", len(pairs), tt.expectedPairs)
			}
			// Verify target storage class names (pair order follows source order)
			for i, pair := range pairs {
				if i < len(tt.expectedTargetNames) {
					if pair.Destination.StorageClass != tt.expectedTargetNames[i] {
						t.Errorf("Pair %d: got target %s, want %s", i, pair.Destination.StorageClass, tt.expectedTargetNames[i])
					}
				}
			}
			// Verify source names are preserved
			for i, pair := range pairs {
				if i < len(tt.sourceStorages) {
					if pair.Source.Name != tt.sourceStorages[i].Name {
						t.Errorf("Pair %d: source name %s != expected %s", i, pair.Source.Name, tt.sourceStorages[i].Name)
					}
				}
			}
		})
	}
}
// TestOpenShiftStorageMapper_CreateStoragePairs_DefaultStorageClassSelection
// verifies default-class priority: an explicit user default beats the
// auto-selected first target, which beats the empty (system default) class.
// A non-OpenShift target provider type is used to force the default path.
func TestOpenShiftStorageMapper_CreateStoragePairs_DefaultStorageClassSelection(t *testing.T) {
	tests := []struct {
		name                      string
		targetStorages            []forkliftv1beta1.DestinationStorage
		defaultTargetStorageClass string
		expectedDefaultClass      string
	}{
		{
			name: "User-defined default takes priority",
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "auto-selected"},
				{StorageClass: "other-class"},
			},
			defaultTargetStorageClass: "user-defined",
			expectedDefaultClass:      "user-defined",
		},
		{
			name: "Auto-selected when no user default",
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "first-available"},
				{StorageClass: "second-class"},
			},
			defaultTargetStorageClass: "",
			expectedDefaultClass:      "first-available",
		},
		{
			name:                      "Empty when no targets and no user default",
			targetStorages:            []forkliftv1beta1.DestinationStorage{},
			defaultTargetStorageClass: "",
			expectedDefaultClass:      "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			storageMapper := NewOpenShiftStorageMapper()
			sourceStorages := []ref.Ref{{Name: "test-source", ID: "src-1"}}
			opts := storagemapper.StorageMappingOptions{
				DefaultTargetStorageClass: tt.defaultTargetStorageClass,
				SourceProviderType:        "openshift",
				TargetProviderType:        "vsphere", // Force default behavior
			}
			pairs, err := storageMapper.CreateStoragePairs(sourceStorages, tt.targetStorages, opts)
			if err != nil {
				t.Fatalf("CreateStoragePairs() error = %v", err)
			}
			if len(pairs) > 0 {
				got := pairs[0].Destination.StorageClass
				if got != tt.expectedDefaultClass {
					t.Errorf("Default storage class: got %s, want %s", got, tt.expectedDefaultClass)
				}
			}
		})
	}
}
// TestCanMatchAllStoragesByName covers the all-or-nothing name-match
// predicate, including the edge cases: empty sources (vacuously true),
// empty targets (false), and targets with empty class names (ignored).
func TestCanMatchAllStoragesByName(t *testing.T) {
	tests := []struct {
		name           string
		sourceStorages []ref.Ref
		targetStorages []forkliftv1beta1.DestinationStorage
		expected       bool
	}{
		{
			name: "All sources match",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd"},
				{Name: "slow-hdd"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "fast-ssd"},
				{StorageClass: "slow-hdd"},
				{StorageClass: "nvme-storage"},
			},
			expected: true,
		},
		{
			name: "Some sources don't match",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd"},
				{Name: "unknown-storage"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "fast-ssd"},
				{StorageClass: "slow-hdd"},
			},
			expected: false,
		},
		{
			name:           "Empty sources",
			sourceStorages: []ref.Ref{},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "any-storage"},
			},
			expected: true,
		},
		{
			name: "Empty targets",
			sourceStorages: []ref.Ref{
				{Name: "some-storage"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{},
			expected:       false,
		},
		{
			name: "Target with empty storage class name",
			sourceStorages: []ref.Ref{
				{Name: "fast-ssd"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: ""},
				{StorageClass: "fast-ssd"},
			},
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := canMatchAllStoragesByName(tt.sourceStorages, tt.targetStorages)
			if result != tt.expected {
				t.Errorf("canMatchAllStoragesByName() = %v, want %v", result, tt.expected)
			}
		})
	}
}
// TestCreateSameNameStoragePairs checks that same-name pairing maps each
// source to the identically named target and ignores extra target classes.
// Pair iteration order is not assumed; mappings are checked via a lookup map.
func TestCreateSameNameStoragePairs(t *testing.T) {
	sourceStorages := []ref.Ref{
		{Name: "fast-ssd", ID: "src-1"},
		{Name: "slow-hdd", ID: "src-2"},
	}
	targetStorages := []forkliftv1beta1.DestinationStorage{
		{StorageClass: "fast-ssd"},
		{StorageClass: "slow-hdd"},
		{StorageClass: "extra-storage"},
	}
	pairs, err := createSameNameStoragePairs(sourceStorages, targetStorages)
	if err != nil {
		t.Fatalf("createSameNameStoragePairs() error = %v", err)
	}
	if len(pairs) != 2 {
		t.Errorf("Expected 2 pairs, got %d", len(pairs))
	}
	// Verify mappings
	expectedMappings := map[string]string{
		"fast-ssd": "fast-ssd",
		"slow-hdd": "slow-hdd",
	}
	for _, pair := range pairs {
		expectedTarget, exists := expectedMappings[pair.Source.Name]
		if !exists {
			t.Errorf("Unexpected source storage: %s", pair.Source.Name)
			continue
		}
		if pair.Destination.StorageClass != expectedTarget {
			t.Errorf("Source %s mapped to %s, expected %s",
				pair.Source.Name, pair.Destination.StorageClass, expectedTarget)
		}
	}
}
// TestOpenShiftStorageMapper_ImplementsInterface is a compile-time assertion
// that *OpenShiftStorageMapper satisfies storagemapper.StorageMapper.
func TestOpenShiftStorageMapper_ImplementsInterface(t *testing.T) {
	var _ storagemapper.StorageMapper = (*OpenShiftStorageMapper)(nil)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/openstack/mapper.go | Go | package openstack
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// OpenStackStorageMapper implements storage mapping for OpenStack providers.
type OpenStackStorageMapper struct{}

// NewOpenStackStorageMapper returns an OpenStack storage mapper behind the
// generic mapper.StorageMapper interface.
func NewOpenStackStorageMapper() mapper.StorageMapper {
	return new(OpenStackStorageMapper)
}
// CreateStoragePairs maps every source storage to a single default target
// storage class (no same-name matching for OpenStack).
func (m *OpenStackStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	klog.V(4).Infof("DEBUG: OpenStack storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))
	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return nil, nil
	}

	// Every source shares the one resolved default destination.
	destination := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", destination.StorageClass)
	pairs := make([]forkliftv1beta1.StoragePair, 0, len(sourceStorages))
	for _, source := range sourceStorages {
		pairs = append(pairs, forkliftv1beta1.StoragePair{
			Source:      source,
			Destination: destination,
		})
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s", source.Name, destination.StorageClass)
	}

	klog.V(4).Infof("DEBUG: Created %d storage pairs", len(pairs))
	return pairs, nil
}
// findDefaultStorageClass resolves the destination storage class used for
// every source. Resolution order: an explicit user-supplied class wins;
// otherwise the first target storage (pre-ranked by FetchTargetStorages);
// otherwise an empty class, deferring to the cluster's system default.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	if userClass := opts.DefaultTargetStorageClass; userClass != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", userClass)
		return forkliftv1beta1.DestinationStorage{StorageClass: userClass}
	}
	if len(targetStorages) == 0 {
		klog.V(4).Infof("DEBUG: No storage classes available, using system default")
		return forkliftv1beta1.DestinationStorage{}
	}
	chosen := targetStorages[0]
	klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", chosen.StorageClass)
	return chosen
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/ova/mapper.go | Go | package ova
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// OVAStorageMapper implements storage mapping for OVA providers.
// It carries no state; a fresh zero value is returned per call.
type OVAStorageMapper struct{}

// NewOVAStorageMapper creates a new OVA storage mapper.
func NewOVAStorageMapper() mapper.StorageMapper {
	m := &OVAStorageMapper{}
	return m
}
// CreateStoragePairs builds storage mapping pairs for OVA sources. The OVA
// mapper uses the generic strategy: every source maps to one default target
// storage class, with no attempt at same-name matching.
func (m *OVAStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	klog.V(4).Infof("DEBUG: OVA storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))

	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return nil, nil
	}

	// All sources share the same resolved default destination.
	dest := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", dest.StorageClass)

	pairs := make([]forkliftv1beta1.StoragePair, len(sourceStorages))
	for i := range sourceStorages {
		pairs[i] = forkliftv1beta1.StoragePair{Source: sourceStorages[i], Destination: dest}
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s", sourceStorages[i].Name, dest.StorageClass)
	}
	klog.V(4).Infof("DEBUG: Created %d storage pairs", len(pairs))
	return pairs, nil
}
// findDefaultStorageClass picks the default destination storage class,
// preserving the original priority logic.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	// Priority 1: a user-specified default storage class takes precedence.
	if opts.DefaultTargetStorageClass != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", opts.DefaultTargetStorageClass)
		return forkliftv1beta1.DestinationStorage{
			StorageClass: opts.DefaultTargetStorageClass,
		}
	}
	// Priorities 2-5: the first target is the one FetchTargetStorages ranked
	// highest (virt annotation -> k8s annotation -> "virtualization" in the
	// name -> first available).
	if len(targetStorages) != 0 {
		first := targetStorages[0]
		klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", first.StorageClass)
		return first
	}
	// Priority 6: an empty class defers to the cluster's system default.
	klog.V(4).Infof("DEBUG: No storage classes available, using system default")
	return forkliftv1beta1.DestinationStorage{}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/ovirt/mapper.go | Go | package ovirt
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// OvirtStorageMapper implements storage mapping for oVirt providers.
// Stateless: the zero value is sufficient.
type OvirtStorageMapper struct{}

// NewOvirtStorageMapper creates a new oVirt storage mapper.
func NewOvirtStorageMapper() mapper.StorageMapper {
	mapperImpl := &OvirtStorageMapper{}
	return mapperImpl
}
// CreateStoragePairs builds storage mapping pairs for oVirt sources using the
// generic strategy: all sources are mapped to one default target storage class
// (same-name matching is intentionally not performed).
func (m *OvirtStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	klog.V(4).Infof("DEBUG: oVirt storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))

	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return nil, nil
	}

	defaultDest := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", defaultDest.StorageClass)

	result := make([]forkliftv1beta1.StoragePair, 0, len(sourceStorages))
	for _, source := range sourceStorages {
		result = append(result, forkliftv1beta1.StoragePair{
			Source:      source,
			Destination: defaultDest,
		})
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s", source.Name, defaultDest.StorageClass)
	}
	klog.V(4).Infof("DEBUG: Created %d storage pairs", len(result))
	return result, nil
}
// findDefaultStorageClass resolves the default destination storage class using
// the original priority order.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	// Priority 1: honor an explicit user choice.
	if requested := opts.DefaultTargetStorageClass; requested != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", requested)
		return forkliftv1beta1.DestinationStorage{StorageClass: requested}
	}
	// Priorities 2-5: FetchTargetStorages sorted the targets by preference
	// (virt annotation -> k8s annotation -> name with "virtualization" ->
	// first available), so index 0 is the auto-selected default.
	if len(targetStorages) > 0 {
		selected := targetStorages[0]
		klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", selected.StorageClass)
		return selected
	}
	// Priority 6: empty storage class means "use the cluster default".
	klog.V(4).Infof("DEBUG: No storage classes available, using system default")
	return forkliftv1beta1.DestinationStorage{}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/vsphere/mapper.go | Go | package vsphere
import (
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// VSphereStorageMapper implements storage mapping for vSphere providers.
// It holds no state; the zero value works.
type VSphereStorageMapper struct{}

// NewVSphereStorageMapper creates a new vSphere storage mapper.
func NewVSphereStorageMapper() mapper.StorageMapper {
	var m VSphereStorageMapper
	return &m
}
// CreateStoragePairs builds storage mapping pairs for vSphere datastores.
// Every source datastore is mapped to a single default target storage class;
// same-name matching is deliberately not attempted.
func (m *VSphereStorageMapper) CreateStoragePairs(sourceStorages []ref.Ref, targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) ([]forkliftv1beta1.StoragePair, error) {
	klog.V(4).Infof("DEBUG: vSphere storage mapper - Creating storage pairs - %d source storages, %d target storages", len(sourceStorages), len(targetStorages))

	if len(sourceStorages) == 0 {
		klog.V(4).Infof("DEBUG: No source storages to map")
		return nil, nil
	}

	destination := findDefaultStorageClass(targetStorages, opts)
	klog.V(4).Infof("DEBUG: Selected default storage class: %s", destination.StorageClass)

	pairs := make([]forkliftv1beta1.StoragePair, len(sourceStorages))
	for idx, src := range sourceStorages {
		pairs[idx] = forkliftv1beta1.StoragePair{Source: src, Destination: destination}
		klog.V(4).Infof("DEBUG: Mapped source storage %s -> %s", src.Name, destination.StorageClass)
	}
	klog.V(4).Infof("DEBUG: Created %d storage pairs", len(pairs))
	return pairs, nil
}
// findDefaultStorageClass picks the single default destination storage class,
// keeping the original priority chain intact.
func findDefaultStorageClass(targetStorages []forkliftv1beta1.DestinationStorage, opts mapper.StorageMappingOptions) forkliftv1beta1.DestinationStorage {
	// Priority 1: the user's explicit default wins outright.
	if explicit := opts.DefaultTargetStorageClass; explicit != "" {
		klog.V(4).Infof("DEBUG: Using user-defined default storage class: %s", explicit)
		return forkliftv1beta1.DestinationStorage{StorageClass: explicit}
	}
	// Priorities 2-5: rely on the ordering produced by FetchTargetStorages
	// (virt annotation -> k8s annotation -> name with "virtualization" ->
	// first available); its first entry is the auto-selected default.
	if len(targetStorages) != 0 {
		auto := targetStorages[0]
		klog.V(4).Infof("DEBUG: Using auto-selected storage class: %s", auto.StorageClass)
		return auto
	}
	// Priority 6: no candidates at all — defer to the cluster default.
	klog.V(4).Infof("DEBUG: No storage classes available, using system default")
	return forkliftv1beta1.DestinationStorage{}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/plan/storage/mapper/vsphere/mapper_test.go | Go | package vsphere
import (
"testing"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/ref"
storagemapper "github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan/storage/mapper"
)
// TestVSphereStorageMapper_CreateStoragePairs_DefaultBehavior exercises the
// generic default-mapping behavior of the vSphere mapper: all sources map to
// a single target, a user-supplied default class overrides the auto-selected
// one, and an empty class (system default) is the final fallback.
func TestVSphereStorageMapper_CreateStoragePairs_DefaultBehavior(t *testing.T) {
	tests := []struct {
		name                      string
		sourceStorages            []ref.Ref
		targetStorages            []forkliftv1beta1.DestinationStorage
		defaultTargetStorageClass string // user-supplied default (priority 1); empty means none
		expectedPairs             int
		expectedAllToSameTarget   bool
		expectedTargetClass       string // expected destination class; "" skips the check
	}{
		{
			name: "All sources map to first target storage",
			sourceStorages: []ref.Ref{
				{Name: "datastore1", ID: "ds-1"},
				{Name: "datastore2", ID: "ds-2"},
				{Name: "datastore3", ID: "ds-3"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "ocs-storagecluster-ceph-rbd"},
				{StorageClass: "ocs-storagecluster-ceph-rbd-virtualization"},
			},
			expectedPairs:           3,
			expectedAllToSameTarget: true,
			expectedTargetClass:     "ocs-storagecluster-ceph-rbd",
		},
		{
			name: "User-defined default takes priority",
			sourceStorages: []ref.Ref{
				{Name: "datastore1", ID: "ds-1"},
				{Name: "datastore2", ID: "ds-2"},
			},
			targetStorages: []forkliftv1beta1.DestinationStorage{
				{StorageClass: "auto-selected"},
			},
			defaultTargetStorageClass: "user-defined",
			expectedPairs:             2,
			expectedAllToSameTarget:   true,
			expectedTargetClass:       "user-defined",
		},
		{
			name: "Empty target storages with user default",
			sourceStorages: []ref.Ref{
				{Name: "datastore1", ID: "ds-1"},
			},
			targetStorages:            []forkliftv1beta1.DestinationStorage{},
			defaultTargetStorageClass: "user-defined",
			expectedPairs:             1,
			expectedAllToSameTarget:   true,
			expectedTargetClass:       "user-defined",
		},
		{
			name: "Empty target storages without user default",
			sourceStorages: []ref.Ref{
				{Name: "datastore1", ID: "ds-1"},
			},
			targetStorages:          []forkliftv1beta1.DestinationStorage{},
			expectedPairs:           1,
			expectedAllToSameTarget: true,
			expectedTargetClass:     "", // System default
		},
		{
			name:                    "Empty sources",
			sourceStorages:          []ref.Ref{},
			targetStorages:          []forkliftv1beta1.DestinationStorage{{StorageClass: "any"}},
			expectedPairs:           0,
			expectedAllToSameTarget: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			storageMapper := NewVSphereStorageMapper()
			opts := storagemapper.StorageMappingOptions{
				DefaultTargetStorageClass: tt.defaultTargetStorageClass,
			}
			pairs, err := storageMapper.CreateStoragePairs(tt.sourceStorages, tt.targetStorages, opts)
			if err != nil {
				t.Fatalf("CreateStoragePairs() error = %v", err)
			}
			if len(pairs) != tt.expectedPairs {
				t.Errorf("CreateStoragePairs() got %d pairs, want %d", len(pairs), tt.expectedPairs)
			}
			// Verify all pairs map to the same target (generic behavior)
			if tt.expectedAllToSameTarget && len(pairs) > 1 {
				firstTarget := pairs[0].Destination.StorageClass
				for i, pair := range pairs[1:] {
					if pair.Destination.StorageClass != firstTarget {
						t.Errorf("Pair %d: target %s != first target %s (expected all same)",
							i+1, pair.Destination.StorageClass, firstTarget)
					}
				}
			}
			// Verify expected target class
			if len(pairs) > 0 && tt.expectedTargetClass != "" {
				if pairs[0].Destination.StorageClass != tt.expectedTargetClass {
					t.Errorf("Target class: got %s, want %s",
						pairs[0].Destination.StorageClass, tt.expectedTargetClass)
				}
			}
			// Verify source names are preserved in order
			for i, pair := range pairs {
				if i < len(tt.sourceStorages) {
					if pair.Source.Name != tt.sourceStorages[i].Name {
						t.Errorf("Pair %d: source name %s != expected %s",
							i, pair.Source.Name, tt.sourceStorages[i].Name)
					}
				}
			}
		})
	}
}
// TestVSphereStorageMapper_NoSameNameMatching verifies that the vSphere mapper
// does NOT perform same-name matching: even when a source name equals a target
// storage class, every source still maps to the first target.
func TestVSphereStorageMapper_NoSameNameMatching(t *testing.T) {
	sources := []ref.Ref{
		{Name: "identical-name", ID: "src-1"},
		{Name: "another-name", ID: "src-2"},
	}
	targets := []forkliftv1beta1.DestinationStorage{
		{StorageClass: "identical-name"}, // deliberately collides with a source name
		{StorageClass: "different-name"},
	}
	opts := storagemapper.StorageMappingOptions{
		SourceProviderType: "vsphere",
		TargetProviderType: "openshift",
	}

	pairs, err := NewVSphereStorageMapper().CreateStoragePairs(sources, targets, opts)
	if err != nil {
		t.Fatalf("CreateStoragePairs() error = %v", err)
	}
	if len(pairs) != 2 {
		t.Fatalf("Expected 2 pairs, got %d", len(pairs))
	}

	const expectedTarget = "identical-name" // the first target storage class
	for i, pair := range pairs {
		if pair.Destination.StorageClass != expectedTarget {
			t.Errorf("Pair %d: got target %s, want %s (all should map to first target)",
				i, pair.Destination.StorageClass, expectedTarget)
		}
	}
}
// TestVSphereStorageMapper_ImplementsInterface is a compile-time assertion
// that *VSphereStorageMapper satisfies the StorageMapper interface.
func TestVSphereStorageMapper_ImplementsInterface(t *testing.T) {
	var impl storagemapper.StorageMapper = &VSphereStorageMapper{}
	_ = impl
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/create.go | Go | package provider
import (
"fmt"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/ec2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/generic"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/hyperv"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/openshift"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/openstack"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/ova"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/vsphere"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
corev1 "k8s.io/api/core/v1"
)
// Create creates a new provider of the requested type, dispatching to the
// type-specific creator, and prints the result to stdout.
func Create(configFlags *genericclioptions.ConfigFlags, providerType string, options providerutil.ProviderOptions) error {
	// EC2 accepts --provider-region-name as an alias for the EC2 region,
	// matching the documented CLI usage.
	if providerType == "ec2" && options.EC2Region == "" && options.RegionName != "" {
		options.EC2Region = options.RegionName
	}

	var (
		providerResource *forkliftv1beta1.Provider
		secretResource   *corev1.Secret
		err              error
	)

	// Dispatch to the creator for the requested provider type.
	switch providerType {
	case "vsphere":
		providerResource, secretResource, err = vsphere.CreateProvider(configFlags, options)
	case "ova":
		providerResource, secretResource, err = ova.CreateProvider(configFlags, options)
	case "hyperv":
		providerResource, secretResource, err = hyperv.CreateProvider(configFlags, options)
	case "openshift":
		providerResource, secretResource, err = openshift.CreateProvider(configFlags, options)
	case "ovirt":
		providerResource, secretResource, err = generic.CreateProvider(configFlags, options, "ovirt")
	case "openstack":
		providerResource, secretResource, err = openstack.CreateProvider(configFlags, options)
	case "ec2":
		providerResource, secretResource, err = ec2.CreateProvider(configFlags, options)
	default:
		// Unknown types fall through to the generic creator, which supports
		// DynamicProvider CRs defined in the cluster.
		providerResource, secretResource, err = generic.CreateProvider(configFlags, options, providerType)
	}
	if err != nil {
		return fmt.Errorf("failed to prepare provider: %v", err)
	}

	// Report what was created (or reused) to the user.
	fmt.Printf("provider/%s created\n", providerResource.Name)
	switch {
	case secretResource != nil:
		fmt.Printf("Created secret '%s' for provider authentication\n", secretResource.Name)
	case options.Secret != "":
		fmt.Printf("Using existing secret '%s' for provider authentication\n", options.Secret)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/defaultprovider/openshift.go | Go | package defaultprovider
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// GetDefaultOpenShiftProvider returns the most suitable OpenShift provider in
// the given namespace: a provider with an empty spec.url (the local cluster)
// is preferred, otherwise the first OpenShift provider found is used.
// An error is returned when no OpenShift provider exists in the namespace.
func GetDefaultOpenShiftProvider(configFlags *genericclioptions.ConfigFlags, namespace string) (string, error) {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return "", fmt.Errorf("failed to get client: %v", err)
	}

	providers, err := c.Resource(client.ProvidersGVR).Namespace(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to list providers: %v", err)
	}

	firstMatch := ""
	for _, provider := range providers.Items {
		providerType, found, err := unstructured.NestedString(provider.Object, "spec", "type")
		if err != nil || !found || providerType != "openshift" {
			continue
		}
		// Remember the first OpenShift provider as a fallback.
		if firstMatch == "" {
			firstMatch = provider.GetName()
		}
		// An absent or empty spec.url marks the local-cluster provider,
		// which is the preferred default — return it immediately.
		url, found, err := unstructured.NestedString(provider.Object, "spec", "url")
		if err == nil && (!found || url == "") {
			return provider.GetName(), nil
		}
	}

	if firstMatch != "" {
		return firstMatch, nil
	}
	return "", fmt.Errorf("no OpenShift provider found in namespace '%s'", namespace)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/ec2/cluster.go | Go | package ec2
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
const (
	// awsCredsSecretNamespace is the namespace where the AWS credentials secret is stored
	awsCredsSecretNamespace = "kube-system"
	// awsCredsSecretName is the name of the AWS credentials secret
	// (typically created by the OpenShift installer on AWS clusters)
	awsCredsSecretName = "aws-creds"
	// workerNodeLabel is the label used to identify worker nodes
	workerNodeLabel = "node-role.kubernetes.io/worker"
	// topologyZoneLabel is the label used to identify the availability zone
	topologyZoneLabel = "topology.kubernetes.io/zone"
)

// AWSClusterCredentials holds the AWS credentials fetched from the cluster
type AWSClusterCredentials struct {
	AccessKeyID     string // value of the secret's "aws_access_key_id" key
	SecretAccessKey string // value of the secret's "aws_secret_access_key" key
}
// FetchAWSCredentialsFromCluster reads AWS credentials from the
// kube-system/aws-creds secret, which the OpenShift installer typically
// creates on AWS clusters.
func FetchAWSCredentialsFromCluster(configFlags *genericclioptions.ConfigFlags) (*AWSClusterCredentials, error) {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	secret, err := k8sClient.CoreV1().Secrets(awsCredsSecretNamespace).Get(
		context.Background(), awsCredsSecretName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get AWS credentials secret '%s/%s': %v. "+
			"This secret is typically created by the OpenShift installer on AWS clusters",
			awsCredsSecretNamespace, awsCredsSecretName, err)
	}

	// Both keys are required; a partial secret is an error.
	keyID, ok := secret.Data["aws_access_key_id"]
	if !ok {
		return nil, fmt.Errorf("aws_access_key_id not found in secret '%s/%s'",
			awsCredsSecretNamespace, awsCredsSecretName)
	}
	secretKey, ok := secret.Data["aws_secret_access_key"]
	if !ok {
		return nil, fmt.Errorf("aws_secret_access_key not found in secret '%s/%s'",
			awsCredsSecretNamespace, awsCredsSecretName)
	}

	klog.V(2).Infof("Successfully fetched AWS credentials from cluster secret '%s/%s'",
		awsCredsSecretNamespace, awsCredsSecretName)
	return &AWSClusterCredentials{
		AccessKeyID:     string(keyID),
		SecretAccessKey: string(secretKey),
	}, nil
}
// FetchTargetAZFromCluster detects the target availability zone from worker
// node labels, returning the first zone found on a worker node.
func FetchTargetAZFromCluster(configFlags *genericclioptions.ConfigFlags) (string, error) {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return "", fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	nodeList, err := k8sClient.CoreV1().Nodes().List(
		context.Background(),
		metav1.ListOptions{LabelSelector: workerNodeLabel},
	)
	if err != nil {
		return "", fmt.Errorf("failed to list worker nodes: %v", err)
	}
	if len(nodeList.Items) == 0 {
		return "", fmt.Errorf("no worker nodes found with label '%s'", workerNodeLabel)
	}

	// Return the zone of the first worker node carrying the topology label.
	for _, node := range nodeList.Items {
		zone := node.Labels[topologyZoneLabel]
		if zone == "" {
			continue
		}
		klog.V(2).Infof("Detected target availability zone '%s' from worker node '%s'",
			zone, node.Name)
		return zone, nil
	}
	return "", fmt.Errorf("no worker node found with topology zone label '%s'", topologyZoneLabel)
}
// AutoPopulateTargetOptions auto-fetches EC2 target credentials, availability
// zone, and region from the cluster for any of the target values that are
// empty. The provided pointers are updated in place.
func AutoPopulateTargetOptions(configFlags *genericclioptions.ConfigFlags, targetAccessKeyID, targetSecretKey, targetAZ, targetRegion *string) error {
	// Credentials come from the installer-managed kube-system/aws-creds secret.
	if *targetAccessKeyID == "" || *targetSecretKey == "" {
		clusterCreds, err := FetchAWSCredentialsFromCluster(configFlags)
		if err != nil {
			return fmt.Errorf("failed to auto-fetch target credentials: %v", err)
		}
		if *targetAccessKeyID == "" {
			*targetAccessKeyID = clusterCreds.AccessKeyID
			fmt.Printf("Auto-detected target access key ID from cluster secret\n")
		}
		if *targetSecretKey == "" {
			*targetSecretKey = clusterCreds.SecretAccessKey
			fmt.Printf("Auto-detected target secret access key from cluster secret\n")
		}
	}

	// A user-supplied AZ short-circuits zone (and region) detection.
	if *targetAZ != "" {
		return nil
	}

	detectedAZ, err := FetchTargetAZFromCluster(configFlags)
	if err != nil {
		return fmt.Errorf("failed to auto-detect target availability zone: %v", err)
	}
	*targetAZ = detectedAZ
	fmt.Printf("Auto-detected target availability zone: %s\n", detectedAZ)

	// Derive the region by stripping the trailing zone letter
	// (e.g. "us-east-1a" -> "us-east-1").
	if *targetRegion == "" && len(detectedAZ) > 1 {
		*targetRegion = detectedAZ[:len(detectedAZ)-1]
		fmt.Printf("Auto-detected target region: %s\n", *targetRegion)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/ec2/ec2.go | Go | package ec2
import (
"context"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// getAWSEndpoint returns the EC2 API endpoint URL for the given AWS region.
//
// China partition regions ("cn-*") use the amazonaws.com.cn domain; all other
// partitions — standard and GovCloud ("us-gov-*") — use amazonaws.com, so a
// single default branch covers both (the original code carried a separate
// GovCloud branch that returned the identical string).
func getAWSEndpoint(region string) string {
	// China regions use the .cn domain.
	if strings.HasPrefix(region, "cn-") {
		return fmt.Sprintf("https://ec2.%s.amazonaws.com.cn", region)
	}
	// Standard AWS and GovCloud partitions share the same endpoint format.
	return fmt.Sprintf("https://ec2.%s.amazonaws.com", region)
}
// validateProviderOptions validates the options for creating an EC2 provider.
// URL, CA cert, and insecure-skip-TLS are optional for EC2; the region is
// mandatory, and credentials must come from either a secret or an inline
// access key / secret key pair — not both.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	switch {
	case options.Name == "":
		return fmt.Errorf("provider name is required")
	case options.Namespace == "":
		return fmt.Errorf("provider namespace is required")
	case options.EC2Region == "":
		return fmt.Errorf("EC2 region is required")
	}

	hasInlineCreds := options.Username != "" || options.Password != ""
	if options.Secret != "" && hasInlineCreds {
		return fmt.Errorf("if a secret is provided, username and password should not be specified")
	}
	if options.Secret == "" && (options.Username == "" || options.Password == "") {
		return fmt.Errorf("if no secret is provided, both access key ID (username) and secret access key (password) must be specified")
	}
	return nil
}
// cleanupCreatedResources best-effort deletes resources created during a
// failed provider creation (currently just the authentication secret).
// Errors are intentionally swallowed: this runs on a failure path where the
// original creation error is the one worth reporting.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	if secret == nil {
		return
	}
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	_ = dynamicClient.Resource(client.SecretsGVR).Namespace(namespace).Delete(
		context.Background(),
		secret.Name,
		metav1.DeleteOptions{},
	)
}
// createTypedProvider creates the provider through the dynamic client and
// returns the server-side result converted back into a typed Provider.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// The dynamic client only speaks unstructured objects, so round-trip the
	// typed provider through the unstructured converter.
	objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured format: %v", err)
	}

	created, err := dynamicClient.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.Background(),
		&unstructured.Unstructured{Object: objMap},
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, err
	}

	// Convert the server response back to the typed form for the caller.
	typed := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, typed); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return typed, nil
}
// CreateProvider implements the ProviderCreator interface for EC2.
//
// Flow:
//  1. Optionally auto-populate target credentials/AZ/region from the cluster.
//  2. Validate the options.
//  3. Build the Provider spec (type "ec2", endpoint URL, target-region and
//     target-az settings).
//  4. Create a new authentication Secret unless an existing one was named.
//  5. Create the Provider; on failure, delete the secret created in step 4.
//  6. Set the secret's ownership to the provider; if only this step fails,
//     the created secret is still returned alongside the error.
//
// Returns the created provider and the created secret (nil when an existing
// secret was referenced via options.Secret).
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Auto-fetch target credentials and target-az from cluster if requested
	if options.AutoTargetCredentials {
		if err := AutoPopulateTargetOptions(configFlags, &options.EC2TargetAccessKeyID, &options.EC2TargetSecretKey, &options.EC2TargetAZ, &options.EC2TargetRegion); err != nil {
			return nil, nil, err
		}
	}
	// Validate required fields
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type
	providerTypeValue := forkliftv1beta1.ProviderType("ec2")
	provider.Spec.Type = &providerTypeValue
	// Set URL - use provided URL or construct default AWS regional endpoint
	providerURL := options.URL
	if providerURL == "" {
		// Construct default AWS EC2 regional endpoint
		// Handle different AWS partitions (China regions use .cn domain)
		providerURL = getAWSEndpoint(options.EC2Region)
	}
	provider.Spec.URL = providerURL
	// Always set target-region: use provided value, or default to provider region
	targetRegion := options.EC2TargetRegion
	if targetRegion == "" {
		targetRegion = options.EC2Region
	}
	// Always set target-az: use provided value, or default to target-region + 'a'
	targetAZ := options.EC2TargetAZ
	if targetAZ == "" {
		targetAZ = targetRegion + "a"
	}
	// Initialize settings map and set EC2-specific settings
	if provider.Spec.Settings == nil {
		provider.Spec.Settings = map[string]string{}
	}
	provider.Spec.Settings["target-region"] = targetRegion
	provider.Spec.Settings["target-az"] = targetAZ
	// Create and set the Secret
	var createdSecret *corev1.Secret
	var err error
	if options.Secret == "" {
		// Pass the providerURL (which may be default or custom) to secret creation
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name,
			options.Username, options.Password, providerURL, options.CACert, options.EC2Region, options.InsecureSkipTLS,
			options.EC2TargetAccessKeyID, options.EC2TargetSecretKey)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create EC2 secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	} else {
		// An existing secret was supplied; reference it without creating one.
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails
		cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		return nil, nil, fmt.Errorf("failed to create EC2 provider: %v", err)
	}
	// Set the secret ownership to the provider
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			// Provider exists; return it along with the secret and the error.
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/ec2/secrets.go | Go | package ec2
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createSecret builds and creates the credentials Secret for an EC2 provider.
// Values are stored as raw bytes; the Kubernetes API base64-encodes them.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, accessKeyID, secretAccessKey, url, cacert, region string, insecureSkipTLS bool, targetAccessKeyID, targetSecretAccessKey string) (*corev1.Secret, error) {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Mandatory entries: source credentials, endpoint URL, and region.
	data := map[string][]byte{
		"accessKeyId":     []byte(accessKeyID),
		"secretAccessKey": []byte(secretAccessKey),
		"url":             []byte(url),
		"region":          []byte(region),
	}

	// Optional TLS settings.
	if insecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	if cacert != "" {
		data["cacert"] = []byte(cacert)
	}

	// Optional cross-account (target) credentials.
	if targetAccessKeyID != "" {
		data["targetAccessKeyId"] = []byte(targetAccessKeyID)
	}
	if targetSecretAccessKey != "" {
		data["targetSecretAccessKey"] = []byte(targetSecretAccessKey)
	}

	// GenerateName lets the API server pick a unique suffix.
	newSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-ec2-", providerName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForProviderType": "ec2",
				"createdForResourceType": "providers",
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}

	return clientset.CoreV1().Secrets(namespace).Create(context.Background(), newSecret, metav1.CreateOptions{})
}
// setSecretOwnership makes the provider an owner of the secret so that the
// secret is garbage-collected together with the provider. Idempotent: does
// nothing when the provider already owns the secret.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Re-read the secret so we append to its latest owner-reference list.
	latest, err := clientset.CoreV1().Secrets(secret.Namespace).Get(
		context.Background(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}

	// Nothing to do when this provider already appears among the owners.
	for _, owner := range latest.OwnerReferences {
		if owner.UID == provider.UID {
			return nil
		}
	}

	latest.OwnerReferences = append(latest.OwnerReferences, metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	})

	if _, err = clientset.CoreV1().Secrets(secret.Namespace).Update(
		context.Background(),
		latest,
		metav1.UpdateOptions{},
	); err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/generic/generic.go | Go | package generic
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating a generic
// (oVirt/OpenStack) provider.
//
// Accepted credential combinations:
//   - an existing secret name (and no inline credentials), or
//   - a token, or
//   - a username and password.
//
// providerType is part of the shared validator signature; it is not needed
// for these checks.
func validateProviderOptions(options providerutil.ProviderOptions, providerType string) error {
	if options.Name == "" {
		return fmt.Errorf("provider name is required")
	}
	if options.Namespace == "" {
		return fmt.Errorf("provider namespace is required")
	}
	if options.URL == "" {
		return fmt.Errorf("provider URL is required")
	}
	// An existing secret must not be mixed with inline credentials.
	if options.Secret != "" && (options.Username != "" || options.Password != "" || options.Token != "") {
		return fmt.Errorf("if a secret is provided, username, password, and token should not be specified")
	}
	// Inline credentials are only required when no existing secret is
	// referenced. (Previously username/password were demanded whenever the
	// token was empty — even when a secret was supplied — which made
	// secret-only creation impossible, contradicting the rule above.)
	if options.Secret == "" && options.Token == "" {
		if options.Username == "" {
			return fmt.Errorf("provider username is required (unless token is provided)")
		}
		if options.Password == "" {
			return fmt.Errorf("provider password is required (unless token is provided)")
		}
	}
	return nil
}
// cleanupCreatedResources deletes any resources created during the provider
// creation process. Best-effort: client errors abort silently, delete
// failures are only reported as a warning.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	if secret == nil {
		return
	}
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	if err := dynamicClient.Resource(client.SecretsGVR).Namespace(namespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil {
		fmt.Printf("Warning: failed to clean up secret %s: %v\n", secret.Name, err)
	}
}
// setSecretOwnership makes the provider an owner of the secret so that the
// secret is garbage-collected with the provider. Idempotent: a provider that
// already owns the secret is not added twice.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get kubernetes client: %v", err)
	}

	// Fetch the latest copy of the secret before touching owner references.
	latest, err := clientset.CoreV1().Secrets(secret.Namespace).Get(
		context.TODO(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}

	// Skip when this provider already appears among the owners.
	for _, owner := range latest.OwnerReferences {
		if owner.UID == provider.UID {
			return nil
		}
	}

	latest.OwnerReferences = append(latest.OwnerReferences, metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	})

	if _, err := clientset.CoreV1().Secrets(secret.Namespace).Update(
		context.TODO(),
		latest,
		metav1.UpdateOptions{},
	); err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
// createTypedProvider creates the provider through the dynamic client and
// returns the server's response converted back to a typed Provider.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// The dynamic client only speaks unstructured maps, so round-trip the
	// typed object through the converter.
	providerMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured: %v", err)
	}

	created, err := dynamicClient.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.TODO(), &unstructured.Unstructured{Object: providerMap}, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to create provider: %v", err)
	}

	typedProvider := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, typedProvider); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return typedProvider, nil
}
// CreateProvider implements a generic provider creator for oVirt and OpenStack.
//
// It builds a Forklift Provider of the given providerType and, unless an
// existing secret name is supplied in options.Secret, a credentials Secret to
// go with it. Returns the created Provider, the Secret created here (nil when
// an existing secret was referenced), and any error. A Secret created here is
// deleted again if the provider creation fails, and otherwise made owned by
// the provider so it is garbage-collected together with it.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions, providerType string) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Validate required fields
	if err := validateProviderOptions(options, providerType); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type and URL
	providerTypeValue := forkliftv1beta1.ProviderType(providerType)
	provider.Spec.Type = &providerTypeValue
	provider.Spec.URL = options.URL
	// Create and set the Secret
	var createdSecret *corev1.Secret
	var err error
	if options.Secret == "" {
		// No existing secret referenced: create one from the inline
		// credentials (token or user/password, plus optional TLS and
		// OpenStack identity fields).
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name,
			options.Username, options.Password, options.URL, options.CACert, options.Token, options.InsecureSkipTLS,
			options.DomainName, options.ProjectName, options.RegionName, providerType)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create %s secret: %v", providerType, err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	} else {
		// Reference the caller-supplied secret instead of creating one.
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails
		// (cleanupCreatedResources is a no-op when createdSecret is nil).
		cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		return nil, nil, fmt.Errorf("failed to create %s provider: %v", providerType, err)
	}
	// Set the secret ownership to the provider so the secret is
	// garbage-collected when the provider is deleted.
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/generic/secrets.go | Go | package generic
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createSecret creates a secret for generic providers (oVirt, OpenStack).
// Authentication is stored as either a token or a user/password pair, plus
// optional TLS material and OpenStack identity-scoping fields.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, user, password, url, cacert, token string, insecureSkipTLS bool, domainName, projectName, regionName, providerType string) (*corev1.Secret, error) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// The URL is always stored; a token takes precedence over user/password.
	data := map[string][]byte{
		"url": []byte(url),
	}
	if token != "" {
		data["token"] = []byte(token)
	} else {
		data["user"] = []byte(user)
		data["password"] = []byte(password)
	}

	// Optional TLS settings.
	if cacert != "" {
		data["cacert"] = []byte(cacert)
	}
	if insecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}

	// OpenStack carries extra identity-scoping fields.
	if providerType == "openstack" {
		if domainName != "" {
			data["domainName"] = []byte(domainName)
		}
		if projectName != "" {
			data["projectName"] = []byte(projectName)
		}
		if regionName != "" {
			data["regionName"] = []byte(regionName)
		}
	}

	newSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-provider-secret", providerName),
			Namespace: namespace,
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}

	// The dynamic client needs an unstructured object, so round-trip the
	// typed secret through the converter.
	secretMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(newSecret)
	if err != nil {
		return nil, fmt.Errorf("failed to convert secret to unstructured: %v", err)
	}

	createdUnstructured, err := dynamicClient.Resource(client.SecretsGVR).Namespace(namespace).Create(context.TODO(), &unstructured.Unstructured{Object: secretMap}, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to create secret: %v", err)
	}

	typedSecret := &corev1.Secret{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdUnstructured.Object, typedSecret); err != nil {
		return nil, fmt.Errorf("failed to convert secret from unstructured: %v", err)
	}
	return typedSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/hyperv/hyperv.go | Go | package hyperv
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating a HyperV
// provider: name, namespace, host URL, and SMB share are mandatory;
// credentials come either inline (username + password) or from an existing
// secret, never both.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	switch {
	case options.Name == "":
		return fmt.Errorf("provider name is required")
	case options.Namespace == "":
		return fmt.Errorf("provider namespace is required")
	case options.URL == "":
		return fmt.Errorf("provider URL is required (HyperV host IP or URL, e.g. 192.168.1.100)")
	case options.SMBUrl == "":
		return fmt.Errorf("--smb-url is required for HyperV provider (SMB share path, e.g. //server/share)")
	}
	if options.Secret == "" {
		// Inline credentials are mandatory when no secret is referenced.
		if options.Username == "" {
			return fmt.Errorf("username is required for HyperV provider")
		}
		if options.Password == "" {
			return fmt.Errorf("password is required for HyperV provider")
		}
	} else if options.Username != "" || options.Password != "" {
		return fmt.Errorf("if a secret is provided, username and password should not be specified")
	}
	return nil
}
// cleanupCreatedResources deletes any resources created during the provider
// creation process. Best-effort: all errors are deliberately ignored.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	if secret == nil {
		return
	}
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	_ = c.Resource(client.SecretsGVR).Namespace(namespace).Delete(
		context.Background(),
		secret.Name,
		metav1.DeleteOptions{},
	)
}
// createTypedProvider creates the provider through the dynamic client and
// returns the server's response converted back to a typed Provider.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// The dynamic client works on unstructured objects, so convert first.
	asMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured format: %v", err)
	}

	created, err := c.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.Background(),
		&unstructured.Unstructured{Object: asMap},
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, err
	}

	// Convert the server's response back into a typed Provider.
	result := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, result); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return result, nil
}
// CreateProvider implements the ProviderCreator interface for HyperV.
//
// It builds a Forklift Provider of type "hyperv" and, unless an existing
// secret name is supplied in options.Secret, a credentials Secret to go with
// it. Returns the created Provider, the Secret created here (nil when an
// existing secret was referenced), and any error. A Secret created here is
// deleted again if the provider creation fails, and otherwise made owned by
// the provider so it is garbage-collected together with it.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Validate required fields
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type and URL
	providerTypeValue := forkliftv1beta1.ProviderType("hyperv")
	provider.Spec.Type = &providerTypeValue
	provider.Spec.URL = options.URL
	// Create or use the Secret
	var createdSecret *corev1.Secret
	var err error
	if options.Secret == "" {
		// Create a new secret if none is provided
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name, options)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create HyperV secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	} else {
		// Use the existing secret
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails and we created it
		if createdSecret != nil {
			cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		}
		return nil, nil, fmt.Errorf("failed to create HyperV provider: %v", err)
	}
	// Set the secret ownership to the provider if we created the secret,
	// so the secret is garbage-collected when the provider is deleted.
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/hyperv/secrets.go | Go | package hyperv
import (
"context"
"encoding/json"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createSecret builds and creates the credentials Secret for a HyperV
// provider. Values are stored as raw bytes; the API base64-encodes them.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName string, options providerutil.ProviderOptions) (*corev1.Secret, error) {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Defensive re-check: the caller should already have validated these.
	if options.Username == "" || options.Password == "" {
		return nil, fmt.Errorf("username and password are required for HyperV provider")
	}

	// Mandatory host credentials.
	data := map[string][]byte{
		"username": []byte(options.Username),
		"password": []byte(options.Password),
	}

	// Optional SMB share settings and TLS material.
	if options.SMBUrl != "" {
		data["smbUrl"] = []byte(options.SMBUrl)
	}
	if options.SMBUser != "" {
		data["smbUser"] = []byte(options.SMBUser)
	}
	if options.SMBPassword != "" {
		data["smbPassword"] = []byte(options.SMBPassword)
	}
	if options.InsecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	if options.CACert != "" {
		data["cacert"] = []byte(options.CACert)
	}

	// GenerateName lets the API server pick a unique suffix.
	newSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-hyperv-", providerName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForProviderType": "hyperv",
				"createdForResourceType": "providers",
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}

	return clientset.CoreV1().Secrets(namespace).Create(context.Background(), newSecret, metav1.CreateOptions{})
}
// setSecretOwnership sets the provider as the owner of the secret so that the
// secret is garbage-collected together with the provider.
//
// A JSON merge patch replaces list fields wholesale, so we first read the
// secret's current owner references and patch with the merged list. Patching
// with only the new reference (as this function previously did) would
// silently drop any existing owners; this also matches the get/append
// behavior of the other provider packages and makes the call idempotent.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	// Get the Kubernetes client using configFlags
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}
	// Read the current secret so existing owner references are preserved
	currentSecret, err := k8sClient.CoreV1().Secrets(secret.Namespace).Get(
		context.Background(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}
	// Already an owner: nothing to do
	for _, existingOwner := range currentSecret.OwnerReferences {
		if existingOwner.UID == provider.UID {
			return nil
		}
	}
	// Append the provider to the existing owner references
	owners := append(currentSecret.OwnerReferences, metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	})
	// Patch the secret with the full merged owner-reference list
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"ownerReferences": owners,
		},
	}
	// Convert patch to JSON bytes
	patchBytes, err := json.Marshal(patch)
	if err != nil {
		return fmt.Errorf("failed to marshal patch data: %v", err)
	}
	// Apply the patch to the secret
	_, err = k8sClient.CoreV1().Secrets(secret.Namespace).Patch(
		context.Background(),
		secret.Name,
		types.MergePatchType,
		patchBytes,
		metav1.PatchOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to patch secret with owner reference: %v", err)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/openshift/openshift.go | Go | package openshift
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating an OpenShift
// provider. Only name and namespace are always required; the URL becomes
// mandatory when a token is supplied.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	switch {
	case options.Name == "":
		return fmt.Errorf("provider name is required")
	case options.Namespace == "":
		return fmt.Errorf("provider namespace is required")
	case options.Token != "" && options.URL == "":
		return fmt.Errorf("provider URL is required when token is provided")
	}
	return nil
}
// cleanupCreatedResources deletes any resources created during the provider
// creation process. Best-effort: all errors are deliberately ignored.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	if secret == nil {
		return
	}
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	_ = c.Resource(client.SecretsGVR).Namespace(namespace).Delete(
		context.Background(), secret.Name, metav1.DeleteOptions{})
}
// createTypedProvider creates the provider through the dynamic client and
// returns the server's response converted back to a typed Provider.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// The dynamic client works on unstructured objects, so convert first.
	asMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured format: %v", err)
	}

	created, err := c.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.Background(),
		&unstructured.Unstructured{Object: asMap},
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, err
	}

	// Convert the server's response back into a typed Provider.
	result := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, result); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return result, nil
}
// CreateProvider implements the ProviderCreator interface for OpenShift.
//
// The provider's secret reference comes from one of three places:
//   - an existing secret named by options.Secret,
//   - a new secret created here from options.Token, or
//   - nothing at all (no token and no secret), leaving Spec.Secret empty.
//
// Returns the created Provider, the Secret created here (nil when none was
// created), and any error. A secret created here is deleted again if the
// provider creation fails, and otherwise made owned by the provider so it is
// garbage-collected together with it.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Validate required fields
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type and URL (if provided)
	providerTypeValue := forkliftv1beta1.ProviderType("openshift")
	provider.Spec.Type = &providerTypeValue
	if options.URL != "" {
		provider.Spec.URL = options.URL
	}
	// Resolve the secret reference. Previously a secret was created
	// unconditionally whenever a token was given — leaking an orphan when an
	// existing secret was also specified, and creating a second secret in the
	// token-only path (the first call's error message even said "vSphere").
	// Create at most one secret, and only when no existing secret is named.
	var createdSecret *corev1.Secret
	var err error
	if options.Secret != "" {
		// Use the provided secret
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	} else if options.Token != "" {
		// Create a new secret from the token
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name,
			options.URL, options.Token, options.CACert, options.InsecureSkipTLS)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create OpenShift secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails and we created it
		if createdSecret != nil {
			cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		}
		return nil, nil, fmt.Errorf("failed to create OpenShift provider: %v", err)
	}
	// Set the secret ownership to the provider if we created a secret,
	// so the secret is garbage-collected when the provider is deleted.
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/openshift/secrets.go | Go | package openshift
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createSecret builds and creates the token Secret for an OpenShift provider.
// Values are stored as raw bytes; the Kubernetes API base64-encodes them.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, url, token, cacert string, insecureSkipTLS bool) (*corev1.Secret, error) {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Mandatory entries: the bearer token and the cluster URL.
	data := map[string][]byte{
		"token": []byte(token),
		"url":   []byte(url),
	}

	// Optional TLS settings.
	if insecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	if cacert != "" {
		data["cacert"] = []byte(cacert)
	}

	// GenerateName lets the API server pick a unique suffix.
	newSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-openshift-", providerName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForProviderType": "openshift",
				"createdForResourceType": "providers",
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}

	return clientset.CoreV1().Secrets(namespace).Create(context.Background(), newSecret, metav1.CreateOptions{})
}
// setSecretOwnership makes the provider an owner of the secret so that the
// secret is garbage-collected together with the provider. Idempotent: does
// nothing when the provider already owns the secret.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Re-read the secret so we append to its latest owner-reference list.
	latest, err := clientset.CoreV1().Secrets(secret.Namespace).Get(
		context.Background(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}

	// Nothing to do when this provider already appears among the owners.
	for _, owner := range latest.OwnerReferences {
		if owner.UID == provider.UID {
			return nil
		}
	}

	latest.OwnerReferences = append(latest.OwnerReferences, metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	})

	if _, err = clientset.CoreV1().Secrets(secret.Namespace).Update(
		context.Background(),
		latest,
		metav1.UpdateOptions{},
	); err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/openstack/openstack.go | Go | package openstack
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating an OpenStack provider.
//
// Accepted credential configurations (mutually exclusive):
//   - an existing secret (options.Secret) with no inline credentials, or
//   - an auth token, or
//   - a username/password pair.
//
// Fix: the previous ordering demanded a token or username/password
// unconditionally, so a secret-only configuration was always rejected,
// contradicting the "if a secret is provided ..." rule below; the trailing
// "if no secret is provided ..." check was consequently unreachable.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	if options.Name == "" {
		return fmt.Errorf("provider name is required")
	}
	if options.Namespace == "" {
		return fmt.Errorf("provider namespace is required")
	}
	if options.URL == "" {
		return fmt.Errorf("provider URL is required")
	}
	if options.Secret != "" {
		// The secret already carries the credentials; inline ones would be ambiguous.
		if options.Username != "" || options.Password != "" || options.Token != "" {
			return fmt.Errorf("if a secret is provided, username, password, and token should not be specified")
		}
		return nil
	}
	// No secret: either a token or a full username/password pair must be given.
	if options.Token == "" {
		if options.Username == "" {
			return fmt.Errorf("provider username is required (unless token is provided)")
		}
		if options.Password == "" {
			return fmt.Errorf("provider password is required (unless token is provided)")
		}
	}
	return nil
}
// cleanupCreatedResources deletes any resources created during the provider creation process.
// Cleanup is best-effort: a missing client aborts silently, while a failed
// delete is reported so the user can remove the orphaned secret by hand.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	// Nothing to do unless a secret was actually created.
	if secret == nil {
		return
	}
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	if delErr := dynamicClient.Resource(client.SecretsGVR).Namespace(namespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); delErr != nil {
		fmt.Printf("Warning: failed to clean up secret %s: %v\n", secret.Name, delErr)
	}
}
// setSecretOwnership sets the provider as the owner of the secret.
//
// The owner reference lets Kubernetes garbage collection delete the secret
// automatically when the provider is deleted. The update is idempotent: if
// the provider already appears in the secret's owner references, the
// function returns without touching the cluster.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get kubernetes client: %v", err)
	}
	// Get the current secret to safely append owner reference
	// (re-reading avoids clobbering references added since creation).
	currentSecret, err := k8sClient.CoreV1().Secrets(secret.Namespace).Get(
		context.TODO(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}
	// Create the owner reference from the provider's identity.
	ownerRef := metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	}
	// Check if this provider is already an owner to avoid duplicates
	for _, existingOwner := range currentSecret.OwnerReferences {
		if existingOwner.UID == provider.UID {
			return nil // Already an owner, nothing to do
		}
	}
	// Append the new owner reference to existing ones
	currentSecret.OwnerReferences = append(currentSecret.OwnerReferences, ownerRef)
	// Update the secret with the new owner reference
	_, err = k8sClient.CoreV1().Secrets(secret.Namespace).Update(
		context.TODO(),
		currentSecret,
		metav1.UpdateOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
// createTypedProvider creates an unstructured provider and converts it to a typed Provider.
// The dynamic client only accepts unstructured objects, so the typed
// Provider is round-tripped through the default converter.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// Typed -> unstructured.
	asMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured: %v", err)
	}

	created, err := dynClient.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.TODO(),
		&unstructured.Unstructured{Object: asMap},
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create provider: %v", err)
	}

	// Unstructured (server response) -> typed, for the caller.
	result := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, result); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return result, nil
}
// CreateProvider implements the ProviderCreator interface for OpenStack.
//
// Flow: validate options, build the Provider object, create (or reference)
// the credentials secret, create the Provider on the cluster, then make the
// provider the owner of any secret this call created. If provider creation
// fails after a secret was created here, that secret is cleaned up.
//
// Returns the created Provider, the secret created by this call (nil when an
// existing secret was referenced), and an error.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Validate required fields
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type and URL
	providerTypeValue := forkliftv1beta1.ProviderType("openstack")
	provider.Spec.Type = &providerTypeValue
	provider.Spec.URL = options.URL
	var createdSecret *corev1.Secret
	var err error
	// Handle secret creation
	if options.Secret != "" {
		// Use existing secret; it is assumed to live in the provider's namespace.
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	} else {
		// Create new secret holding the inline credentials, TLS material,
		// and OpenStack scoping fields (domain/project/region).
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name,
			options.Username, options.Password, options.URL, options.CACert, options.Token,
			options.InsecureSkipTLS, options.DomainName, options.ProjectName, options.RegionName)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create OpenStack secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails and we created it
		if createdSecret != nil {
			cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		}
		return nil, nil, fmt.Errorf("failed to create OpenStack provider: %v", err)
	}
	// Set the secret ownership to the provider if we created a secret, so it
	// is garbage-collected along with the provider.
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			// The provider exists; report the ownership failure but still return the secret.
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/openstack/secrets.go | Go | package openstack
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// createSecret creates a secret for OpenStack providers with correct field names.
// Values are stored as raw bytes; the API server performs base64 encoding.
// Authentication is either a token or a username/password pair.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, username, password, url, cacert, token string, insecureSkipTLS bool, domainName, projectName, regionName string) (*corev1.Secret, error) {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}

	// Assemble the secret payload.
	data := map[string][]byte{
		"url": []byte(url),
	}
	if token != "" {
		// Token auth takes precedence over username/password.
		data["token"] = []byte(token)
	} else {
		// OpenStack expects the key 'username' (not 'user').
		data["username"] = []byte(username)
		data["password"] = []byte(password)
	}
	if cacert != "" {
		data["cacert"] = []byte(cacert)
	}
	if insecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	// Optional OpenStack scoping fields, stored only when non-empty.
	for key, value := range map[string]string{
		"domainName":  domainName,
		"projectName": projectName,
		"regionName":  regionName,
	} {
		if value != "" {
			data[key] = []byte(value)
		}
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-openstack-secret", providerName),
			Namespace: namespace,
			Labels: map[string]string{
				"createdForProviderType": "openstack",
				"createdForResourceType": "providers",
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}

	// The dynamic client requires unstructured objects: convert, create on
	// the cluster, then convert the server's response back to a typed Secret.
	asMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(secret)
	if err != nil {
		return nil, fmt.Errorf("failed to convert secret to unstructured: %v", err)
	}
	createdObj, err := dynClient.Resource(client.SecretsGVR).Namespace(namespace).Create(context.TODO(), &unstructured.Unstructured{Object: asMap}, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to create secret: %v", err)
	}
	result := &corev1.Secret{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.Object, result); err != nil {
		return nil, fmt.Errorf("failed to convert secret from unstructured: %v", err)
	}
	return result, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/ova/ova.go | Go | package ova
import (
"context"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating an OVA provider.
// Name, namespace, and an NFS-format URL are all mandatory.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	// Required identity fields first.
	switch {
	case options.Name == "":
		return fmt.Errorf("provider name is required")
	case options.Namespace == "":
		return fmt.Errorf("provider namespace is required")
	case options.URL == "":
		return fmt.Errorf("provider URL is required")
	}
	// OVA providers read images over NFS only (server:path); reject
	// file://, http://, https:// and nfs:// style URLs.
	if !isValidNFSURL(options.URL) {
		return fmt.Errorf("OVA provider URL must be in NFS format (server:path), e.g., 'nfs.example.com:/path/to/ova-files' or '192.168.1.100:/exports/vm-images'")
	}
	return nil
}
// isValidNFSURL checks if the URL is in valid NFS format (server:path),
// e.g. "nfs.example.com:/path" or "192.168.1.100:/exports/vms".
// URLs carrying a protocol prefix are rejected outright.
// NOTE(review): requiring exactly one colon also rejects IPv6 server
// addresses — presumably intentional; confirm if IPv6 NFS is needed.
func isValidNFSURL(url string) bool {
	// Protocol-prefixed URLs are never valid NFS export specs.
	for _, prefix := range []string{"http://", "https://", "file://", "nfs://"} {
		if strings.HasPrefix(url, prefix) {
			return false
		}
	}
	// Exactly one colon, separating a non-blank server from a non-blank path.
	if strings.Count(url, ":") != 1 {
		return false
	}
	sep := strings.Index(url, ":")
	server := strings.TrimSpace(url[:sep])
	path := strings.TrimSpace(url[sep+1:])
	return server != "" && path != ""
}
// cleanupCreatedResources deletes any resources created during the provider creation process.
// Cleanup is best-effort: a missing client aborts silently, but a failed
// delete is now reported (matching the openstack provider package) so the
// user knows an orphaned secret needs manual removal; previously the delete
// error was silently discarded.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return
	}
	if secret != nil {
		err := dynamicClient.Resource(client.SecretsGVR).Namespace(namespace).Delete(
			context.Background(),
			secret.Name,
			metav1.DeleteOptions{},
		)
		if err != nil {
			fmt.Printf("Warning: failed to clean up secret %s: %v\n", secret.Name, err)
		}
	}
}
// createTypedProvider creates an unstructured provider and converts it to a typed Provider.
//
// The Forklift Provider resource is created through the dynamic client,
// which only accepts unstructured objects, so the typed Provider is
// converted to a map, created on the cluster, and the server's response
// converted back to the typed form the caller expects.
// Note: the Create error is returned unwrapped; callers add context.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}
	// Convert the provider object to unstructured format
	providerMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured format: %v", err)
	}
	// Create an *unstructured.Unstructured from the map
	providerUnstructured := &unstructured.Unstructured{Object: providerMap}
	createdUnstructProvider, err := dynamicClient.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.Background(),
		providerUnstructured,
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, err
	}
	// Convert unstructured provider to typed provider
	createdProvider := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdUnstructProvider.Object, createdProvider); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return createdProvider, nil
}
// CreateProvider implements the ProviderCreator interface for OVA.
// It validates the options, builds the Provider object, creates (or
// references) the URL secret, creates the Provider, then hands ownership
// of any freshly created secret to the provider. On provider-creation
// failure, a secret created by this call is cleaned up.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}

	// Assemble the Provider skeleton: identity, type, and NFS URL.
	ovaType := forkliftv1beta1.ProviderType("ova")
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	provider.Spec.Type = &ovaType
	provider.Spec.URL = options.URL

	// Reference an existing secret when given; otherwise mint a new one.
	var newSecret *corev1.Secret
	if options.Secret != "" {
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	} else {
		var err error
		newSecret, err = createSecret(configFlags, options.Namespace, options.Name, options.URL)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create OVA secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      newSecret.Name,
			Namespace: newSecret.Namespace,
		}
	}

	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Roll back the secret we created so nothing is orphaned.
		if newSecret != nil {
			cleanupCreatedResources(configFlags, options.Namespace, newSecret)
		}
		return nil, nil, fmt.Errorf("failed to create OVA provider: %v", err)
	}

	// Make the provider the owner of the secret we created.
	if newSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, newSecret); err != nil {
			return nil, newSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, newSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/ova/secrets.go | Go | package ova
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Helper function to create an OVA secret.
// The secret only stores the NFS URL; values are raw bytes and the API
// server applies base64 encoding itself.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, url string) (*corev1.Secret, error) {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			// GenerateName lets the server append a random suffix,
			// avoiding collisions on repeated creation.
			GenerateName: fmt.Sprintf("%s-ova-", providerName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForProviderType": "ova",
				"createdForResourceType": "providers",
			},
		},
		Data: map[string][]byte{"url": []byte(url)},
		Type: corev1.SecretTypeOpaque,
	}
	return clientset.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
}
// setSecretOwnership sets the provider as the owner of the secret.
//
// The owner reference lets Kubernetes garbage collection delete the secret
// automatically when the provider is deleted. The update is idempotent: if
// the provider already appears among the secret's owners, the function
// returns without making any API call.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	// Get the Kubernetes client using configFlags
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}
	// Get the current secret to safely append owner reference
	// (re-reading avoids clobbering references added since creation).
	currentSecret, err := k8sClient.CoreV1().Secrets(secret.Namespace).Get(
		context.Background(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}
	// Create the owner reference from the provider's identity.
	ownerRef := metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	}
	// Check if this provider is already an owner to avoid duplicates
	for _, existingOwner := range currentSecret.OwnerReferences {
		if existingOwner.UID == provider.UID {
			return nil // Already an owner, nothing to do
		}
	}
	// Append the new owner reference to existing ones
	currentSecret.OwnerReferences = append(currentSecret.OwnerReferences, ownerRef)
	// Update the secret with the new owner reference
	_, err = k8sClient.CoreV1().Secrets(secret.Namespace).Update(
		context.Background(),
		currentSecret,
		metav1.UpdateOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/providerutil/conditions.go | Go | package providerutil
import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// ProviderConditionStatuses holds the formatted status values for different provider condition types.
//
// Each field holds one of "True", "False", or "Unknown", as produced by
// ExtractProviderConditionStatuses from the provider's status.conditions.
type ProviderConditionStatuses struct {
	// ConnectionStatus mirrors the "ConnectionTestSucceeded" condition.
	ConnectionStatus string
	// ValidationStatus mirrors the "Validated" condition.
	ValidationStatus string
	// InventoryStatus mirrors the "InventoryCreated" condition.
	InventoryStatus string
	// ReadyStatus mirrors the "Ready" condition.
	ReadyStatus string
}
// ExtractProviderConditionStatuses extracts and formats provider condition statuses from an unstructured object.
// Conditions that are absent or malformed leave their field at "Unknown";
// any status other than an explicit "True"/"False" also maps to "Unknown".
func ExtractProviderConditionStatuses(obj map[string]interface{}) ProviderConditionStatuses {
	// Every field starts as "Unknown" until a matching condition is found.
	statuses := ProviderConditionStatuses{
		ConnectionStatus: "Unknown",
		ValidationStatus: "Unknown",
		InventoryStatus:  "Unknown",
		ReadyStatus:      "Unknown",
	}
	conditions, found, _ := unstructured.NestedSlice(obj, "status", "conditions")
	if !found {
		return statuses
	}
	for _, raw := range conditions {
		cond, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}
		condType, _, _ := unstructured.NestedString(cond, "type")
		condStatus, _, _ := unstructured.NestedString(cond, "status")
		// Collapse the condition status to a simple display value.
		display := "Unknown"
		switch condStatus {
		case "True":
			display = "True"
		case "False":
			display = "False"
		}
		// Route the display value to the matching struct field.
		switch condType {
		case "ConnectionTestSucceeded":
			statuses.ConnectionStatus = display
		case "Validated":
			statuses.ValidationStatus = display
		case "InventoryCreated":
			statuses.InventoryStatus = display
		case "Ready":
			statuses.ReadyStatus = display
		}
	}
	return statuses
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/providerutil/options.go | Go | package providerutil
// ProviderOptions contains the options for creating a provider.
//
// It is a superset of the fields used by every provider type; each
// provider package's CreateProvider reads only the fields relevant to it
// and validates them there.
type ProviderOptions struct {
	// Common fields shared by all provider types.
	Name      string
	Namespace string
	// Secret names an existing credentials secret; most provider types
	// then require the inline credential fields to be left empty.
	Secret          string
	URL             string
	Username        string
	Password        string
	CACert          string
	InsecureSkipTLS bool
	// VSphere specific options (VDDK image and transfer tuning).
	VddkInitImage          string
	SdkEndpoint            string
	UseVddkAioOptimization bool
	VddkBufSizeIn64K       int
	VddkBufCount           int
	// OpenShift specific options (service-account token auth).
	Token string
	// OpenStack specific options (keystone scoping).
	DomainName  string
	ProjectName string
	RegionName  string
	// HyperV specific options (SMB share access).
	SMBUrl      string
	SMBUser     string
	SMBPassword string
	// EC2 specific options
	EC2Region            string
	EC2TargetRegion      string
	EC2TargetAZ          string
	EC2TargetAccessKeyID string // Target account access key (cross-account migrations)
	EC2TargetSecretKey   string // Target account secret key (cross-account migrations)
	AutoTargetCredentials bool // Auto-fetch target credentials from cluster
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/vsphere/secrets.go | Go | package vsphere
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Helper function to create a vSphere secret.
// Values are stored as raw bytes; the API server performs base64 encoding.
func createSecret(configFlags *genericclioptions.ConfigFlags, namespace, providerName, user, password, url, cacert string, insecureSkipTLS bool) (*corev1.Secret, error) {
	clientset, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %v", err)
	}

	// Mandatory credentials plus optional TLS material.
	data := map[string][]byte{
		"user":     []byte(user),
		"password": []byte(password),
		"url":      []byte(url),
	}
	if insecureSkipTLS {
		data["insecureSkipVerify"] = []byte("true")
	}
	if cacert != "" {
		data["cacert"] = []byte(cacert)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			// GenerateName lets the server append a random suffix,
			// avoiding collisions on repeated creation.
			GenerateName: fmt.Sprintf("%s-vsphere-", providerName),
			Namespace:    namespace,
			Labels: map[string]string{
				"createdForProviderType": "vsphere",
				"createdForResourceType": "providers",
			},
		},
		Data: data,
		Type: corev1.SecretTypeOpaque,
	}
	return clientset.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
}
// setSecretOwnership sets the provider as the owner of the secret.
//
// The owner reference lets Kubernetes garbage collection delete the secret
// automatically when the provider is deleted. The update is idempotent: if
// the provider already appears among the secret's owners, the function
// returns without making any API call.
func setSecretOwnership(configFlags *genericclioptions.ConfigFlags, provider *forkliftv1beta1.Provider, secret *corev1.Secret) error {
	// Get the Kubernetes client using configFlags
	k8sClient, err := client.GetKubernetesClientset(configFlags)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %v", err)
	}
	// Get the current secret to safely append owner reference
	// (re-reading avoids clobbering references added since creation).
	currentSecret, err := k8sClient.CoreV1().Secrets(secret.Namespace).Get(
		context.Background(),
		secret.Name,
		metav1.GetOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to get secret for ownership update: %v", err)
	}
	// Create the owner reference from the provider's identity.
	ownerRef := metav1.OwnerReference{
		APIVersion: provider.APIVersion,
		Kind:       provider.Kind,
		Name:       provider.Name,
		UID:        provider.UID,
	}
	// Check if this provider is already an owner to avoid duplicates
	for _, existingOwner := range currentSecret.OwnerReferences {
		if existingOwner.UID == provider.UID {
			return nil // Already an owner, nothing to do
		}
	}
	// Append the new owner reference to existing ones
	currentSecret.OwnerReferences = append(currentSecret.OwnerReferences, ownerRef)
	// Update the secret with the new owner reference
	_, err = k8sClient.CoreV1().Secrets(secret.Namespace).Update(
		context.Background(),
		currentSecret,
		metav1.UpdateOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to update secret with owner reference: %v", err)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/provider/vsphere/vsphere.go | Go | package vsphere
import (
"context"
"fmt"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// validateProviderOptions validates the options for creating a vSphere provider.
//
// Two mutually exclusive credential configurations are accepted:
//   - an existing secret (options.Secret) with no inline credentials, or
//   - inline username/password plus either a CA certificate or
//     InsecureSkipTLS.
//
// Fix: the previous ordering required username/password unconditionally,
// so a secret-only configuration was always rejected with "provider
// username is required" before its own secret checks could run, making
// the documented secret path impossible. Credentials (and the TLS
// requirement, which the secret carries itself) now apply only when no
// secret is supplied.
func validateProviderOptions(options providerutil.ProviderOptions) error {
	if options.Name == "" {
		return fmt.Errorf("provider name is required")
	}
	if options.Namespace == "" {
		return fmt.Errorf("provider namespace is required")
	}
	if options.URL == "" {
		return fmt.Errorf("provider URL is required")
	}
	if options.Secret != "" {
		// The secret already carries credentials and TLS material.
		if options.Username != "" || options.Password != "" {
			return fmt.Errorf("if a secret is provided, username and password should not be specified")
		}
		return nil
	}
	if options.Username == "" {
		return fmt.Errorf("provider username is required")
	}
	if options.Password == "" {
		return fmt.Errorf("provider password is required")
	}
	if options.CACert == "" && !options.InsecureSkipTLS {
		return fmt.Errorf("either CA certificate or insecure skip TLS must be provided")
	}
	return nil
}
// cleanupCreatedResources deletes any resources created during the provider creation process.
func cleanupCreatedResources(configFlags *genericclioptions.ConfigFlags, namespace string, secret *corev1.Secret) {
	if secret == nil {
		return // Nothing was created, nothing to undo.
	}
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return // Best effort only: without a client there is no cleanup.
	}
	// The delete error is deliberately ignored; cleanup is advisory.
	_ = dynClient.Resource(client.SecretsGVR).Namespace(namespace).Delete(
		context.Background(), secret.Name, metav1.DeleteOptions{})
}
// createTypedProvider creates an unstructured provider and converts it to a typed Provider.
//
// The Forklift Provider resource is created through the dynamic client,
// which only accepts unstructured objects, so the typed Provider is
// converted to a map, created on the cluster, and the server's response
// converted back to the typed form the caller expects.
// Note: the Create error is returned unwrapped; callers add context.
func createTypedProvider(configFlags *genericclioptions.ConfigFlags, namespace string, provider *forkliftv1beta1.Provider) (*forkliftv1beta1.Provider, error) {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}
	// Convert the provider object to unstructured format
	providerMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(provider)
	if err != nil {
		return nil, fmt.Errorf("failed to convert provider to unstructured format: %v", err)
	}
	// Create an *unstructured.Unstructured from the map
	providerUnstructured := &unstructured.Unstructured{Object: providerMap}
	createdUnstructProvider, err := dynamicClient.Resource(client.ProvidersGVR).Namespace(namespace).Create(
		context.Background(),
		providerUnstructured,
		metav1.CreateOptions{},
	)
	if err != nil {
		return nil, err
	}
	// Convert unstructured provider to typed provider
	createdProvider := &forkliftv1beta1.Provider{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdUnstructProvider.Object, createdProvider); err != nil {
		return nil, fmt.Errorf("failed to convert provider from unstructured: %v", err)
	}
	return createdProvider, nil
}
// CreateProvider implements the ProviderCreator interface for VSphere.
//
// Flow: validate options, build the Provider object (including optional
// VDDK transfer settings), create (or reference) the credentials secret,
// create the Provider on the cluster, then make the provider the owner of
// any secret this call created. On provider-creation failure a secret
// created here is cleaned up.
//
// Returns the created Provider, the secret created by this call (nil when
// an existing secret was referenced), and an error.
func CreateProvider(configFlags *genericclioptions.ConfigFlags, options providerutil.ProviderOptions) (*forkliftv1beta1.Provider, *corev1.Secret, error) {
	// Validate required fields
	if err := validateProviderOptions(options); err != nil {
		return nil, nil, err
	}
	// Create basic provider structure
	provider := &forkliftv1beta1.Provider{}
	provider.SetName(options.Name)
	provider.SetNamespace(options.Namespace)
	provider.APIVersion = forkliftv1beta1.SchemeGroupVersion.String()
	provider.Kind = "Provider"
	// Set provider type and URL
	providerTypeValue := forkliftv1beta1.ProviderType("vsphere")
	provider.Spec.Type = &providerTypeValue
	provider.Spec.URL = options.URL
	// Initialize settings map if any settings are provided; left nil
	// otherwise so the Settings key is omitted from the resource.
	if options.VddkInitImage != "" || options.SdkEndpoint != "" || options.UseVddkAioOptimization ||
		options.VddkBufSizeIn64K > 0 || options.VddkBufCount > 0 {
		provider.Spec.Settings = map[string]string{}
	}
	// Set VDDK init image if provided
	if options.VddkInitImage != "" {
		provider.Spec.Settings["vddkInitImage"] = options.VddkInitImage
	}
	// Set SDK endpoint if provided
	if options.SdkEndpoint != "" {
		provider.Spec.Settings["sdkEndpoint"] = options.SdkEndpoint
	}
	// Set VDDK AIO optimization if enabled
	if options.UseVddkAioOptimization {
		provider.Spec.Settings["useVddkAioOptimization"] = "true"
	}
	// Set VDDK configuration if buffer settings are provided
	if options.VddkBufSizeIn64K > 0 || options.VddkBufCount > 0 {
		var vddkConfig strings.Builder
		// Start with YAML literal block scalar format
		// NOTE(review): the value begins with a literal "|" even though
		// Settings values are plain strings — presumably the consumer
		// re-embeds this text in YAML; confirm downstream parsing.
		vddkConfig.WriteString("|")
		if options.VddkBufSizeIn64K > 0 {
			vddkConfig.WriteString("\nVixDiskLib.nfcAio.Session.BufSizeIn64K=")
			vddkConfig.WriteString(strconv.Itoa(options.VddkBufSizeIn64K))
		}
		if options.VddkBufCount > 0 {
			vddkConfig.WriteString("\nVixDiskLib.nfcAio.Session.BufCount=")
			vddkConfig.WriteString(strconv.Itoa(options.VddkBufCount))
		}
		provider.Spec.Settings["vddkConfig"] = vddkConfig.String()
	}
	// Create and set the Secret
	var createdSecret *corev1.Secret
	var err error
	if options.Secret == "" {
		// No secret supplied: mint one from the inline credentials.
		createdSecret, err = createSecret(configFlags, options.Namespace, options.Name,
			options.Username, options.Password, options.URL, options.CACert, options.InsecureSkipTLS)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create vSphere secret: %v", err)
		}
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      createdSecret.Name,
			Namespace: createdSecret.Namespace,
		}
	} else {
		// Reference the existing secret in the provider's namespace.
		provider.Spec.Secret = corev1.ObjectReference{
			Name:      options.Secret,
			Namespace: options.Namespace,
		}
	}
	// Create the provider
	createdProvider, err := createTypedProvider(configFlags, options.Namespace, provider)
	if err != nil {
		// Clean up the created secret if provider creation fails
		// (cleanupCreatedResources is a no-op when createdSecret is nil).
		cleanupCreatedResources(configFlags, options.Namespace, createdSecret)
		return nil, nil, fmt.Errorf("failed to create vSphere provider: %v", err)
	}
	// Set the secret ownership to the provider so the secret is
	// garbage-collected along with it.
	if createdSecret != nil {
		if err := setSecretOwnership(configFlags, createdProvider, createdSecret); err != nil {
			// The provider exists; report the ownership failure but still return the secret.
			return nil, createdSecret, fmt.Errorf("provider created but %v", err)
		}
	}
	return createdProvider, createdSecret, nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/create/vddk/vddk.go | Go | package vddk
import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// detectContainerRuntime checks for available container runtime (podman or docker).
// Podman is tried first (preferred), then docker. It returns the runtime command
// name on success, or an error with installation guidance when neither binary is
// available in PATH.
// NOTE(review): the previous comment claimed a (string, bool) return; the
// function actually returns (string, error).
func detectContainerRuntime() (string, error) {
	// Try podman first (preferred)
	if _, err := exec.LookPath("podman"); err == nil {
		return "podman", nil
	}
	// Fall back to docker
	if _, err := exec.LookPath("docker"); err == nil {
		return "docker", nil
	}
	return "", fmt.Errorf("neither podman nor docker is installed or available in PATH.\n" +
		"Please install one of the following:\n" +
		" - Podman: https://podman.io/getting-started/installation\n" +
		" - Docker: https://docs.docker.com/get-docker/")
}
// selectContainerRuntime determines which container runtime to use based on the
// provided preference. An empty or "auto" preference triggers auto-detection;
// otherwise the named runtime is validated and checked for availability in PATH.
func selectContainerRuntime(runtimePreference string) (string, error) {
	// Treat an empty preference exactly like "auto".
	pref := runtimePreference
	if pref == "" {
		pref = "auto"
	}

	switch pref {
	case "auto":
		// Let detection pick whichever runtime is installed.
		return detectContainerRuntime()
	case "podman", "docker":
		// Known runtime name; availability is verified below.
	default:
		return "", fmt.Errorf("invalid runtime '%s': must be 'auto', 'podman', or 'docker'", pref)
	}

	// Confirm the explicitly requested runtime is actually installed.
	if _, lookErr := exec.LookPath(pref); lookErr != nil {
		return "", fmt.Errorf("specified runtime '%s' is not installed or available in PATH.\n"+
			"Please install it or use --runtime=auto to auto-detect an available runtime", pref)
	}
	return pref, nil
}
// defaultDockerfile is the default Dockerfile content used when no custom Dockerfile is provided.
// The resulting image copies the extracted vmware-vix-disklib-distrib tree into /opt when the
// container starts, which is the layout an MTV VDDK init image is expected to provide.
const defaultDockerfile = `FROM registry.access.redhat.com/ubi8/ubi-minimal
USER 1001
COPY vmware-vix-disklib-distrib /vmware-vix-disklib-distrib
RUN mkdir -p /opt
ENTRYPOINT ["cp", "-r", "/vmware-vix-disklib-distrib", "/opt"]
`
// BuildImage builds (and optionally pushes) a VDDK image for MTV.
//
// Parameters:
//   - tarGzPath: path to the VMware VDDK tar.gz archive to unpack
//   - tag: image tag to build (and push)
//   - buildDir: working directory; a temporary dir is created (and removed) when empty
//   - runtimePreference: "auto", "podman", or "docker"
//   - platform: target architecture; built as "linux/<platform>" when non-empty
//   - dockerfilePath: optional custom Dockerfile; defaultDockerfile is used when empty
//   - verbosity: 0 quiet, >0 prints commands, >1 also dumps the Dockerfile
//   - push: when true, push the image after a successful build
//   - pushInsecureSkipTLS: skip TLS verification on push (podman only)
func BuildImage(tarGzPath, tag, buildDir, runtimePreference, platform, dockerfilePath string, verbosity int, push, pushInsecureSkipTLS bool) error {
	// Select container runtime based on preference
	runtime, err := selectContainerRuntime(runtimePreference)
	if err != nil {
		return err
	}
	fmt.Printf("Using container runtime: %s\n", runtime)
	fmt.Printf("Target platform: %s\n", platform)
	if buildDir == "" {
		tmp, err := os.MkdirTemp("", "vddk-build-*")
		if err != nil {
			return fmt.Errorf("failed to create temp dir: %w", err)
		}
		defer os.RemoveAll(tmp)
		buildDir = tmp
	}
	fmt.Printf("Using build directory: %s\n", buildDir)
	// Unpack tar.gz
	fmt.Println("Extracting VDDK tar.gz...")
	if err := extractTarGz(tarGzPath, buildDir, verbosity); err != nil {
		return fmt.Errorf("failed to extract tar.gz: %w", err)
	}
	// Find the extracted directory.
	// FIX: the ReadDir error was previously discarded, which turned e.g. a
	// permissions problem into a misleading "could not find ... directory"
	// message; surface it explicitly instead.
	var distribDir string
	files, err := os.ReadDir(buildDir)
	if err != nil {
		return fmt.Errorf("failed to read build directory: %w", err)
	}
	for _, f := range files {
		if strings.HasPrefix(f.Name(), "vmware-vix-disklib-distrib") && f.IsDir() {
			distribDir = f.Name()
			break
		}
	}
	if distribDir == "" {
		return fmt.Errorf("could not find vmware-vix-disklib-distrib directory after extraction")
	}
	// Determine Dockerfile content
	var df string
	if dockerfilePath != "" {
		// Read custom Dockerfile from provided path
		fmt.Printf("Using custom Dockerfile from: %s\n", dockerfilePath)
		dockerfileBytes, err := os.ReadFile(dockerfilePath)
		if err != nil {
			return fmt.Errorf("failed to read custom Dockerfile from %s: %w", dockerfilePath, err)
		}
		df = string(dockerfileBytes)
	} else {
		// Use default Dockerfile
		df = defaultDockerfile
	}
	// Write Dockerfile to build directory
	dockerfile := filepath.Join(buildDir, "Dockerfile")
	if err := os.WriteFile(dockerfile, []byte(df), 0644); err != nil {
		return fmt.Errorf("failed to write Dockerfile: %w", err)
	}
	// Print Dockerfile if verbosity > 1 (debug level)
	if verbosity > 1 {
		fmt.Println("Dockerfile contents:")
		fmt.Println("---")
		fmt.Print(df)
		fmt.Println("---")
	}
	// Build image
	fmt.Printf("Building image with %s...\n", runtime)
	// Construct build command with platform
	buildArgs := []string{"build"}
	if platform != "" {
		// Use linux/<platform> format for container images
		buildArgs = append(buildArgs, "--platform", fmt.Sprintf("linux/%s", platform))
	}
	buildArgs = append(buildArgs, "-t", tag, ".")
	// Print command if verbose
	if verbosity > 0 {
		fmt.Printf("Running: %s %s\n", runtime, strings.Join(buildArgs, " "))
	}
	buildCmd := exec.Command(runtime, buildArgs...)
	buildCmd.Dir = buildDir
	buildCmd.Stdout = os.Stdout
	buildCmd.Stderr = os.Stderr
	if err := buildCmd.Run(); err != nil {
		return fmt.Errorf("%s build failed: %w", runtime, err)
	}
	// Optionally push
	if push {
		fmt.Printf("Pushing image with %s...\n", runtime)
		// Construct push command with optional TLS skip
		pushArgs := []string{"push"}
		if pushInsecureSkipTLS {
			if runtime == "podman" {
				pushArgs = append(pushArgs, "--tls-verify=false")
			} else {
				// Docker does not support per-command TLS skip
				fmt.Println("Warning: Docker does not support per-command TLS verification skip.")
				fmt.Println("To push to an insecure registry with Docker, configure your daemon:")
				fmt.Println("  Add to /etc/docker/daemon.json: {\"insecure-registries\": [\"your-registry:port\"]}")
				fmt.Println("  Then restart Docker: sudo systemctl restart docker")
			}
		}
		pushArgs = append(pushArgs, tag)
		// Print command if verbose
		if verbosity > 0 {
			fmt.Printf("Running: %s %s\n", runtime, strings.Join(pushArgs, " "))
		}
		pushCmd := exec.Command(runtime, pushArgs...)
		pushCmd.Stdout = os.Stdout
		pushCmd.Stderr = os.Stderr
		if err := pushCmd.Run(); err != nil {
			return fmt.Errorf("%s push failed: %w", runtime, err)
		}
	}
	fmt.Println("VDDK image build complete.")
	return nil
}
// extractTarGz unpacks a gzip-compressed tarball into destDir using the system
// tar binary, streaming tar's output to this process's stdout/stderr.
func extractTarGz(tarGzPath, destDir string, verbosity int) error {
	// The destination must exist before tar can extract into it.
	if mkErr := os.MkdirAll(destDir, 0755); mkErr != nil {
		return fmt.Errorf("failed to create destination directory: %w", mkErr)
	}

	// Delegate extraction to the system tar command.
	tarArgs := []string{"-xzf", tarGzPath, "-C", destDir}
	if verbosity > 0 {
		fmt.Printf("Running: tar %s\n", strings.Join(tarArgs, " "))
	}

	tarCmd := exec.Command("tar", tarArgs...)
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if runErr := tarCmd.Run(); runErr != nil {
		return fmt.Errorf("tar extraction failed: %w", runErr)
	}
	return nil
}
// SetControllerVddkImage configures the ForkliftController CR with the specified VDDK image.
// This sets the global vddk_image setting that applies to all vSphere providers unless overridden.
//
// The first ForkliftController found in the MTV operator namespace is patched
// (typically there is exactly one, named "forklift-controller").
func SetControllerVddkImage(configFlags *genericclioptions.ConfigFlags, vddkImage string, verbosity int) error {
	ctx := context.Background()
	// Get the MTV operator namespace
	operatorNamespace := client.GetMTVOperatorNamespace(ctx, configFlags)
	if verbosity > 0 {
		fmt.Printf("Using MTV operator namespace: %s\n", operatorNamespace)
	}
	// Get dynamic client
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get Kubernetes client: %w", err)
	}
	// List ForkliftController resources in the operator namespace
	controllerList, err := dynamicClient.Resource(client.ForkliftControllersGVR).Namespace(operatorNamespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("failed to list ForkliftController resources: %w", err)
	}
	if len(controllerList.Items) == 0 {
		return fmt.Errorf("no ForkliftController found in namespace %s", operatorNamespace)
	}
	// Use the first ForkliftController (typically there's only one named "forklift-controller")
	controller := controllerList.Items[0]
	controllerName := controller.GetName()
	fmt.Printf("Configuring ForkliftController '%s' with VDDK image: %s\n", controllerName, vddkImage)
	// Create the JSON merge patch to set spec.vddk_image.
	// The ForkliftController uses snake_case for its spec fields.
	// FIX: build the patch with json.Marshal instead of fmt.Sprintf string
	// splicing, so an image reference containing quotes or backslashes is
	// properly escaped rather than producing invalid JSON.
	patchData, err := json.Marshal(map[string]interface{}{
		"spec": map[string]interface{}{
			"vddk_image": vddkImage,
		},
	})
	if err != nil {
		return fmt.Errorf("failed to encode patch: %w", err)
	}
	if verbosity > 0 {
		fmt.Printf("Applying patch: %s\n", string(patchData))
	}
	// Apply the patch
	_, err = dynamicClient.Resource(client.ForkliftControllersGVR).Namespace(operatorNamespace).Patch(
		ctx,
		controllerName,
		types.MergePatchType,
		patchData,
		metav1.PatchOptions{},
	)
	if err != nil {
		return fmt.Errorf("failed to patch ForkliftController: %w", err)
	}
	fmt.Printf("Successfully configured ForkliftController with global VDDK image.\n")
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/cutover/plan/cutover.go | Go | package plan
import (
"context"
"encoding/json"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan/status"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Cutover sets the cutover time for a warm migration
func Cutover(configFlags *genericclioptions.ConfigFlags, planName, namespace string, cutoverTime *time.Time) error {
	dynClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	// Fetch the plan so we can validate it supports warm migration.
	planResource, err := dynClient.Resource(client.PlansGVR).Namespace(namespace).Get(context.TODO(), planName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get plan '%s': %v", planName, err)
	}

	// Cutover only applies to warm migrations.
	if warm, found, warmErr := unstructured.NestedBool(planResource.Object, "spec", "warm"); warmErr != nil || !found || !warm {
		return fmt.Errorf("plan '%s' is not configured for warm migration", planName)
	}

	// Locate the migration currently executing this plan.
	migration, _, err := status.GetRunningMigration(dynClient, namespace, planResource, client.MigrationsGVR)
	if err != nil {
		return err
	}
	if migration == nil {
		return fmt.Errorf("no running migration found for plan '%s'", planName)
	}

	// Default to "now" when the caller did not supply a cutover time.
	effective := cutoverTime
	if effective == nil {
		now := time.Now()
		effective = &now
	}
	// RFC3339 is the wire format Kubernetes uses for metav1.Time.
	formatted := effective.Format(time.RFC3339)

	// Build and encode the merge patch that sets spec.cutover.
	patchBytes, err := json.Marshal(map[string]interface{}{
		"spec": map[string]interface{}{
			"cutover": formatted,
		},
	})
	if err != nil {
		return fmt.Errorf("failed to create patch: %v", err)
	}

	// Apply the patch to the running migration.
	if _, err = dynClient.Resource(client.MigrationsGVR).Namespace(namespace).Patch(
		context.TODO(),
		migration.GetName(),
		types.MergePatchType,
		patchBytes,
		metav1.PatchOptions{},
	); err != nil {
		return fmt.Errorf("failed to update migration with cutover time: %v", err)
	}

	fmt.Printf("Successfully set cutover time to %s for plan '%s'\n", formatted, planName)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/delete/hook/delete.go | Go | package hook
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Delete deletes a hook
func Delete(configFlags *genericclioptions.ConfigFlags, name, namespace string) error {
	dynClient, clientErr := client.GetDynamicClient(configFlags)
	if clientErr != nil {
		return fmt.Errorf("failed to get client: %v", clientErr)
	}

	// Issue the delete against the Hook custom resource.
	if delErr := dynClient.Resource(client.HooksGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); delErr != nil {
		return fmt.Errorf("failed to delete hook: %v", delErr)
	}

	fmt.Printf("Hook '%s' deleted from namespace '%s'\n", name, namespace)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/delete/host/delete.go | Go | package host
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Delete deletes a host
func Delete(configFlags *genericclioptions.ConfigFlags, name, namespace string) error {
	dynClient, clientErr := client.GetDynamicClient(configFlags)
	if clientErr != nil {
		return fmt.Errorf("failed to get client: %v", clientErr)
	}

	// Issue the delete against the Host custom resource.
	if delErr := dynClient.Resource(client.HostsGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); delErr != nil {
		return fmt.Errorf("failed to delete host: %v", delErr)
	}

	fmt.Printf("Host '%s' deleted from namespace '%s'\n", name, namespace)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/delete/mapping/delete.go | Go | package mapping
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Delete deletes a network or storage mapping
func Delete(configFlags *genericclioptions.ConfigFlags, name, namespace, mappingType string) error {
	// Dispatch on the mapping kind; anything else is rejected.
	if mappingType == "network" {
		return DeleteNetwork(configFlags, name, namespace)
	}
	if mappingType == "storage" {
		return DeleteStorage(configFlags, name, namespace)
	}
	return fmt.Errorf("unsupported mapping type: %s. Use 'network' or 'storage'", mappingType)
}
// DeleteNetwork deletes a network mapping (a NetworkMap custom resource)
// by delegating to the shared deleteMapping helper with the NetworkMap GVR.
func DeleteNetwork(configFlags *genericclioptions.ConfigFlags, name, namespace string) error {
	return deleteMapping(configFlags, name, namespace, client.NetworkMapGVR, "network")
}
// DeleteStorage deletes a storage mapping (a StorageMap custom resource)
// by delegating to the shared deleteMapping helper with the StorageMap GVR.
func DeleteStorage(configFlags *genericclioptions.ConfigFlags, name, namespace string) error {
	return deleteMapping(configFlags, name, namespace, client.StorageMapGVR, "storage")
}
// deleteMapping deletes a mapping resource of the given GVR and prints a
// confirmation message with the mapping type capitalized for display.
func deleteMapping(configFlags *genericclioptions.ConfigFlags, name, namespace string, gvr schema.GroupVersionResource, mappingType string) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	err = c.Resource(gvr).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("failed to delete %s mapping: %v", mappingType, err)
	}
	// Capitalize the first letter for display ("network" -> "Network").
	// FIX: the previous byte arithmetic (mappingType[0]-32) panicked on an
	// empty string and corrupted any non-lowercase-ASCII first byte; guard
	// both cases and only shift actual lowercase letters.
	display := mappingType
	if len(display) > 0 && display[0] >= 'a' && display[0] <= 'z' {
		display = string(display[0]-'a'+'A') + display[1:]
	}
	fmt.Printf("%s mapping '%s' deleted from namespace '%s'\n", display, name, namespace)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/delete/plan/delete.go | Go | package plan
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
"github.com/yaacov/kubectl-mtv/pkg/cmd/archive/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Delete removes a plan by name from the cluster.
//
// When cleanAll is true, the plan is first patched with
// deleteVmOnFailMigration=true so VMs from failed migrations are cleaned up
// together with the plan. Unless skipArchive is true, the plan is archived
// first and the call blocks (up to 60 seconds) until the "Archived"
// condition is reported before the Plan object itself is deleted.
func Delete(ctx context.Context, configFlags *genericclioptions.ConfigFlags, name, namespace string, skipArchive, cleanAll bool) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Patch the plan to add deleteVmOnFailMigration=true if cleanAll is true
	if cleanAll {
		fmt.Printf("Clean-all mode enabled for plan '%s'\n", name)
		// Patch the plan to add deleteVmOnFailMigration=true
		fmt.Printf("Patching plan '%s' to enable VM deletion on failed migration...\n", name)
		err = patchPlanDeleteVmOnFailMigration(ctx, c, name, namespace)
		if err != nil {
			return fmt.Errorf("failed to patch plan: %v", err)
		}
		fmt.Printf("Plan '%s' patched with deleteVmOnFailMigration=true\n", name)
	}
	// Archive the plan if not skipped
	if skipArchive {
		fmt.Printf("Skipping archive and deleting plan '%s' immediately...\n", name)
	} else {
		// Archive the plan (the archive package is imported under the name "plan")
		err = plan.Archive(ctx, configFlags, name, namespace, true)
		if err != nil {
			return fmt.Errorf("failed to archive plan: %v", err)
		}
		// Wait for the Archived condition to be true
		fmt.Printf("Waiting for plan '%s' to be archived...\n", name)
		err = waitForArchivedCondition(ctx, c, name, namespace, 60)
		if err != nil {
			return err
		}
	}
	// Delete the plan
	err = c.Resource(client.PlansGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("failed to delete plan: %v", err)
	}
	fmt.Printf("Plan '%s' deleted from namespace '%s'\n", name, namespace)
	return nil
}
// waitForArchivedCondition polls a plan until its "Archived" condition is
// "True", returning an error when timeoutSec elapses or ctx is cancelled.
//
// Polling interval is 2 seconds.
func waitForArchivedCondition(ctx context.Context, c dynamic.Interface, name, namespace string, timeoutSec int) error {
	// Set timeout based on provided seconds
	timeout := time.Duration(timeoutSec) * time.Second
	deadline := time.Now().Add(timeout)
	for {
		// Check if we've exceeded the timeout
		if time.Now().After(deadline) {
			return fmt.Errorf("timeout waiting for plan '%s' to be archived after %v", name, timeout)
		}
		plan, err := c.Resource(client.PlansGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("failed to get plan: %v", err)
		}
		conditions, exists, err := unstructured.NestedSlice(plan.Object, "status", "conditions")
		if err != nil {
			return fmt.Errorf("failed to get plan conditions: %v", err)
		}
		// FIX: previously a missing status.conditions field returned an error
		// (formatted with a nil err, printing "<nil>") instead of waiting for
		// the controller to populate status; now we simply keep polling.
		if exists {
			for _, condition := range conditions {
				cond, ok := condition.(map[string]interface{})
				if !ok {
					continue
				}
				condType, _, _ := unstructured.NestedString(cond, "type")
				condStatus, _, _ := unstructured.NestedString(cond, "status")
				if condType == "Archived" && condStatus == "True" {
					return nil
				}
			}
		}
		// Wait before checking again, honoring caller cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}
// patchPlanDeleteVmOnFailMigration patches a plan to add deleteVmOnFailMigration=true
func patchPlanDeleteVmOnFailMigration(ctx context.Context, c dynamic.Interface, name, namespace string) error {
	// Build the merge patch as an unstructured object and encode it to JSON.
	patchObject := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"spec": map[string]interface{}{
				"deleteVmOnFailMigration": true,
			},
		},
	}
	patchBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, patchObject)
	if err != nil {
		return fmt.Errorf("failed to encode patch data: %v", err)
	}

	// Send the merge patch to the Plan resource.
	if _, patchErr := c.Resource(client.PlansGVR).Namespace(namespace).Patch(
		ctx,
		name,
		types.MergePatchType,
		patchBytes,
		metav1.PatchOptions{},
	); patchErr != nil {
		return fmt.Errorf("failed to patch plan: %v", patchErr)
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/delete/provider/delete.go | Go | package provider
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// Delete deletes a provider
func Delete(configFlags *genericclioptions.ConfigFlags, name, namespace string) error {
	dynClient, clientErr := client.GetDynamicClient(configFlags)
	if clientErr != nil {
		return fmt.Errorf("failed to get client: %v", clientErr)
	}

	// Remove the Provider custom resource from the target namespace.
	if delErr := dynClient.Resource(client.ProvidersGVR).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); delErr != nil {
		return fmt.Errorf("failed to delete provider: %v", delErr)
	}

	fmt.Printf("Provider '%s' deleted from namespace '%s'\n", name, namespace)
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/describe/hook/describe.go | Go | package hook
import (
"context"
"encoding/base64"
"fmt"
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
)
// colorizeYAML adds syntax highlighting to YAML content: full-line comments
// in cyan, keys in blue, non-empty values in yellow, list bullets in green.
func colorizeYAML(yamlContent string) string {
	// Compile list-item patterns once, outside the per-line loop.
	bulletRegex := regexp.MustCompile(`^\s*-\s+`)
	bulletSplitRegex := regexp.MustCompile(`^(\s*-\s+)(.*)`)

	rawLines := strings.Split(yamlContent, "\n")
	colored := make([]string, 0, len(rawLines))

	for _, raw := range rawLines {
		trimmed := strings.TrimSpace(raw)

		// Full-line comments render in cyan.
		if trimmed != "" && strings.HasPrefix(trimmed, "#") {
			colored = append(colored, output.ColorizedString(raw, output.CyanColor))
			continue
		}

		// Key/value pairs: blue key, yellow value. This check precedes the
		// bullet check, so "- key: value" lines are treated as key/value.
		if strings.Contains(raw, ":") {
			segments := strings.SplitN(raw, ":", 2)
			if len(segments) == 2 {
				valuePart := segments[1]
				renderedValue := valuePart
				if strings.TrimSpace(valuePart) != "" {
					renderedValue = output.Yellow(valuePart)
				}
				colored = append(colored, output.Blue(segments[0])+":"+renderedValue)
				continue
			}
		}

		// Bullet-only list items: green bullet prefix, content unchanged.
		if bulletRegex.MatchString(raw) {
			recolored := bulletSplitRegex.ReplaceAllStringFunc(raw, func(match string) string {
				groups := bulletSplitRegex.FindStringSubmatch(match)
				if len(groups) >= 3 {
					return output.Green(groups[1]) + groups[2]
				}
				return match
			})
			colored = append(colored, recolored)
			continue
		}

		// Anything else passes through untouched.
		colored = append(colored, raw)
	}
	return strings.Join(colored, "\n")
}
// Describe prints a human-readable report of a migration hook to stdout.
//
// It fetches the Hook resource `name` from `namespace` via the dynamic
// client and renders, in order: basic metadata, spec fields (image,
// service account, deadline), the decoded Ansible playbook (if present),
// status conditions, ownership, annotations, labels, and a usage example
// showing how to reference the hook from a migration plan.
// useUTC selects UTC vs. local time for the creation timestamp.
func Describe(configFlags *genericclioptions.ConfigFlags, name, namespace string, useUTC bool) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Get the hook
	hook, err := c.Resource(client.HooksGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get hook: %v", err)
	}
	// Print the hook details
	fmt.Printf("\n%s", output.ColorizedSeparator(105, output.YellowColor))
	fmt.Printf("\n%s\n", output.Cyan("MIGRATION HOOK"))
	// Basic Information
	fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(hook.GetName()))
	fmt.Printf("%s %s\n", output.Bold("Namespace:"), output.Yellow(hook.GetNamespace()))
	fmt.Printf("%s %s\n", output.Bold("Created:"), output.Yellow(output.FormatTimestamp(hook.GetCreationTimestamp().Time, useUTC)))
	// Hook Spec Information
	if image, found, _ := unstructured.NestedString(hook.Object, "spec", "image"); found {
		fmt.Printf("%s %s\n", output.Bold("Image:"), output.Yellow(image))
	}
	// An unset service account falls back to the namespace default.
	if serviceAccount, found, _ := unstructured.NestedString(hook.Object, "spec", "serviceAccount"); found && serviceAccount != "" {
		fmt.Printf("%s %s\n", output.Bold("Service Account:"), output.Yellow(serviceAccount))
	} else {
		fmt.Printf("%s %s\n", output.Bold("Service Account:"), output.Yellow("(default)"))
	}
	// A zero/absent deadline is reported as unlimited.
	if deadline, found, _ := unstructured.NestedInt64(hook.Object, "spec", "deadline"); found && deadline > 0 {
		fmt.Printf("%s %d seconds\n", output.Bold("Deadline:"), deadline)
	} else {
		fmt.Printf("%s %s\n", output.Bold("Deadline:"), output.Yellow("(unlimited)"))
	}
	// Playbook Information: the playbook is stored base64-encoded in the CR,
	// so decode it before display.
	playbook, playbookFound, _ := unstructured.NestedString(hook.Object, "spec", "playbook")
	if playbookFound && playbook != "" {
		fmt.Printf("%s %s\n", output.Bold("Playbook:"), output.Green("Yes"))
		// Decode and display playbook content
		if decoded, err := base64.StdEncoding.DecodeString(playbook); err == nil {
			fmt.Printf("\n%s\n", output.Cyan("PLAYBOOK CONTENT"))
			fmt.Printf("%s\n%s\n%s\n",
				output.ColorizedString("```yaml", output.BoldYellow),
				colorizeYAML(string(decoded)),
				output.ColorizedString("```", output.BoldYellow))
		} else {
			fmt.Printf("%s %s\n", output.Bold("Playbook Decoding:"), output.Red("Failed - invalid base64"))
		}
	} else {
		fmt.Printf("%s %s\n", output.Bold("Playbook:"), output.Yellow("No"))
	}
	// Status Information
	if status, found, _ := unstructured.NestedMap(hook.Object, "status"); found && status != nil {
		fmt.Printf("\n%s\n", output.Cyan("STATUS"))
		// Conditions
		if conditions, found, _ := unstructured.NestedSlice(hook.Object, "status", "conditions"); found {
			fmt.Printf("%s\n", output.Bold("Conditions:"))
			for _, condition := range conditions {
				if condMap, ok := condition.(map[string]interface{}); ok {
					condType, _ := condMap["type"].(string)
					condStatus, _ := condMap["status"].(string)
					reason, _ := condMap["reason"].(string)
					message, _ := condMap["message"].(string)
					lastTransitionTime, _ := condMap["lastTransitionTime"].(string)
					fmt.Printf("  %s: %s", output.Bold(condType), output.ColorizeStatus(condStatus))
					if reason != "" {
						fmt.Printf(" (%s)", reason)
					}
					fmt.Println()
					if message != "" {
						fmt.Printf("    %s\n", message)
					}
					if lastTransitionTime != "" {
						fmt.Printf("    Last Transition: %s\n", lastTransitionTime)
					}
				}
			}
		}
		// Other status fields
		if observedGeneration, found, _ := unstructured.NestedInt64(hook.Object, "status", "observedGeneration"); found {
			fmt.Printf("%s %d\n", output.Bold("Observed Generation:"), observedGeneration)
		}
	}
	// Owner References
	if len(hook.GetOwnerReferences()) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("OWNERSHIP"))
		for _, owner := range hook.GetOwnerReferences() {
			fmt.Printf("%s %s/%s", output.Bold("Owner:"), owner.Kind, owner.Name)
			if owner.Controller != nil && *owner.Controller {
				fmt.Printf(" %s", output.Green("(controller)"))
			}
			fmt.Println()
		}
	}
	// Annotations
	if annotations := hook.GetAnnotations(); len(annotations) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("ANNOTATIONS"))
		for key, value := range annotations {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	// Labels
	if labels := hook.GetLabels(); len(labels) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("LABELS"))
		for key, value := range labels {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	// Usage Information
	fmt.Printf("\n%s\n", output.Cyan("USAGE"))
	fmt.Printf("This hook can be referenced in migration plans per VM using:\n")
	// Create colored YAML example
	yamlExample := fmt.Sprintf(`spec:
  vms:
    - id: <vm_id>
      hooks:
        - hook:
            namespace: %s
            name: %s
          step: PreHook # or PostHook`, hook.GetNamespace(), hook.GetName())
	fmt.Printf("%s\n%s\n%s\n",
		output.ColorizedString("```yaml", output.BoldYellow),
		colorizeYAML(yamlExample),
		output.ColorizedString("```", output.BoldYellow))
	fmt.Printf("\n%s: For a PreHook to run on a VM, the VM must be started and available via SSH.\n", output.Bold("Note"))
	fmt.Println() // Add a newline at the end
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/describe/host/describe.go | Go | package host
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
)
// Describe prints a human-readable report of a migration host to stdout.
//
// It fetches the Host resource `name` from `namespace` and renders basic
// metadata, spec fields (host ID/name, IP address, provider, secret),
// ownership, network adapter details pulled from the provider inventory
// (best-effort — failures there do not fail the command), status
// conditions, annotations, and labels.
// useUTC selects UTC vs. local time for the creation timestamp;
// insecureSkipTLS is forwarded to the inventory client.
func Describe(ctx context.Context, configFlags *genericclioptions.ConfigFlags, name, namespace string, useUTC bool, insecureSkipTLS bool) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Get the host
	host, err := c.Resource(client.HostsGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get host: %v", err)
	}
	// Print the host details
	fmt.Printf("\n%s", output.ColorizedSeparator(105, output.YellowColor))
	fmt.Printf("\n%s\n", output.Bold("MIGRATION HOST"))
	// Basic Information
	fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(host.GetName()))
	fmt.Printf("%s %s\n", output.Bold("Namespace:"), output.Yellow(host.GetNamespace()))
	fmt.Printf("%s %s\n", output.Bold("Created:"), output.Yellow(output.FormatTimestamp(host.GetCreationTimestamp().Time, useUTC)))
	// Host Spec Information
	if hostID, found, _ := unstructured.NestedString(host.Object, "spec", "id"); found {
		fmt.Printf("%s %s\n", output.Bold("Host ID:"), output.Yellow(hostID))
	}
	if hostName, found, _ := unstructured.NestedString(host.Object, "spec", "name"); found {
		fmt.Printf("%s %s\n", output.Bold("Host Name:"), output.Yellow(hostName))
	}
	if ipAddress, found, _ := unstructured.NestedString(host.Object, "spec", "ipAddress"); found {
		fmt.Printf("%s %s\n", output.Bold("IP Address:"), output.Yellow(ipAddress))
	}
	// Provider Information
	if providerMap, found, _ := unstructured.NestedMap(host.Object, "spec", "provider"); found {
		if providerName, ok := providerMap["name"].(string); ok {
			fmt.Printf("%s %s\n", output.Bold("Provider:"), output.Yellow(providerName))
		}
	}
	// Secret Information
	if secretMap, found, _ := unstructured.NestedMap(host.Object, "spec", "secret"); found {
		if secretName, ok := secretMap["name"].(string); ok {
			fmt.Printf("%s %s\n", output.Bold("Secret:"), output.Yellow(secretName))
		}
	}
	// Owner References
	if len(host.GetOwnerReferences()) > 0 {
		fmt.Printf("\n%s\n", output.Bold("OWNERSHIP"))
		for _, owner := range host.GetOwnerReferences() {
			fmt.Printf("%s %s/%s", output.Bold("Owner:"), owner.Kind, owner.Name)
			if owner.Controller != nil && *owner.Controller {
				fmt.Printf(" %s", output.Green("(controller)"))
			}
			fmt.Println()
		}
	}
	// Network Adapters Information from Provider Inventory
	if err := displayNetworkAdapters(ctx, configFlags, host, namespace, insecureSkipTLS); err != nil {
		// Log the error but don't fail the command - network adapter info is supplementary
		fmt.Printf("\n%s: %v\n", output.Bold("Network Adapters Info"), output.Red("Failed to fetch"))
	}
	// Status Information
	if status, found, _ := unstructured.NestedMap(host.Object, "status"); found && status != nil {
		fmt.Printf("\n%s\n", output.Bold("STATUS"))
		// Conditions
		if conditions, found, _ := unstructured.NestedSlice(host.Object, "status", "conditions"); found {
			fmt.Printf("%s\n", output.Bold("Conditions:"))
			for _, condition := range conditions {
				if condMap, ok := condition.(map[string]interface{}); ok {
					condType, _ := condMap["type"].(string)
					condStatus, _ := condMap["status"].(string)
					reason, _ := condMap["reason"].(string)
					message, _ := condMap["message"].(string)
					lastTransitionTime, _ := condMap["lastTransitionTime"].(string)
					fmt.Printf("  %s: %s", output.Bold(condType), output.ColorizeStatus(condStatus))
					if reason != "" {
						fmt.Printf(" (%s)", reason)
					}
					fmt.Println()
					if message != "" {
						fmt.Printf("    %s\n", message)
					}
					if lastTransitionTime != "" {
						fmt.Printf("    Last Transition: %s\n", lastTransitionTime)
					}
				}
			}
		}
		// Other status fields
		if observedGeneration, found, _ := unstructured.NestedInt64(host.Object, "status", "observedGeneration"); found {
			fmt.Printf("%s %d\n", output.Bold("Observed Generation:"), observedGeneration)
		}
	}
	// Annotations
	if annotations := host.GetAnnotations(); len(annotations) > 0 {
		fmt.Printf("\n%s\n", output.Bold("ANNOTATIONS"))
		for key, value := range annotations {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	// Labels
	if labels := host.GetLabels(); len(labels) > 0 {
		fmt.Printf("\n%s\n", output.Bold("LABELS"))
		for key, value := range labels {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	fmt.Println() // Add a newline at the end
	return nil
}
// displayNetworkAdapters fetches and displays network adapter information for a
// host from the provider inventory service.
//
// It resolves the host ID and provider name from the Host CR spec, discovers
// the inventory URL, and queries the provider inventory for the host record.
// Only oVirt and vSphere providers expose host inventory. All wrapped errors
// use %w so callers can inspect the underlying cause with errors.Is/errors.As.
// Returns an error when required spec fields are missing, the provider lookup
// or inventory fetch fails, or the host has no network adapters.
func displayNetworkAdapters(ctx context.Context, configFlags *genericclioptions.ConfigFlags, host *unstructured.Unstructured, namespace string, insecureSkipTLS bool) error {
	// Extract host ID and provider name from the host resource spec.
	hostID, found, _ := unstructured.NestedString(host.Object, "spec", "id")
	if !found || hostID == "" {
		return fmt.Errorf("host ID not found in host spec")
	}
	providerMap, found, _ := unstructured.NestedMap(host.Object, "spec", "provider")
	if !found {
		return fmt.Errorf("provider information not found in host spec")
	}
	providerName, ok := providerMap["name"].(string)
	if !ok || providerName == "" {
		return fmt.Errorf("provider name not found in host spec")
	}
	// Get the provider object.
	provider, err := inventory.GetProviderByName(ctx, configFlags, providerName, namespace)
	if err != nil {
		return fmt.Errorf("failed to get provider: %w", err)
	}
	// Create provider client with inventory URL discovery.
	inventoryURL := client.DiscoverInventoryURL(ctx, configFlags, namespace)
	providerClient := inventory.NewProviderClientWithInsecure(configFlags, provider, inventoryURL, insecureSkipTLS)
	// Get provider type to verify host support.
	providerType, err := providerClient.GetProviderType()
	if err != nil {
		return fmt.Errorf("failed to get provider type: %w", err)
	}
	// Only fetch network adapters for supported provider types.
	if providerType != "ovirt" && providerType != "vsphere" {
		return fmt.Errorf("provider type '%s' does not support host inventory", providerType)
	}
	// Fetch specific host data from provider inventory.
	hostData, err := providerClient.GetHost(ctx, hostID, 4) // detail level 4 for full info
	if err != nil {
		return fmt.Errorf("failed to fetch host inventory data: %w", err)
	}
	// Extract network adapters from host data.
	hostMap, ok := hostData.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected host data format")
	}
	networkAdapters, found, _ := unstructured.NestedSlice(hostMap, "networkAdapters")
	if !found || len(networkAdapters) == 0 {
		return fmt.Errorf("no network adapters found")
	}
	// Display network adapters information. Non-map entries are skipped
	// silently, matching the tolerant handling elsewhere in this file.
	fmt.Printf("\n%s\n", output.Bold("NETWORK ADAPTERS"))
	for i, adapter := range networkAdapters {
		if adapterMap, ok := adapter.(map[string]interface{}); ok {
			fmt.Printf("%s %d:\n", output.Bold("Adapter"), i+1)
			if name, ok := adapterMap["name"].(string); ok {
				fmt.Printf(" %s %s\n", output.Bold("Name:"), output.Yellow(name))
			}
			if ipAddress, ok := adapterMap["ipAddress"].(string); ok {
				fmt.Printf(" %s %s\n", output.Bold("IP Address:"), output.Yellow(ipAddress))
			}
			if subnetMask, ok := adapterMap["subnetMask"].(string); ok {
				fmt.Printf(" %s %s\n", output.Bold("Subnet Mask:"), output.Yellow(subnetMask))
			}
			// Numeric inventory fields arrive as JSON numbers (float64).
			if mtu, ok := adapterMap["mtu"].(float64); ok {
				fmt.Printf(" %s %.0f\n", output.Bold("MTU:"), mtu)
			}
			if linkSpeed, ok := adapterMap["linkSpeed"].(float64); ok {
				fmt.Printf(" %s %.0f Mbps\n", output.Bold("Link Speed:"), linkSpeed)
			}
			// Add spacing between adapters if there are multiple.
			if i < len(networkAdapters)-1 {
				fmt.Println()
			}
		}
	}
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/describe/mapping/describe.go | Go | package mapping
import (
	"context"
	"fmt"
	"sort"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/yaacov/kubectl-mtv/pkg/util/client"
	"github.com/yaacov/kubectl-mtv/pkg/util/output"
)
// Describe describes a network or storage mapping.
//
// mappingType selects the resource kind: "storage" targets StorageMaps; any
// other value (conventionally "network") falls back to NetworkMaps. The
// mapping is fetched from the given namespace and rendered to stdout in
// sections: basic metadata, provider references, ownership, mapping entries,
// status conditions, annotations, and labels. useUTC controls how the
// creation timestamp is rendered.
func Describe(configFlags *genericclioptions.ConfigFlags, mappingType, name, namespace string, useUTC bool) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Select appropriate GVR based on mapping type; network is the default.
	var gvr = client.NetworkMapGVR
	var resourceType = "NETWORK MAPPING"
	if mappingType == "storage" {
		gvr = client.StorageMapGVR
		resourceType = "STORAGE MAPPING"
	}
	// Get the mapping
	mapping, err := c.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get %s mapping: %v", mappingType, err)
	}
	// Print the mapping details
	fmt.Printf("\n%s", output.ColorizedSeparator(105, output.YellowColor))
	fmt.Printf("\n%s\n", output.Cyan(resourceType))
	// Basic Information
	fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(mapping.GetName()))
	fmt.Printf("%s %s\n", output.Bold("Namespace:"), output.Yellow(mapping.GetNamespace()))
	fmt.Printf("%s %s\n", output.Bold("Created:"), output.Yellow(output.FormatTimestamp(mapping.GetCreationTimestamp().Time, useUTC)))
	// Provider Information
	if sourceProvider, found, _ := unstructured.NestedMap(mapping.Object, "spec", "provider", "source"); found {
		if sourceName, ok := sourceProvider["name"].(string); ok {
			fmt.Printf("%s %s\n", output.Bold("Source Provider:"), output.Yellow(sourceName))
		}
	}
	if destProvider, found, _ := unstructured.NestedMap(mapping.Object, "spec", "provider", "destination"); found {
		if destName, ok := destProvider["name"].(string); ok {
			fmt.Printf("%s %s\n", output.Bold("Destination Provider:"), output.Yellow(destName))
		}
	}
	// Owner References
	if len(mapping.GetOwnerReferences()) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("OWNERSHIP"))
		for _, owner := range mapping.GetOwnerReferences() {
			fmt.Printf("%s %s/%s", output.Bold("Owner:"), owner.Kind, owner.Name)
			if owner.Controller != nil && *owner.Controller {
				fmt.Printf(" %s", output.Green("(controller)"))
			}
			fmt.Println()
		}
	}
	// Mapping Specification. Include the underlying error in the notice so
	// the user can see why (e.g. no mapping entries defined) rather than a
	// bare failure message with the cause discarded.
	if err := displayMappingSpec(mapping, mappingType); err != nil {
		fmt.Printf("\n%s: %s (%v)\n", output.Bold("Mapping Details"), output.Red("Failed to display"), err)
	}
	// Status Information
	if status, found, _ := unstructured.NestedMap(mapping.Object, "status"); found && status != nil {
		fmt.Printf("\n%s\n", output.Cyan("STATUS"))
		// Conditions
		if conditions, found, _ := unstructured.NestedSlice(mapping.Object, "status", "conditions"); found {
			output.PrintConditions(conditions)
		}
		// Other status fields
		if observedGeneration, found, _ := unstructured.NestedInt64(mapping.Object, "status", "observedGeneration"); found {
			fmt.Printf("%s %d\n", output.Bold("Observed Generation:"), observedGeneration)
		}
	}
	// Annotations
	if annotations := mapping.GetAnnotations(); len(annotations) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("ANNOTATIONS"))
		for key, value := range annotations {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	// Labels
	if labels := mapping.GetLabels(); len(labels) > 0 {
		fmt.Printf("\n%s\n", output.Cyan("LABELS"))
		for key, value := range labels {
			fmt.Printf("%s: %s\n", output.Bold(key), value)
		}
	}
	fmt.Println() // Add a newline at the end
	return nil
}
// displayMappingSpec displays the mapping specification details with custom
// formatting: a section header followed by a table of source/destination
// entries rendered via formatMappingEntry.
//
// Returns an error if the mapping has no spec.map entries.
//
// NOTE(review): mappingType is currently unused here; it is kept so the
// signature matches the call site in Describe — confirm before removing.
func displayMappingSpec(mapping *unstructured.Unstructured, mappingType string) error {
	// Get the map entries
	mapEntries, found, _ := unstructured.NestedSlice(mapping.Object, "spec", "map")
	if !found || len(mapEntries) == 0 {
		return fmt.Errorf("no mapping entries found")
	}
	fmt.Printf("\n%s\n", output.Cyan("MAPPING ENTRIES"))
	return output.PrintMappingTable(mapEntries, formatMappingEntry)
}
// formatMappingEntry formats one side of a mapping entry (entryType is
// "source" or "destination") as a multi-line string for table display.
//
// Well-known fields (id, name, path, storageClass, accessMode, vlan, multus)
// are rendered first with friendly labels; any remaining non-empty string
// fields follow in sorted key order so the output is deterministic — a plain
// `range` over the map would emit them in randomized order on every run.
// Returns "" when the entry side is absent.
func formatMappingEntry(entryMap map[string]interface{}, entryType string) string {
	entry, found, _ := unstructured.NestedMap(entryMap, entryType)
	if !found {
		return ""
	}
	var parts []string
	// Common fields that might be present
	if id, ok := entry["id"].(string); ok && id != "" {
		parts = append(parts, fmt.Sprintf("ID: %s", id))
	}
	if name, ok := entry["name"].(string); ok && name != "" {
		parts = append(parts, fmt.Sprintf("Name: %s", name))
	}
	if path, ok := entry["path"].(string); ok && path != "" {
		parts = append(parts, fmt.Sprintf("Path: %s", path))
	}
	// For storage mappings
	if storageClass, ok := entry["storageClass"].(string); ok && storageClass != "" {
		parts = append(parts, fmt.Sprintf("Storage Class: %s", storageClass))
	}
	if accessMode, ok := entry["accessMode"].(string); ok && accessMode != "" {
		parts = append(parts, fmt.Sprintf("Access Mode: %s", accessMode))
	}
	// For network mappings
	if vlan, ok := entry["vlan"].(string); ok && vlan != "" {
		parts = append(parts, fmt.Sprintf("VLAN: %s", vlan))
	}
	if multus, found, _ := unstructured.NestedMap(entry, "multus"); found {
		if networkName, ok := multus["networkName"].(string); ok && networkName != "" {
			parts = append(parts, fmt.Sprintf("Multus Network: %s", networkName))
		}
	}
	// Any other string fields that might be interesting, skipping the fields
	// already handled above. Collect keys first and sort them so repeated
	// invocations produce identical output.
	handled := map[string]bool{
		"id": true, "name": true, "path": true, "storageClass": true,
		"accessMode": true, "vlan": true, "multus": true,
	}
	var extraKeys []string
	for key, value := range entry {
		if handled[key] || key == "" {
			continue
		}
		if strValue, ok := value.(string); ok && strValue != "" {
			extraKeys = append(extraKeys, key)
		}
	}
	sort.Strings(extraKeys)
	for _, key := range extraKeys {
		strValue := entry[key].(string)
		// Capitalize first letter for display
		displayKey := strings.ToUpper(key[:1]) + key[1:]
		parts = append(parts, fmt.Sprintf("%s: %s", displayKey, strValue))
	}
	// Join all parts with newlines for multi-line cell display
	return strings.Join(parts, "\n")
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/describe/plan/describe.go | Go | package plan
import (
"context"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan/status"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
)
// Describe describes a migration plan.
//
// It fetches the Plan CR from the given namespace and prints a human-readable
// report to stdout: basic metadata, archived state, readiness/status derived
// from the plan's migrations, the spec section, mapping references,
// running/latest migration progress, the resolved network and storage mapping
// tables, status conditions, and (when withVMs is set) the per-VM details
// from the plan spec. useUTC controls timestamp rendering. Display helpers
// that fail only print a notice; the command itself still succeeds.
func Describe(configFlags *genericclioptions.ConfigFlags, name, namespace string, withVMs bool, useUTC bool) error {
	c, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}
	// Get the plan
	plan, err := c.Resource(client.PlansGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get plan: %v", err)
	}
	// Print the plan details
	fmt.Printf("\n%s", output.ColorizedSeparator(105, output.YellowColor))
	fmt.Printf("\n%s\n", output.Cyan("MIGRATION PLAN"))
	// Basic Information
	fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(plan.GetName()))
	fmt.Printf("%s %s\n", output.Bold("Namespace:"), output.Yellow(plan.GetNamespace()))
	fmt.Printf("%s %s\n", output.Bold("Created:"), output.Yellow(output.FormatTimestamp(plan.GetCreationTimestamp().Time, useUTC)))
	// Get archived status; a plan without the field is reported as "false".
	archived, exists, _ := unstructured.NestedBool(plan.Object, "spec", "archived")
	if exists {
		fmt.Printf("%s %s\n", output.Bold("Archived:"), output.Yellow(fmt.Sprintf("%t", archived)))
	} else {
		fmt.Printf("%s %s\n", output.Bold("Archived:"), output.Yellow("false"))
	}
	// Plan Details. NOTE(review): the error from GetPlanDetails is
	// deliberately ignored so a partially-resolvable plan is still described.
	planDetails, _ := status.GetPlanDetails(c, namespace, plan, client.MigrationsGVR)
	fmt.Printf("%s %s\n", output.Bold("Ready:"), output.ColorizeBoolean(planDetails.IsReady))
	fmt.Printf("%s %s\n", output.Bold("Status:"), output.ColorizeStatus(planDetails.Status))
	// Display enhanced spec section
	displayPlanSpec(plan)
	// Display enhanced mappings section
	networkMapping, _, _ := unstructured.NestedString(plan.Object, "spec", "map", "network", "name")
	storageMapping, _, _ := unstructured.NestedString(plan.Object, "spec", "map", "storage", "name")
	migrationType, _, _ := unstructured.NestedString(plan.Object, "spec", "type")
	displayPlanMappings(networkMapping, storageMapping, migrationType)
	// Running Migration (only present while a migration is in progress)
	if planDetails.RunningMigration != nil {
		fmt.Printf("\n%s\n", output.Cyan("RUNNING MIGRATION"))
		fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(planDetails.RunningMigration.GetName()))
		fmt.Printf("%s Total: %s, Completed: %s\n",
			output.Bold("Migration Progress:"),
			output.Blue(fmt.Sprintf("%3d", planDetails.VMStats.Total)),
			output.Blue(fmt.Sprintf("%3d", planDetails.VMStats.Completed)))
		fmt.Printf("%s Succeeded: %s, Failed: %s, Canceled: %s\n",
			output.Bold("VM Status:         "),
			output.Green(fmt.Sprintf("%3d", planDetails.VMStats.Succeeded)),
			output.Red(fmt.Sprintf("%3d", planDetails.VMStats.Failed)),
			output.Yellow(fmt.Sprintf("%3d", planDetails.VMStats.Canceled)))
		printDiskProgress(planDetails.DiskProgress)
	}
	// Latest Migration (most recent completed/attempted migration)
	if planDetails.LatestMigration != nil {
		fmt.Printf("\n%s\n", output.Cyan("LATEST MIGRATION"))
		fmt.Printf("%s %s\n", output.Bold("Name:"), output.Yellow(planDetails.LatestMigration.GetName()))
		fmt.Printf("%s Total: %s, Completed: %s\n",
			output.Bold("Migration Progress:"),
			output.Blue(fmt.Sprintf("%3d", planDetails.VMStats.Total)),
			output.Blue(fmt.Sprintf("%3d", planDetails.VMStats.Completed)))
		fmt.Printf("%s Succeeded: %s, Failed: %s, Canceled: %s\n",
			output.Bold("VM Status:         "),
			output.Green(fmt.Sprintf("%3d", planDetails.VMStats.Succeeded)),
			output.Red(fmt.Sprintf("%3d", planDetails.VMStats.Failed)),
			output.Yellow(fmt.Sprintf("%3d", planDetails.VMStats.Canceled)))
		printDiskProgress(planDetails.DiskProgress)
	}
	// Display network mapping table; failure is non-fatal.
	if networkMapping != "" {
		if err := displayNetworkMapping(c, namespace, networkMapping); err != nil {
			fmt.Printf("Failed to display network mapping: %v\n", err)
		}
	}
	// Display storage mapping table; failure is non-fatal.
	if storageMapping != "" {
		if err := displayStorageMapping(c, namespace, storageMapping); err != nil {
			fmt.Printf("Failed to display storage mapping: %v\n", err)
		}
	}
	// Display conditions
	conditions, exists, _ := unstructured.NestedSlice(plan.Object, "status", "conditions")
	if exists {
		displayConditions(conditions)
	}
	// Display VMs if --with-vms flag is set
	if withVMs {
		if err := displayPlanVMs(plan); err != nil {
			fmt.Printf("Failed to display VMs: %v\n", err)
		}
	}
	return nil
}
// printDiskProgress prints disk transfer progress information.
// Nothing is printed when there is no total to report against.
// The text is green once transfer reaches 100%, yellow while in progress.
func printDiskProgress(progress status.ProgressStats) {
	if progress.Total <= 0 {
		return
	}
	percentage := float64(progress.Completed) / float64(progress.Total) * 100
	// NOTE(review): values are divided by 1024 and labeled GB — confirm the
	// source unit against ProgressStats producers.
	progressText := fmt.Sprintf("%.1f%% (%d/%d GB)",
		percentage,
		progress.Completed/(1024),
		progress.Total/(1024))
	styled := output.Yellow(progressText)
	if percentage >= 100 {
		styled = output.Green(progressText)
	}
	fmt.Printf("%s %s\n", output.Bold("Disk Transfer:"), styled)
}
// displayNetworkMapping fetches the named NetworkMap and prints its mapping
// entries as a table. An existing map with no entries prints nothing and is
// not an error; only the fetch itself can fail.
func displayNetworkMapping(c dynamic.Interface, namespace, networkMapping string) error {
	networkMap, err := c.Resource(client.NetworkMapGVR).Namespace(namespace).Get(context.TODO(), networkMapping, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pairs, ok, _ := unstructured.NestedSlice(networkMap.Object, "spec", "map")
	if !ok || len(pairs) == 0 {
		return nil
	}
	fmt.Printf("\n%s\n", output.Cyan("NETWORK MAPPING DETAILS"))
	return output.PrintMappingTable(pairs, formatPlanMappingEntry)
}
// displayStorageMapping fetches the named StorageMap and prints its mapping
// entries as a table. An existing map with no entries prints nothing and is
// not an error; only the fetch itself can fail.
func displayStorageMapping(c dynamic.Interface, namespace, storageMapping string) error {
	storageMap, err := c.Resource(client.StorageMapGVR).Namespace(namespace).Get(context.TODO(), storageMapping, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pairs, ok, _ := unstructured.NestedSlice(storageMap.Object, "spec", "map")
	if !ok || len(pairs) == 0 {
		return nil
	}
	fmt.Printf("\n%s\n", output.Cyan("STORAGE MAPPING DETAILS"))
	return output.PrintMappingTable(pairs, formatPlanMappingEntry)
}
// displayConditions prints the STATUS section header followed by the plan's
// conditions using the shared condition formatter. An empty slice prints
// nothing at all.
func displayConditions(conditions []interface{}) {
	if len(conditions) == 0 {
		return
	}
	fmt.Printf("\n%s\n", output.Cyan("STATUS"))
	output.PrintConditions(conditions)
}
// displayPlanSpec displays the plan specification in a beautified format:
// provider references, migration settings (target namespace, migration type,
// optional transfer network), advanced flags, and an optional description.
//
// Migration type resolution: spec.type wins when set; otherwise the legacy
// boolean spec.warm selects "warm"; the default is "cold".
func displayPlanSpec(plan *unstructured.Unstructured) {
	source, _, _ := unstructured.NestedString(plan.Object, "spec", "provider", "source", "name")
	target, _, _ := unstructured.NestedString(plan.Object, "spec", "provider", "destination", "name")
	targetNamespace, _, _ := unstructured.NestedString(plan.Object, "spec", "targetNamespace")
	transferNetwork, _, _ := unstructured.NestedString(plan.Object, "spec", "transferNetwork", "name")
	description, _, _ := unstructured.NestedString(plan.Object, "spec", "description")
	preserveCPUModel, _, _ := unstructured.NestedBool(plan.Object, "spec", "preserveClusterCPUModel")
	preserveStaticIPs, _, _ := unstructured.NestedBool(plan.Object, "spec", "preserveStaticIPs")
	// Determine migration type
	migrationType := "cold" // Default
	if migrationTypeValue, exists, _ := unstructured.NestedString(plan.Object, "spec", "type"); exists && migrationTypeValue != "" {
		migrationType = migrationTypeValue
	} else {
		// Fall back to legacy 'warm' boolean field
		if warm, exists, _ := unstructured.NestedBool(plan.Object, "spec", "warm"); exists && warm {
			migrationType = "warm"
		}
	}
	fmt.Printf("\n%s\n", output.Cyan("SPECIFICATION"))
	// Provider section
	fmt.Printf("%s\n", output.Bold("Providers:"))
	fmt.Printf("  %s %s\n", output.Bold("Source:"), output.Yellow(source))
	fmt.Printf("  %s %s\n", output.Bold("Target:"), output.Yellow(target))
	// Migration settings
	fmt.Printf("\n%s\n", output.Bold("Migration Settings:"))
	fmt.Printf("  %s %s\n", output.Bold("Target Namespace:"), output.Yellow(targetNamespace))
	fmt.Printf("  %s %s\n", output.Bold("Migration Type:"), output.Yellow(migrationType))
	// Transfer network is optional and only shown when configured.
	if transferNetwork != "" {
		fmt.Printf("  %s %s\n", output.Bold("Transfer Network:"), output.Yellow(transferNetwork))
	}
	// Advanced settings
	fmt.Printf("\n%s\n", output.Bold("Advanced Settings:"))
	fmt.Printf("  %s %s\n", output.Bold("Preserve CPU Model:"), output.ColorizeBoolean(preserveCPUModel))
	fmt.Printf("  %s %s\n", output.Bold("Preserve Static IPs:"), output.ColorizeBoolean(preserveStaticIPs))
	// Description (optional free text from the plan spec)
	if description != "" {
		fmt.Printf("\n%s\n", output.Bold("Description:"))
		fmt.Printf("  %s\n", description)
	}
}
// displayPlanMappings displays the plan's mapping references in a beautified
// format. Missing mappings are highlighted in red, except that a missing
// storage mapping on a conversion-only migration is shown in green since it
// is not required there.
func displayPlanMappings(networkMapping, storageMapping, migrationType string) {
	fmt.Printf("\n%s\n", output.Cyan("MAPPINGS"))
	networkValue := output.Red("Not specified")
	if networkMapping != "" {
		networkValue = output.Yellow(networkMapping)
	}
	fmt.Printf("%s %s\n", output.Bold("Network Mapping:"), networkValue)
	storageValue := output.Red("Not specified")
	switch {
	case storageMapping != "":
		storageValue = output.Yellow(storageMapping)
	case migrationType == "conversion":
		// Special message for conversion-only migrations.
		storageValue = output.Green("Not required (conversion-only)")
	}
	fmt.Printf("%s %s\n", output.Bold("Storage Mapping:"), storageValue)
}
// formatPlanMappingEntry formats one side of a mapping entry (entryType is
// "source" or "destination") as a newline-joined string for multi-line table
// cells. Known scalar fields are emitted in a fixed order with friendly
// labels, followed by the multus network name when present. Returns "" when
// the requested side is absent.
func formatPlanMappingEntry(entryMap map[string]interface{}, entryType string) string {
	entry, found, _ := unstructured.NestedMap(entryMap, entryType)
	if !found {
		return ""
	}
	var lines []string
	// addField appends "Label: value" when the key holds a non-empty string.
	addField := func(label, key string) {
		if v, ok := entry[key].(string); ok && v != "" {
			lines = append(lines, fmt.Sprintf("%s: %s", label, v))
		}
	}
	// Common identification fields.
	addField("ID", "id")
	addField("Name", "name")
	addField("Path", "path")
	// Storage-mapping fields.
	addField("Storage Class", "storageClass")
	addField("Access Mode", "accessMode")
	// Network-mapping fields.
	addField("VLAN", "vlan")
	addField("Type", "type")
	addField("Namespace", "namespace")
	if multus, ok, _ := unstructured.NestedMap(entry, "multus"); ok {
		if networkName, ok := multus["networkName"].(string); ok && networkName != "" {
			lines = append(lines, fmt.Sprintf("Multus Network: %s", networkName))
		}
	}
	return strings.Join(lines, "\n")
}
// displayPlanVMs displays the VMs from the plan specification with detailed
// information: basic identity, configuration (instance type, root disk,
// target power state), name templates, hooks, and LUKS disk-encryption
// references. Entries that are not maps are skipped silently.
//
// Returns an error only when spec.vms cannot be read; an empty/absent VM
// list is reported to the user but is not an error.
func displayPlanVMs(plan *unstructured.Unstructured) error {
	specVMs, exists, err := unstructured.NestedSlice(plan.Object, "spec", "vms")
	if err != nil {
		return fmt.Errorf("failed to get VMs from plan spec: %v", err)
	}
	if !exists || len(specVMs) == 0 {
		fmt.Printf("\n%s\n", output.Cyan("VIRTUAL MACHINES"))
		fmt.Printf("%s\n", output.Red("No VMs specified in the plan"))
		return nil
	}
	fmt.Printf("\n%s\n", output.Cyan("VIRTUAL MACHINES"))
	fmt.Printf("%s %s\n", output.Bold("VM Count:"), output.Blue(fmt.Sprintf("%d", len(specVMs))))
	// Display each VM with detailed information
	for i, v := range specVMs {
		vm, ok := v.(map[string]interface{})
		if !ok {
			continue
		}
		// Extract VM fields (all optional; empty strings mean "not set")
		vmName, _, _ := unstructured.NestedString(vm, "name")
		vmID, _, _ := unstructured.NestedString(vm, "id")
		targetName, _, _ := unstructured.NestedString(vm, "targetName")
		instanceType, _, _ := unstructured.NestedString(vm, "instanceType")
		rootDisk, _, _ := unstructured.NestedString(vm, "rootDisk")
		targetPowerState, _, _ := unstructured.NestedString(vm, "targetPowerState")
		pvcNameTemplate, _, _ := unstructured.NestedString(vm, "pvcNameTemplate")
		volumeNameTemplate, _, _ := unstructured.NestedString(vm, "volumeNameTemplate")
		networkNameTemplate, _, _ := unstructured.NestedString(vm, "networkNameTemplate")
		// Get hooks array
		hooks, _, _ := unstructured.NestedSlice(vm, "hooks")
		// Get LUKS object reference
		luks, _, _ := unstructured.NestedMap(vm, "luks")
		// Print VM header with separator
		fmt.Printf("\n%s", output.ColorizedSeparator(80, output.BlueColor))
		fmt.Printf("\n%s #%d\n", output.Bold(output.Cyan("VM")), i+1)
		// Basic Information
		fmt.Printf("%s\n", output.Bold("Basic Information:"))
		fmt.Printf("  %s %s\n", output.Bold("Name:"), output.Yellow(getStringOrDefault(vmName, "-")))
		fmt.Printf("  %s %s\n", output.Bold("ID:"), output.Cyan(getStringOrDefault(vmID, "-")))
		if targetName != "" {
			fmt.Printf("  %s %s\n", output.Bold("Target Name:"), output.Green(targetName))
		}
		// Configuration (section shown only when at least one field is set)
		hasConfig := instanceType != "" || rootDisk != "" || targetPowerState != ""
		if hasConfig {
			fmt.Printf("\n%s\n", output.Bold("Configuration:"))
			if instanceType != "" {
				fmt.Printf("  %s %s\n", output.Bold("Instance Type:"), output.Yellow(instanceType))
			}
			if rootDisk != "" {
				fmt.Printf("  %s %s\n", output.Bold("Root Disk:"), output.Blue(rootDisk))
			}
			if targetPowerState != "" {
				// Color encodes the requested state: on=green, off=red, auto=yellow.
				powerStateColor := output.Green(targetPowerState)
				switch targetPowerState {
				case "off":
					powerStateColor = output.Red(targetPowerState)
				case "auto":
					powerStateColor = output.Yellow(targetPowerState)
				}
				fmt.Printf("  %s %s\n", output.Bold("Target Power State:"), powerStateColor)
			}
		}
		// Name Templates (section shown only when at least one is set)
		hasTemplates := pvcNameTemplate != "" || volumeNameTemplate != "" || networkNameTemplate != ""
		if hasTemplates {
			fmt.Printf("\n%s\n", output.Bold("Name Templates:"))
			if pvcNameTemplate != "" {
				fmt.Printf("  %s %s\n", output.Bold("PVC Template:"), output.Cyan(pvcNameTemplate))
			}
			if volumeNameTemplate != "" {
				fmt.Printf("  %s %s\n", output.Bold("Volume Template:"), output.Cyan(volumeNameTemplate))
			}
			if networkNameTemplate != "" {
				fmt.Printf("  %s %s\n", output.Bold("Network Template:"), output.Cyan(networkNameTemplate))
			}
		}
		// Hooks
		if len(hooks) > 0 {
			fmt.Printf("\n%s\n", output.Bold("Hooks:"))
			for j, h := range hooks {
				hook, ok := h.(map[string]interface{})
				if !ok {
					continue
				}
				hookName, _, _ := unstructured.NestedString(hook, "name")
				hookKind, _, _ := unstructured.NestedString(hook, "kind")
				hookNamespace, _, _ := unstructured.NestedString(hook, "namespace")
				fmt.Printf("  %s %d: %s", output.Bold("Hook"), j+1, output.Green(getStringOrDefault(hookName, "-")))
				if hookKind != "" || hookNamespace != "" {
					fmt.Printf(" (%s/%s)", getStringOrDefault(hookNamespace, "default"), getStringOrDefault(hookKind, "Hook"))
				}
				fmt.Println()
			}
		} else {
			fmt.Printf("\n%s %s\n", output.Bold("Hooks:"), "None")
		}
		// LUKS Configuration (reference to the secret holding disk keys)
		if len(luks) > 0 {
			fmt.Printf("\n%s\n", output.Bold("Disk Encryption (LUKS):"))
			luksName, _, _ := unstructured.NestedString(luks, "name")
			luksNamespace, _, _ := unstructured.NestedString(luks, "namespace")
			luksKind, _, _ := unstructured.NestedString(luks, "kind")
			if luksName != "" {
				fmt.Printf("  %s %s\n", output.Bold("Secret:"), output.Yellow(luksName))
				if luksNamespace != "" {
					fmt.Printf("  %s %s\n", output.Bold("Namespace:"), output.Blue(luksNamespace))
				}
				if luksKind != "" {
					fmt.Printf("  %s %s\n", output.Bold("Kind:"), output.Cyan(luksKind))
				}
			}
		} else {
			fmt.Printf("\n%s %s\n", output.Bold("Disk Encryption:"), "None")
		}
	}
	return nil
}
// getStringOrDefault returns value unless it is empty, in which case it
// returns defaultValue.
func getStringOrDefault(value, defaultValue string) string {
	if value != "" {
		return value
	}
	return defaultValue
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/describe/vm/describe_vm.go | Go | package plan
import (
"context"
"fmt"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
planutil "github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan/status"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
"github.com/yaacov/kubectl-mtv/pkg/util/watch"
)
// DescribeVM describes a specific VM in a migration plan. When watchMode is
// set, the description is re-rendered every 20 seconds via the watch helper;
// otherwise it is printed once.
func DescribeVM(configFlags *genericclioptions.ConfigFlags, name, namespace, vmName string, watchMode bool, useUTC bool) error {
	describe := func() error {
		return describeVMOnce(configFlags, name, namespace, vmName, useUTC)
	}
	if !watchMode {
		return describe()
	}
	return watch.Watch(describe, 20*time.Second)
}
// truncateString shortens s to at most maxLen bytes, appending "..." when
// truncation occurs.
//
// Fix: the original sliced s[:maxLen-3] unconditionally, which panics with a
// negative slice bound when maxLen < 3 and s is longer than maxLen. For such
// small limits the string is now hard-cut to maxLen bytes (no ellipsis fits).
// Note: slicing is byte-based, so a multi-byte UTF-8 rune may be split at the
// cut point — same as the original behavior.
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen < 3 {
		// No room for an ellipsis; clamp to a valid slice bound.
		if maxLen < 0 {
			maxLen = 0
		}
		return s[:maxLen]
	}
	// Reserve 3 characters for the ellipsis
	return s[:maxLen-3] + "..."
}
func describeVMOnce(configFlags *genericclioptions.ConfigFlags, name, namespace, vmName string, useUTC bool) error {
c, err := client.GetDynamicClient(configFlags)
if err != nil {
return fmt.Errorf("failed to get client: %v", err)
}
// Get the plan
plan, err := c.Resource(client.PlansGVR).Namespace(namespace).Get(context.TODO(), name, v1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get plan: %v", err)
}
// First check if VM exists in plan spec
specVMs, exists, err := unstructured.NestedSlice(plan.Object, "spec", "vms")
if err != nil || !exists {
fmt.Printf("No VMs found in plan '%s' specification\n", output.Yellow(name))
return nil
}
// Find VM ID from spec
var vmID string
for _, v := range specVMs {
vm, ok := v.(map[string]interface{})
if !ok {
continue
}
currentVMName, _, _ := unstructured.NestedString(vm, "name")
if currentVMName == vmName {
vmID, _, _ = unstructured.NestedString(vm, "id")
break
}
}
if vmID == "" {
fmt.Printf("VM '%s' is not part of plan '%s'\n", output.Yellow(vmName), output.Yellow(name))
return nil
}
// Get plan details
planDetails, _ := status.GetPlanDetails(c, namespace, plan, client.MigrationsGVR)
// Get migration object to display VM details
migration := planDetails.RunningMigration
if migration == nil {
migration = planDetails.LatestMigration
}
if migration == nil {
fmt.Printf("No migration found for plan '%s'. VM details will be available after the plan starts running.\n", output.Yellow(name))
return nil
}
// Get VMs list from migration status
vms, exists, err := unstructured.NestedSlice(migration.Object, "status", "vms")
if err != nil {
return fmt.Errorf("failed to get VM list: %v", err)
}
if !exists {
fmt.Printf("No VM status information found in migration. Please wait for the migration to start.\n")
return nil
}
// Find the specified VM using vmID
var targetVM map[string]interface{}
for _, v := range vms {
vm, ok := v.(map[string]interface{})
if !ok {
continue
}
currentVMID, _, _ := unstructured.NestedString(vm, "id")
if currentVMID == vmID {
targetVM = vm
break
}
}
if targetVM == nil {
fmt.Printf("VM '%s' (vmID=%s) status not yet available in migration\n", output.Yellow(vmName), output.Cyan(vmID))
return nil
}
// Print VM details
fmt.Print("\n", output.ColorizedSeparator(105, output.YellowColor))
fmt.Printf("\n%s", output.Bold("MIGRATION PLAN"))
fmt.Printf("\n%s %s\n", output.Bold("VM Details for:"), output.Yellow(vmName))
fmt.Printf("%s %s\n", output.Bold("Migration Plan:"), output.Yellow(name))
fmt.Printf("%s %s\n", output.Bold("Migration:"), output.Yellow(migration.GetName()))
fmt.Print("\n", output.ColorizedSeparator(105, output.YellowColor), "\n")
// Print basic VM information
vmID, _, _ = unstructured.NestedString(targetVM, "id")
vmPhase, _, _ := unstructured.NestedString(targetVM, "phase")
vmOS, _, _ := unstructured.NestedString(targetVM, "operatingSystem")
started, _, _ := unstructured.NestedString(targetVM, "started")
completed, _, _ := unstructured.NestedString(targetVM, "completed")
newName, _, _ := unstructured.NestedString(targetVM, "newName")
fmt.Printf("%s %s\n", output.Bold("ID:"), output.Cyan(vmID))
fmt.Printf("%s %s\n", output.Bold("Phase:"), output.ColorizeStatus(vmPhase))
fmt.Printf("%s %s\n", output.Bold("OS:"), output.Blue(vmOS))
if newName != "" {
fmt.Printf("%s %s\n", output.Bold("New Name:"), output.Yellow(newName))
}
if started != "" {
fmt.Printf("%s %s\n", output.Bold("Started:"), planutil.FormatTime(started, useUTC))
}
if completed != "" {
fmt.Printf("%s %s\n", output.Bold("Completed:"), planutil.FormatTime(completed, useUTC))
}
// Print conditions
conditions, exists, _ := unstructured.NestedSlice(targetVM, "conditions")
if exists && len(conditions) > 0 {
fmt.Print("\n", output.ColorizedSeparator(105, output.YellowColor))
fmt.Printf("\n%s\n", output.Bold("Conditions:"))
headers := []string{"TYPE", "STATUS", "CATEGORY", "MESSAGE"}
colWidths := []int{15, 10, 15, 50}
rows := make([][]string, 0, len(conditions))
for _, c := range conditions {
condition, ok := c.(map[string]interface{})
if !ok {
continue
}
condType, _, _ := unstructured.NestedString(condition, "type")
status, _, _ := unstructured.NestedString(condition, "status")
category, _, _ := unstructured.NestedString(condition, "category")
message, _, _ := unstructured.NestedString(condition, "message")
// Apply color to status
switch status {
case "True":
status = output.Green(status)
case "False":
status = output.Red(status)
}
rows = append(rows, []string{output.Bold(condType), status, category, message})
}
if len(rows) > 0 {
planutil.PrintTable(headers, rows, colWidths)
}
}
// Print pipeline information
pipeline, exists, _ := unstructured.NestedSlice(targetVM, "pipeline")
if exists {
fmt.Print("\n", output.ColorizedSeparator(105, output.YellowColor))
fmt.Printf("\n%s\n", output.Bold("Pipeline:"))
for _, p := range pipeline {
phase, ok := p.(map[string]interface{})
if !ok {
continue
}
phaseName, _, _ := unstructured.NestedString(phase, "name")
phaseDesc, _, _ := unstructured.NestedString(phase, "description")
phaseStatus, _, _ := unstructured.NestedString(phase, "phase")
phaseStarted, _, _ := unstructured.NestedString(phase, "started")
phaseCompleted, _, _ := unstructured.NestedString(phase, "completed")
fmt.Printf("\n%s\n", output.Yellow(fmt.Sprintf("[%s] %s", output.Bold(phaseName), phaseDesc)))
fmt.Printf("%s %s\n", output.Bold("Status:"), output.ColorizeStatus(phaseStatus))
fmt.Printf("%s %s\n", output.Bold("Started:"), planutil.FormatTime(phaseStarted, useUTC))
if phaseCompleted != "" {
fmt.Printf("%s %s\n", output.Bold("Completed:"), planutil.FormatTime(phaseCompleted, useUTC))
}
// Print progress
progressMap, exists, _ := unstructured.NestedMap(phase, "progress")
if exists {
completed, _, _ := unstructured.NestedInt64(progressMap, "completed")
total, _, _ := unstructured.NestedInt64(progressMap, "total")
if total > 0 {
percentage := float64(completed) / float64(total) * 100
progressText := fmt.Sprintf("%.1f%% (%d/%d)", percentage, completed, total)
if percentage >= 100 {
fmt.Printf("%s %s\n", output.Bold("Progress:"), output.Green(progressText))
} else if percentage >= 75 {
fmt.Printf("%s %s\n", output.Bold("Progress:"), output.Blue(progressText))
} else if percentage >= 25 {
fmt.Printf("%s %s\n", output.Bold("Progress:"), output.Yellow(progressText))
} else {
fmt.Printf("%s %s\n", output.Bold("Progress:"), output.Cyan(progressText))
}
}
}
// Print tasks if they exist
tasks, exists, _ := unstructured.NestedSlice(phase, "tasks")
if exists && len(tasks) > 0 {
fmt.Printf("\n%s\n", output.Bold("Tasks:"))
headers := []string{"NAME", "PHASE", "PROGRESS", "STARTED", "COMPLETED"}
colWidths := []int{40, 10, 15, 20, 20}
rows := make([][]string, 0, len(tasks))
for _, t := range tasks {
task, ok := t.(map[string]interface{})
if !ok {
continue
}
taskName, _, _ := unstructured.NestedString(task, "name")
// Truncate task name if longer than column width
taskName = truncateString(taskName, colWidths[0])
taskPhase, _, _ := unstructured.NestedString(task, "phase")
taskStarted, _, _ := unstructured.NestedString(task, "started")
taskCompleted, _, _ := unstructured.NestedString(task, "completed")
progress := "-"
progressMap, exists, _ := unstructured.NestedMap(task, "progress")
if exists {
completed, _, _ := unstructured.NestedInt64(progressMap, "completed")
total, _, _ := unstructured.NestedInt64(progressMap, "total")
if total > 0 {
percentage := float64(completed) / float64(total) * 100
progressText := fmt.Sprintf("%.1f%%", percentage)
if percentage >= 100 {
progress = output.Green(progressText)
} else if percentage >= 75 {
progress = output.Blue(progressText)
} else if percentage >= 25 {
progress = output.Yellow(progressText)
} else {
progress = output.Cyan(progressText)
}
}
}
rows = append(rows, []string{
taskName,
output.ColorizeStatus(taskPhase),
progress,
planutil.FormatTime(taskStarted, useUTC),
planutil.FormatTime(taskCompleted, useUTC),
})
}
planutil.PrintTable(headers, rows, colWidths)
}
}
}
return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/get/hook/list.go | Go | package hook
import (
"context"
"encoding/base64"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
"github.com/yaacov/kubectl-mtv/pkg/util/watch"
)
// extractHookImage returns the container image configured in spec.image,
// or an empty string when the field is absent.
func extractHookImage(hook unstructured.Unstructured) string {
	if img, ok, _ := unstructured.NestedString(hook.Object, "spec", "image"); ok {
		return img
	}
	return ""
}
// extractHookServiceAccount returns the service account named in spec.serviceAccount,
// or an empty string when the field is absent.
func extractHookServiceAccount(hook unstructured.Unstructured) string {
	if sa, ok, _ := unstructured.NestedString(hook.Object, "spec", "serviceAccount"); ok {
		return sa
	}
	return ""
}
// extractHookDeadline renders spec.deadline as a seconds string (e.g. "300s").
// A missing or zero deadline yields "".
func extractHookDeadline(hook unstructured.Unstructured) string {
	secs, ok, _ := unstructured.NestedInt64(hook.Object, "spec", "deadline")
	if ok && secs != 0 {
		return fmt.Sprintf("%ds", secs)
	}
	return ""
}
// extractHookPlaybookStatus reports "Yes" when spec.playbook carries content
// and "No" otherwise.
func extractHookPlaybookStatus(hook unstructured.Unstructured) string {
	// NestedString yields "" when the field is missing, so a single emptiness
	// check covers both the absent and the empty case.
	if pb, _, _ := unstructured.NestedString(hook.Object, "spec", "playbook"); pb != "" {
		return "Yes"
	}
	return "No"
}
// extractHookStatus derives a readiness string from status.conditions:
// "Ready"/"Not Ready" from the first Ready condition with a string status,
// "Unknown" when no such condition exists.
func extractHookStatus(hook unstructured.Unstructured) string {
	conditions, found, _ := unstructured.NestedSlice(hook.Object, "status", "conditions")
	if !found {
		return "Unknown"
	}
	for _, raw := range conditions {
		cond, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}
		if condType, _ := cond["type"].(string); condType != "Ready" {
			continue
		}
		// Keep scanning if the status value is not a string, mirroring the
		// tolerant handling of malformed condition entries.
		status, ok := cond["status"].(string)
		if !ok {
			continue
		}
		if status == "True" {
			return "Ready"
		}
		return "Not Ready"
	}
	return "Unknown"
}
// createHookItem flattens a Hook resource into the map shape shared by the
// table/json/yaml printers. The raw object is kept under "object" so the
// structured output formats can show the full resource.
func createHookItem(hook unstructured.Unstructured, useUTC bool) map[string]interface{} {
	return map[string]interface{}{
		"name":           hook.GetName(),
		"namespace":      hook.GetNamespace(),
		"image":          extractHookImage(hook),
		"serviceAccount": extractHookServiceAccount(hook),
		"deadline":       extractHookDeadline(hook),
		"playbook":       extractHookPlaybookStatus(hook),
		"status":         extractHookStatus(hook),
		"created":        output.FormatTimestamp(hook.GetCreationTimestamp().Time, useUTC),
		"object":         hook.Object,
	}
}
// ListHooks performs a single listing of hooks and renders them in the
// requested format. When hookName is non-empty only that hook is fetched.
func ListHooks(ctx context.Context, configFlags *genericclioptions.ConfigFlags, namespace, outputFormat string, hookName string, useUTC bool) error {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	// Validate the output format up front, before touching the API server.
	outputFormat = strings.ToLower(outputFormat)
	switch outputFormat {
	case "table", "json", "yaml":
		// supported
	default:
		return fmt.Errorf("unsupported output format: %s. Supported formats: table, json, yaml", outputFormat)
	}

	var items []map[string]interface{}
	if hookName == "" {
		items, err = getAllHooks(ctx, dynamicClient, namespace, useUTC)
	} else {
		items, err = getSpecificHook(ctx, dynamicClient, namespace, hookName, useUTC)
	}
	if err != nil {
		return err
	}

	switch outputFormat {
	case "json":
		return output.PrintJSONWithEmpty(items, "No hooks found.")
	case "yaml":
		return output.PrintYAMLWithEmpty(items, "No hooks found.")
	}
	return printHookTable(items)
}
// getAllHooks lists every hook in the namespace and converts each one into
// the printer item shape.
func getAllHooks(ctx context.Context, dynamicClient dynamic.Interface, namespace string, useUTC bool) ([]map[string]interface{}, error) {
	hookList, err := dynamicClient.Resource(client.HooksGVR).Namespace(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to list hooks: %v", err)
	}
	items := make([]map[string]interface{}, 0, len(hookList.Items))
	for i := range hookList.Items {
		items = append(items, createHookItem(hookList.Items[i], useUTC))
	}
	return items, nil
}
// getSpecificHook fetches a single hook by name and wraps it in a one-element
// item slice so callers can reuse the list rendering path.
func getSpecificHook(ctx context.Context, dynamicClient dynamic.Interface, namespace string, hookName string, useUTC bool) ([]map[string]interface{}, error) {
	hook, err := dynamicClient.Resource(client.HooksGVR).Namespace(namespace).Get(ctx, hookName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get hook '%s': %v", hookName, err)
	}
	return []map[string]interface{}{createHookItem(*hook, useUTC)}, nil
}
// printHookTable prints hooks in table format.
//
// The previous implementation built an intermediate [][]string, then converted
// each row back into a map keyed by a separate header-name mapping — three
// places that had to agree on column names. The rows are now fed to the table
// printer directly, with the display-name -> JSON-path pairing stated once.
func printHookTable(items []map[string]interface{}) error {
	if len(items) == 0 {
		fmt.Println("No hooks found.")
		return nil
	}

	printer := output.NewTablePrinter()
	printer.WithHeaders(
		output.Header{DisplayName: "NAME", JSONPath: "name"},
		output.Header{DisplayName: "IMAGE", JSONPath: "image"},
		output.Header{DisplayName: "SERVICE ACCOUNT", JSONPath: "serviceaccount"},
		output.Header{DisplayName: "DEADLINE", JSONPath: "deadline"},
		output.Header{DisplayName: "PLAYBOOK", JSONPath: "playbook"},
		output.Header{DisplayName: "STATUS", JSONPath: "status"},
		output.Header{DisplayName: "CREATED", JSONPath: "created"},
	)

	for _, item := range items {
		// Optional columns render as "-" when empty.
		serviceAccount := fmt.Sprintf("%v", item["serviceAccount"])
		if serviceAccount == "" {
			serviceAccount = "-"
		}
		deadline := fmt.Sprintf("%v", item["deadline"])
		if deadline == "" {
			deadline = "-"
		}
		printer.AddItem(map[string]interface{}{
			"name":           fmt.Sprintf("%v", item["name"]),
			"image":          fmt.Sprintf("%v", item["image"]),
			"serviceaccount": serviceAccount,
			"deadline":       deadline,
			"playbook":       fmt.Sprintf("%v", item["playbook"]),
			"status":         fmt.Sprintf("%v", item["status"]),
			"created":        fmt.Sprintf("%v", item["created"]),
		})
	}
	return printer.Print()
}
// GetHookPlaybookContent returns the base64-decoded spec.playbook of a hook.
// A hook without playbook content yields ("", nil); a malformed base64 value
// yields a decode error.
func GetHookPlaybookContent(hook unstructured.Unstructured) (string, error) {
	encoded, _, _ := unstructured.NestedString(hook.Object, "spec", "playbook")
	if encoded == "" {
		// No playbook content.
		return "", nil
	}
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", fmt.Errorf("failed to decode playbook content: %v", err)
	}
	return string(raw), nil
}
// List lists hooks with optional watch mode.
//
// When watchMode is true the listing is re-rendered on watch.DefaultInterval
// via watch.WrapWithWatch; otherwise ListHooks runs exactly once.
func List(ctx context.Context, configFlags *genericclioptions.ConfigFlags, namespace string, watchMode bool, outputFormat string, hookName string, useUTC bool) error {
	return watch.WrapWithWatch(watchMode, outputFormat, func() error {
		return ListHooks(ctx, configFlags, namespace, outputFormat, hookName, useUTC)
	}, watch.DefaultInterval)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/get/host/list.go | Go | package host
import (
"context"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
"github.com/yaacov/kubectl-mtv/pkg/util/watch"
)
// extractProviderName returns spec.provider.name from the host, or "" when
// the field is absent or not a string.
func extractProviderName(host unstructured.Unstructured) string {
	// A single nested-string lookup replaces the NestedMap + type assertion:
	// both fail to the empty string for any missing or mistyped field.
	name, _, _ := unstructured.NestedString(host.Object, "spec", "provider", "name")
	return name
}
// extractHostID returns the provider-side host ID from spec.id, or "" when absent.
func extractHostID(host unstructured.Unstructured) string {
	if id, ok, _ := unstructured.NestedString(host.Object, "spec", "id"); ok {
		return id
	}
	return ""
}
// extractHostIPAddress returns spec.ipAddress, or "" when absent.
func extractHostIPAddress(host unstructured.Unstructured) string {
	if ip, ok, _ := unstructured.NestedString(host.Object, "spec", "ipAddress"); ok {
		return ip
	}
	return ""
}
// extractHostStatus derives a readiness string from the host status:
// "Ready"/"Not Ready" from the Ready entry in status.conditions, or
// "Unknown" when no usable condition is present.
//
// The previous implementation first probed status.conditions.Ready with
// NestedBool; that lookup can never succeed because conditions is a slice,
// so the nested map walk fails at it — the dead branch has been removed.
func extractHostStatus(host unstructured.Unstructured) string {
	conditions, found, _ := unstructured.NestedSlice(host.Object, "status", "conditions")
	if found && len(conditions) > 0 {
		// Look for the Ready condition.
		for _, condition := range conditions {
			condMap, ok := condition.(map[string]interface{})
			if !ok {
				continue
			}
			if condType, ok := condMap["type"].(string); !ok || condType != "Ready" {
				continue
			}
			// Only a string-valued status is meaningful; otherwise keep scanning.
			if status, ok := condMap["status"].(string); ok {
				if status == "True" {
					return "Ready"
				}
				return "Not Ready"
			}
		}
	}
	return "Unknown"
}
// createHostItem flattens a Host resource into the map shape shared by the
// table/json/yaml printers, including owner metadata when present.
func createHostItem(host unstructured.Unstructured, useUTC bool) map[string]interface{} {
	item := map[string]interface{}{
		"name":      host.GetName(),
		"namespace": host.GetNamespace(),
		"id":        extractHostID(host),
		"provider":  extractProviderName(host),
		"ipAddress": extractHostIPAddress(host),
		"status":    extractHostStatus(host),
		"created":   output.FormatTimestamp(host.GetCreationTimestamp().Time, useUTC),
		"object":    host.Object, // keep the raw object for structured output
	}
	// Surface the first owner reference, if any.
	if owners := host.GetOwnerReferences(); len(owners) > 0 {
		item["owner"] = owners[0].Name
		item["ownerKind"] = owners[0].Kind
	}
	return item
}
// ListHosts performs a single listing of hosts and renders them in the
// requested format. When hostName is non-empty only that host is fetched.
func ListHosts(ctx context.Context, configFlags *genericclioptions.ConfigFlags, namespace, outputFormat string, hostName string, useUTC bool) error {
	dynamicClient, err := client.GetDynamicClient(configFlags)
	if err != nil {
		return fmt.Errorf("failed to get client: %v", err)
	}

	// Validate the output format up front, before touching the API server.
	outputFormat = strings.ToLower(outputFormat)
	switch outputFormat {
	case "table", "json", "yaml":
		// supported
	default:
		return fmt.Errorf("unsupported output format: %s. Supported formats: table, json, yaml", outputFormat)
	}

	var items []map[string]interface{}
	if hostName == "" {
		items, err = getAllHosts(ctx, dynamicClient, namespace, useUTC)
	} else {
		items, err = getSpecificHost(ctx, dynamicClient, namespace, hostName, useUTC)
	}
	if err != nil {
		return err
	}

	switch outputFormat {
	case "json":
		return output.PrintJSONWithEmpty(items, "No hosts found.")
	case "yaml":
		return output.PrintYAMLWithEmpty(items, "No hosts found.")
	}
	return printHostTable(items)
}
// getAllHosts lists every host in the namespace and converts each one into
// the printer item shape.
func getAllHosts(ctx context.Context, dynamicClient dynamic.Interface, namespace string, useUTC bool) ([]map[string]interface{}, error) {
	hostList, err := dynamicClient.Resource(client.HostsGVR).Namespace(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to list hosts: %v", err)
	}
	items := make([]map[string]interface{}, 0, len(hostList.Items))
	for i := range hostList.Items {
		items = append(items, createHostItem(hostList.Items[i], useUTC))
	}
	return items, nil
}
// getSpecificHost fetches a single host by name and wraps it in a one-element
// item slice so callers can reuse the list rendering path.
func getSpecificHost(ctx context.Context, dynamicClient dynamic.Interface, namespace string, hostName string, useUTC bool) ([]map[string]interface{}, error) {
	host, err := dynamicClient.Resource(client.HostsGVR).Namespace(namespace).Get(ctx, hostName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get host '%s': %v", hostName, err)
	}
	return []map[string]interface{}{createHostItem(*host, useUTC)}, nil
}
// printHostTable prints hosts in table format.
//
// The previous implementation built an intermediate [][]string and then
// re-converted each row into a map for the table printer, with JSON paths
// derived from display names via string munging. The rows are now fed to the
// printer directly and each display-name -> JSON-path pairing is stated once.
func printHostTable(items []map[string]interface{}) error {
	if len(items) == 0 {
		fmt.Println("No hosts found.")
		return nil
	}

	printer := output.NewTablePrinter()
	printer.WithHeaders(
		output.Header{DisplayName: "NAME", JSONPath: "name"},
		output.Header{DisplayName: "ID", JSONPath: "id"},
		output.Header{DisplayName: "PROVIDER", JSONPath: "provider"},
		output.Header{DisplayName: "IP ADDRESS", JSONPath: "ipaddress"},
		output.Header{DisplayName: "STATUS", JSONPath: "status"},
		output.Header{DisplayName: "CREATED", JSONPath: "created"},
	)

	for _, item := range items {
		printer.AddItem(map[string]interface{}{
			"name":      fmt.Sprintf("%v", item["name"]),
			"id":        fmt.Sprintf("%v", item["id"]),
			"provider":  fmt.Sprintf("%v", item["provider"]),
			"ipaddress": fmt.Sprintf("%v", item["ipAddress"]),
			"status":    fmt.Sprintf("%v", item["status"]),
			"created":   fmt.Sprintf("%v", item["created"]),
		})
	}
	return printer.Print()
}
// List lists hosts with optional watch mode.
//
// When watchMode is true the listing is re-rendered on watch.DefaultInterval
// via watch.WrapWithWatch; otherwise ListHosts runs exactly once.
func List(ctx context.Context, configFlags *genericclioptions.ConfigFlags, namespace string, watchMode bool, outputFormat string, hostName string, useUTC bool) error {
	return watch.WrapWithWatch(watchMode, outputFormat, func() error {
		return ListHosts(ctx, configFlags, namespace, outputFormat, hostName, useUTC)
	}, watch.DefaultInterval)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/get/inventory/client.go | Go | package inventory
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// ProviderClient provides a unified client for all provider types
// (oVirt, vSphere, OpenStack, OpenShift/KubeVirt, OVA), wrapping the
// forklift inventory HTTP API for a single Provider resource.
type ProviderClient struct {
	configFlags *genericclioptions.ConfigFlags // kubeconfig/context flags used to build clients
	provider *unstructured.Unstructured // the Provider CR whose inventory is queried
	inventoryURL string // base URL of the inventory service; empty means discover it
	insecureSkipTLS bool // skip TLS certificate verification when true
}
// NewProviderClientWithInsecure builds a ProviderClient for the given
// provider, optionally skipping TLS verification for the inventory endpoint.
func NewProviderClientWithInsecure(configFlags *genericclioptions.ConfigFlags, provider *unstructured.Unstructured, inventoryURL string, insecureSkipTLS bool) *ProviderClient {
	pc := &ProviderClient{
		provider:     provider,
		inventoryURL: inventoryURL,
	}
	pc.configFlags = configFlags
	pc.insecureSkipTLS = insecureSkipTLS
	return pc
}
// GetResource fetches a resource from the provider's inventory at the given
// path, after verifying the provider reports Ready. Requests and responses
// are logged at increasing klog verbosity (v=2 summary, v=3 full payload).
func (pc *ProviderClient) GetResource(ctx context.Context, resourcePath string) (interface{}, error) {
	// Provider identity, used only for log lines below.
	providerName := pc.GetProviderName()
	ns := pc.GetProviderNamespace()
	pType, _ := pc.GetProviderType()
	uid, _ := pc.GetProviderUID()

	// Refuse to query a provider that is not Ready.
	if err := pc.checkProviderReady(); err != nil {
		return nil, err
	}

	klog.V(2).Infof("Fetching inventory from provider %s/%s (type=%s, uid=%s) - path: %s, baseURL: %s, insecure=%v",
		ns, providerName, pType, uid, resourcePath, pc.inventoryURL, pc.insecureSkipTLS)

	result, err := client.FetchProviderInventoryWithInsecure(ctx, pc.configFlags, pc.inventoryURL, pc.provider, resourcePath, pc.insecureSkipTLS)
	if err != nil {
		klog.V(1).Infof("Failed to fetch inventory from provider %s/%s - path: %s, error: %v",
			ns, providerName, resourcePath, err)
		return nil, err
	}

	// Summarize the response shape for the v=2 success log line.
	resultType, resultSize := "unknown", 0
	if arr, ok := result.([]interface{}); ok {
		resultType, resultSize = "array", len(arr)
	} else if obj, ok := result.(map[string]interface{}); ok {
		resultType, resultSize = "object", len(obj)
	}

	klog.V(2).Infof("Successfully fetched inventory from provider %s/%s - path: %s, result_type: %s, result_size: %d",
		ns, providerName, resourcePath, resultType, resultSize)
	// Full payload only at trace verbosity.
	klog.V(3).Infof("Full inventory response from provider %s/%s - path: %s, response: %+v",
		ns, providerName, resourcePath, result)
	return result, nil
}
// GetResourceWithQuery fetches a resource, appending the query string (if any)
// to the path as "path?query".
func (pc *ProviderClient) GetResourceWithQuery(ctx context.Context, resourcePath, query string) (interface{}, error) {
	path := resourcePath
	if query != "" {
		path = fmt.Sprintf("%s?%s", resourcePath, query)
	}
	return pc.GetResource(ctx, path)
}
// GetResourceCollection fetches a collection of resources at the given
// inventory detail level (higher detail returns more fields per object).
func (pc *ProviderClient) GetResourceCollection(ctx context.Context, collection string, detail int) (interface{}, error) {
	return pc.GetResourceWithQuery(ctx, collection, fmt.Sprintf("detail=%d", detail))
}

// GetResourceByID fetches a specific resource by ID at the given detail level.
func (pc *ProviderClient) GetResourceByID(ctx context.Context, collection, id string, detail int) (interface{}, error) {
	return pc.GetResourceWithQuery(ctx, fmt.Sprintf("%s/%s", collection, id), fmt.Sprintf("detail=%d", detail))
}

// oVirt Provider Resources.
//
// Every getter below is a thin wrapper over GetResourceCollection /
// GetResourceByID with a fixed inventory collection name; behavior for an
// unsupported provider type is determined by the inventory service, not here.
func (pc *ProviderClient) GetDataCenters(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "datacenters", detail)
}

func (pc *ProviderClient) GetDataCenter(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "datacenters", id, detail)
}

func (pc *ProviderClient) GetClusters(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "clusters", detail)
}

func (pc *ProviderClient) GetCluster(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "clusters", id, detail)
}

func (pc *ProviderClient) GetHosts(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "hosts", detail)
}

func (pc *ProviderClient) GetHost(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "hosts", id, detail)
}

func (pc *ProviderClient) GetVMs(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "vms", detail)
}

func (pc *ProviderClient) GetVM(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "vms", id, detail)
}

func (pc *ProviderClient) GetStorageDomains(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "storagedomains", detail)
}

func (pc *ProviderClient) GetStorageDomain(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "storagedomains", id, detail)
}

func (pc *ProviderClient) GetNetworks(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "networks", detail)
}

func (pc *ProviderClient) GetNetwork(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "networks", id, detail)
}

func (pc *ProviderClient) GetDisks(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "disks", detail)
}

func (pc *ProviderClient) GetDisk(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "disks", id, detail)
}

func (pc *ProviderClient) GetDiskProfiles(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "diskprofiles", detail)
}

func (pc *ProviderClient) GetDiskProfile(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "diskprofiles", id, detail)
}

func (pc *ProviderClient) GetNICProfiles(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "nicprofiles", detail)
}

func (pc *ProviderClient) GetNICProfile(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "nicprofiles", id, detail)
}

func (pc *ProviderClient) GetWorkloads(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "workloads", detail)
}

func (pc *ProviderClient) GetWorkload(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "workloads", id, detail)
}

// GetTree fetches the full inventory tree (no detail query parameter).
func (pc *ProviderClient) GetTree(ctx context.Context) (interface{}, error) {
	return pc.GetResource(ctx, "tree")
}

// GetClusterTree fetches the cluster-rooted inventory tree.
func (pc *ProviderClient) GetClusterTree(ctx context.Context) (interface{}, error) {
	return pc.GetResource(ctx, "tree/cluster")
}

// vSphere Provider Resources (aliases to generic resources with vSphere context)
func (pc *ProviderClient) GetDatastores(ctx context.Context, detail int) (interface{}, error) {
	// vSphere datastores map to generic storage resources
	return pc.GetResourceCollection(ctx, "datastores", detail)
}

func (pc *ProviderClient) GetDatastore(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "datastores", id, detail)
}

func (pc *ProviderClient) GetResourcePools(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "resourcepools", detail)
}

func (pc *ProviderClient) GetResourcePool(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "resourcepools", id, detail)
}

func (pc *ProviderClient) GetFolders(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "folders", detail)
}

func (pc *ProviderClient) GetFolder(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "folders", id, detail)
}

// OpenStack Provider Resources
func (pc *ProviderClient) GetInstances(ctx context.Context, detail int) (interface{}, error) {
	// OpenStack instances are equivalent to VMs
	return pc.GetResourceCollection(ctx, "instances", detail)
}

func (pc *ProviderClient) GetInstance(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "instances", id, detail)
}

func (pc *ProviderClient) GetImages(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "images", detail)
}

func (pc *ProviderClient) GetImage(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "images", id, detail)
}

func (pc *ProviderClient) GetFlavors(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "flavors", detail)
}

func (pc *ProviderClient) GetFlavor(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "flavors", id, detail)
}

func (pc *ProviderClient) GetSubnets(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "subnets", detail)
}

func (pc *ProviderClient) GetSubnet(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "subnets", id, detail)
}

func (pc *ProviderClient) GetPorts(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "ports", detail)
}

func (pc *ProviderClient) GetPort(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "ports", id, detail)
}

func (pc *ProviderClient) GetVolumeTypes(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "volumetypes", detail)
}

func (pc *ProviderClient) GetVolumeType(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "volumetypes", id, detail)
}

func (pc *ProviderClient) GetVolumes(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "volumes", detail)
}

func (pc *ProviderClient) GetVolume(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "volumes", id, detail)
}

func (pc *ProviderClient) GetSecurityGroups(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "securitygroups", detail)
}

func (pc *ProviderClient) GetSecurityGroup(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "securitygroups", id, detail)
}

func (pc *ProviderClient) GetFloatingIPs(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "floatingips", detail)
}

func (pc *ProviderClient) GetFloatingIP(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "floatingips", id, detail)
}

func (pc *ProviderClient) GetProjects(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "projects", detail)
}

func (pc *ProviderClient) GetProject(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "projects", id, detail)
}

func (pc *ProviderClient) GetSnapshots(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "snapshots", detail)
}

func (pc *ProviderClient) GetSnapshot(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "snapshots", id, detail)
}

// Kubernetes/OpenShift Provider Resources
func (pc *ProviderClient) GetStorageClasses(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "storageclasses", detail)
}

func (pc *ProviderClient) GetStorageClass(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "storageclasses", id, detail)
}

func (pc *ProviderClient) GetPersistentVolumeClaims(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "persistentvolumeclaims", detail)
}

func (pc *ProviderClient) GetPersistentVolumeClaim(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "persistentvolumeclaims", id, detail)
}

func (pc *ProviderClient) GetNamespaces(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "namespaces", detail)
}

func (pc *ProviderClient) GetNamespace(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "namespaces", id, detail)
}

func (pc *ProviderClient) GetDataVolumes(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "datavolumes", detail)
}

func (pc *ProviderClient) GetDataVolume(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "datavolumes", id, detail)
}

// OVA Provider Resources
func (pc *ProviderClient) GetOVAFiles(ctx context.Context, detail int) (interface{}, error) {
	return pc.GetResourceCollection(ctx, "ovafiles", detail)
}

func (pc *ProviderClient) GetOVAFile(ctx context.Context, id string, detail int) (interface{}, error) {
	return pc.GetResourceByID(ctx, "ovafiles", id, detail)
}
// Generic helper functions for provider-agnostic operations
// GetProviderType returns the provider's spec.type, or an error when the
// field is missing or cannot be read.
func (pc *ProviderClient) GetProviderType() (string, error) {
	pType, found, err := unstructured.NestedString(pc.provider.Object, "spec", "type")
	if err == nil && found {
		return pType, nil
	}
	return "", fmt.Errorf("provider type not found or error retrieving it: %v", err)
}
// GetProviderUID returns the provider's metadata.uid, or an error when the
// field is missing or cannot be read.
func (pc *ProviderClient) GetProviderUID() (string, error) {
	uid, found, err := unstructured.NestedString(pc.provider.Object, "metadata", "uid")
	if err == nil && found {
		return uid, nil
	}
	return "", fmt.Errorf("provider UID not found or error retrieving it: %v", err)
}
// GetProviderName returns the provider object's metadata name.
func (pc *ProviderClient) GetProviderName() string {
return pc.provider.GetName()
}
// GetProviderNamespace returns the provider object's metadata namespace.
func (pc *ProviderClient) GetProviderNamespace() string {
return pc.provider.GetNamespace()
}
// checkProviderReady verifies that the provider's status.conditions slice
// contains a "Ready" condition whose status is "True". It returns nil when the
// provider is ready, and a descriptive error otherwise (missing conditions,
// missing Ready condition, or Ready present but not True).
func (pc *ProviderClient) checkProviderReady() error {
	conditions, found, err := unstructured.NestedSlice(pc.provider.Object, "status", "conditions")
	if err != nil {
		return fmt.Errorf("error retrieving provider status conditions: %v", err)
	}
	if !found || len(conditions) == 0 {
		return fmt.Errorf("provider %s/%s does not have ready condition", pc.GetProviderNamespace(), pc.GetProviderName())
	}

	// Scan for the first well-formed "Ready" condition and report its state.
	for _, raw := range conditions {
		cond, isMap := raw.(map[string]interface{})
		if !isMap {
			// Skip malformed entries rather than failing the whole check.
			continue
		}
		condType, hasType := cond["type"].(string)
		condStatus, hasStatus := cond["status"].(string)
		if !hasType || !hasStatus || condType != "Ready" {
			continue
		}
		if condStatus == "True" {
			return nil
		}
		// Ready condition exists but is not True; surface reason/message.
		reason, _ := cond["reason"].(string)
		message, _ := cond["message"].(string)
		return fmt.Errorf("provider %s/%s is not ready (status: %s, reason: %s, message: %s)",
			pc.GetProviderNamespace(), pc.GetProviderName(), condStatus, reason, message)
	}

	// No "Ready" condition was present in the slice.
	return fmt.Errorf("provider %s/%s does not have ready condition", pc.GetProviderNamespace(), pc.GetProviderName())
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/get/inventory/clusters.go | Go | package inventory
import (
"context"
"fmt"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
querypkg "github.com/yaacov/kubectl-mtv/pkg/util/query"
"github.com/yaacov/kubectl-mtv/pkg/util/watch"
)
// ListClustersWithInsecure queries the provider's cluster inventory with
// optional insecure TLS skip verification. With watchMode set, the listing is
// repeated at watch.DefaultInterval; otherwise it runs exactly once.
func ListClustersWithInsecure(ctx context.Context, kubeConfigFlags *genericclioptions.ConfigFlags, providerName, namespace string, inventoryURL string, outputFormat string, query string, watchMode bool, insecureSkipTLS bool) error {
	// Capture the arguments once so the single-shot and watch paths share one closure.
	runOnce := func() error {
		return listClustersOnce(ctx, kubeConfigFlags, providerName, namespace, inventoryURL, outputFormat, query, insecureSkipTLS)
	}
	if !watchMode {
		return runOnce()
	}
	return watch.Watch(runOnce, watch.DefaultInterval)
}
// listClustersOnce performs one cluster-inventory listing for the named
// provider and renders it in the requested output format. Only oVirt and
// vSphere providers expose a cluster inventory; other types yield an error.
func listClustersOnce(ctx context.Context, kubeConfigFlags *genericclioptions.ConfigFlags, providerName, namespace string, inventoryURL string, outputFormat string, query string, insecureSkipTLS bool) error {
	// Resolve the provider CR and build an inventory client for it.
	provider, err := GetProviderByName(ctx, kubeConfigFlags, providerName, namespace)
	if err != nil {
		return err
	}
	client := NewProviderClientWithInsecure(kubeConfigFlags, provider, inventoryURL, insecureSkipTLS)

	pType, err := client.GetProviderType()
	if err != nil {
		return fmt.Errorf("failed to get provider type: %v", err)
	}

	// Table columns: a common base, plus per-provider extras.
	headers := []output.Header{
		{DisplayName: "NAME", JSONPath: "name"},
		{DisplayName: "ID", JSONPath: "id"},
		{DisplayName: "DATACENTER", JSONPath: "dataCenter.name"},
	}
	switch pType {
	case "ovirt":
		headers = append(headers,
			output.Header{DisplayName: "HA-RESERVATION", JSONPath: "haReservation"},
			output.Header{DisplayName: "KSM-ENABLED", JSONPath: "ksmEnabled"},
		)
	case "vsphere":
		headers = append(headers,
			output.Header{DisplayName: "DRS", JSONPath: "drsEnabled"},
			output.Header{DisplayName: "HA", JSONPath: "haEnabled"},
		)
	}

	// Fetch the cluster inventory (detail level 4) for supported providers.
	var inventory interface{}
	switch pType {
	case "ovirt", "vsphere":
		inventory, err = client.GetClusters(ctx, 4)
	default:
		return fmt.Errorf("provider type '%s' does not support cluster inventory", pType)
	}
	if err != nil {
		return fmt.Errorf("failed to get clusters from provider: %v", err)
	}

	// Parse and apply the optional query; queryOpts also drives table sorting/column selection.
	var queryOpts *querypkg.QueryOptions
	if query != "" {
		if queryOpts, err = querypkg.ParseQueryString(query); err != nil {
			return fmt.Errorf("failed to parse query: %v", err)
		}
		if inventory, err = querypkg.ApplyQueryInterface(inventory, query); err != nil {
			return fmt.Errorf("failed to apply query: %v", err)
		}
	}

	// Render in the requested format.
	emptyMessage := fmt.Sprintf("No clusters found for provider %s", providerName)
	switch outputFormat {
	case "json":
		return output.PrintJSONWithEmpty(inventory, emptyMessage)
	case "yaml":
		return output.PrintYAMLWithEmpty(inventory, emptyMessage)
	case "table":
		return output.PrintTableWithQuery(inventory, headers, queryOpts, emptyMessage)
	default:
		return fmt.Errorf("unsupported output format: %s", outputFormat)
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/cmd/get/inventory/datacenters.go | Go | package inventory
import (
"context"
"fmt"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/util/output"
querypkg "github.com/yaacov/kubectl-mtv/pkg/util/query"
"github.com/yaacov/kubectl-mtv/pkg/util/watch"
)
// ListDataCentersWithInsecure queries the provider's datacenter inventory with
// optional insecure TLS skip verification. With watchMode set, the listing is
// repeated at watch.DefaultInterval; otherwise it runs exactly once.
func ListDataCentersWithInsecure(ctx context.Context, kubeConfigFlags *genericclioptions.ConfigFlags, providerName, namespace string, inventoryURL string, outputFormat string, query string, watchMode bool, insecureSkipTLS bool) error {
	// Capture the arguments once so the single-shot and watch paths share one closure.
	runOnce := func() error {
		return listDataCentersOnce(ctx, kubeConfigFlags, providerName, namespace, inventoryURL, outputFormat, query, insecureSkipTLS)
	}
	if !watchMode {
		return runOnce()
	}
	return watch.Watch(runOnce, watch.DefaultInterval)
}
// listDataCentersOnce performs one datacenter-inventory listing for the named
// provider and renders it in the requested output format. Only oVirt and
// vSphere providers expose a datacenter inventory; other types yield an error.
func listDataCentersOnce(ctx context.Context, kubeConfigFlags *genericclioptions.ConfigFlags, providerName, namespace string, inventoryURL string, outputFormat string, query string, insecureSkipTLS bool) error {
	// Resolve the provider CR and build an inventory client for it.
	provider, err := GetProviderByName(ctx, kubeConfigFlags, providerName, namespace)
	if err != nil {
		return err
	}
	client := NewProviderClientWithInsecure(kubeConfigFlags, provider, inventoryURL, insecureSkipTLS)

	pType, err := client.GetProviderType()
	if err != nil {
		return fmt.Errorf("failed to get provider type: %v", err)
	}

	// Fetch the datacenter inventory (detail level 4) for supported providers.
	var inventory interface{}
	switch pType {
	case "ovirt", "vsphere":
		inventory, err = client.GetDataCenters(ctx, 4)
	default:
		return fmt.Errorf("provider type '%s' does not support datacenter inventory", pType)
	}
	if err != nil {
		return fmt.Errorf("failed to get datacenters from provider: %v", err)
	}

	// Table columns are identical for every supported provider type.
	headers := []output.Header{
		{DisplayName: "NAME", JSONPath: "name"},
		{DisplayName: "ID", JSONPath: "id"},
		{DisplayName: "DESCRIPTION", JSONPath: "description"},
		{DisplayName: "STATUS", JSONPath: "status"},
	}

	// Parse and apply the optional query; queryOpts also drives table sorting/column selection.
	var queryOpts *querypkg.QueryOptions
	if query != "" {
		if queryOpts, err = querypkg.ParseQueryString(query); err != nil {
			return fmt.Errorf("failed to parse query: %v", err)
		}
		if inventory, err = querypkg.ApplyQueryInterface(inventory, query); err != nil {
			return fmt.Errorf("failed to apply query: %v", err)
		}
	}

	// Render in the requested format.
	emptyMessage := fmt.Sprintf("No datacenters found for provider %s", providerName)
	switch outputFormat {
	case "json":
		return output.PrintJSONWithEmpty(inventory, emptyMessage)
	case "yaml":
		return output.PrintYAMLWithEmpty(inventory, emptyMessage)
	case "table":
		return output.PrintTableWithQuery(inventory, headers, queryOpts, emptyMessage)
	default:
		return fmt.Errorf("unsupported output format: %s", outputFormat)
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.