file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
ui/src/components/app-shell.ts | TypeScript | /**
* App Shell - Main application layout component
* Uses RHDS rh-navigation-primary with built-in hamburger toggle
*/
import { LitElement, html, css } from 'lit';
import { customElement, state } from 'lit/decorators.js';
import { k8sClient } from '../lib/k8s-client.js';
import { navigate } from '../lib/router.js';
@customElement('app-shell')
export class AppShell extends LitElement {
  /** Currently selected Kubernetes namespace; broadcast via 'namespace-change'. */
  @state() private namespace = 'default';

  /** Namespaces available for selection; stays ['default'] if the API call fails. */
  @state() private namespaces: string[] = ['default'];

  /** Current location pathname, used to mark the active navigation link. */
  @state() private currentPath = '/pipelines';

  // One stable bound handler so the exact same function reference can be passed
  // to removeEventListener. The original registered inline arrow functions and
  // never removed them, leaking this element (and firing stale handlers) after
  // it left the DOM.
  private readonly onLocationChange = () => this.updateCurrentPath();

  static styles = css`
    :host {
      display: flex;
      flex-direction: column;
      min-height: 100vh;
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    /* Skip link for accessibility */
    .skip-link {
      position: absolute;
      left: -9999px;
      top: auto;
      width: 1px;
      height: 1px;
      overflow: hidden;
    }
    .skip-link:focus {
      position: fixed;
      top: 0;
      left: 0;
      width: auto;
      height: auto;
      padding: var(--rh-space-md, 16px);
      background: var(--rh-color-surface-darkest, #151515);
      color: var(--rh-color-text-primary-on-dark, #ffffff);
      z-index: 1000;
    }
    /* ===== PRIMARY NAVIGATION ===== */
    rh-navigation-primary {
      --rh-navigation-primary-background-color: var(--rh-color-surface-darkest, #151515);
    }
    /* Logo styling */
    .logo {
      display: flex;
      align-items: center;
      text-decoration: none;
      color: var(--rh-color-text-primary-on-dark, #ffffff);
    }
    .logo:hover {
      text-decoration: none;
    }
    .logo-text {
      font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
      font-size: 1.125rem;
      font-weight: 600;
      letter-spacing: -0.01em;
    }
    /* ===== LAYOUT CONTAINER ===== */
    .layout {
      display: flex;
      flex: 1;
    }
    /* ===== VERTICAL SIDEBAR ===== */
    .sidebar {
      display: flex;
      flex-direction: column;
      width: 240px;
      background: var(--rh-color-surface-lightest, #ffffff);
      border-inline-end: 1px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .sidebar-section {
      padding: var(--rh-space-md, 16px);
      border-block-end: 1px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .sidebar-label {
      display: block;
      font-size: 0.6875rem;
      font-weight: 600;
      text-transform: uppercase;
      letter-spacing: 0.05em;
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      margin-block-end: var(--rh-space-sm, 8px);
    }
    .namespace-select {
      width: 100%;
      padding: var(--rh-space-sm, 8px) var(--rh-space-xl, 32px) var(--rh-space-sm, 8px)
        var(--rh-space-sm, 8px);
      border: 1px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      background: var(--rh-color-surface-lightest, #ffffff);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      font-size: 0.875rem;
      cursor: pointer;
      appearance: none;
      background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%236a6e73' d='M6 8L1 3h10z'/%3E%3C/svg%3E");
      background-repeat: no-repeat;
      background-position: right 8px center;
    }
    .namespace-select:focus {
      outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: 1px;
    }
    /* Navigation section */
    .nav-section {
      flex: 1;
      padding: var(--rh-space-sm, 8px) 0;
    }
    rh-navigation-vertical {
      --rh-navigation-vertical-background-color: transparent;
    }
    rh-navigation-link rh-icon {
      --rh-icon-size: 18px;
      margin-inline-end: var(--rh-space-sm, 8px);
    }
    /* ===== MAIN CONTENT ===== */
    main {
      flex: 1;
      padding: var(--rh-space-xl, 32px);
      overflow: auto;
    }
    .main-container {
      width: 100%;
    }
    /* ===== RESPONSIVE ===== */
    @media (max-width: 992px) {
      .sidebar {
        display: none;
      }
    }
    @media (max-width: 768px) {
      main {
        padding: var(--rh-space-md, 16px);
      }
    }
  `;

  connectedCallback() {
    super.connectedCallback();
    this.loadNamespaces();
    this.updateCurrentPath();
    // Listen for navigation events (browser back/forward + in-app router).
    // Uses the stable bound handler so disconnectedCallback can remove it.
    window.addEventListener('popstate', this.onLocationChange);
    window.addEventListener('router-navigate', this.onLocationChange);
  }

  disconnectedCallback() {
    // Remove window-level listeners added in connectedCallback to avoid
    // leaking this element after it is removed from the document.
    window.removeEventListener('popstate', this.onLocationChange);
    window.removeEventListener('router-navigate', this.onLocationChange);
    super.disconnectedCallback();
  }

  /** Load the namespace list from the cluster; keeps ['default'] on failure. */
  private async loadNamespaces() {
    try {
      this.namespaces = await k8sClient.listNamespaces();
    } catch (e) {
      // Best-effort: the selector still works with the initial default list.
      console.warn('Could not load namespaces:', e);
    }
  }

  /** Sync currentPath with the browser location (drives nav highlighting). */
  private updateCurrentPath() {
    this.currentPath = window.location.pathname;
  }

  /** True when the current path is under the given route prefix. */
  private isActive(path: string): boolean {
    return this.currentPath.startsWith(path);
  }

  /** Intercept anchor clicks and route client-side instead of reloading. */
  private handleNavClick(e: Event, path: string) {
    e.preventDefault();
    navigate(path);
  }

  /**
   * Update the selected namespace and notify descendants.
   * The event bubbles and is composed so it crosses shadow-root boundaries.
   */
  private handleNamespaceChange(e: Event) {
    const select = e.target as HTMLSelectElement;
    this.namespace = select.value;
    // Dispatch custom event for child components
    this.dispatchEvent(
      new CustomEvent('namespace-change', {
        detail: { namespace: this.namespace },
        bubbles: true,
        composed: true,
      })
    );
  }

  render() {
    return html`
      <a href="#main-content" class="skip-link">Skip to main content</a>
      <!-- Primary Navigation -->
      <rh-navigation-primary color-palette="darkest">
        <!-- Logo -->
        <a
          slot="logo"
          href="/"
          class="logo"
          @click=${(e: Event) => this.handleNavClick(e, '/pipelines')}
        >
          <span class="logo-text">JobRunner</span>
        </a>
      </rh-navigation-primary>
      <!-- Layout: Sidebar + Main -->
      <div class="layout">
        <!-- Vertical Sidebar (desktop only) -->
        <aside class="sidebar" role="navigation">
          <!-- Namespace Selector -->
          <div class="sidebar-section">
            <span class="sidebar-label">Namespace</span>
            <select
              class="namespace-select"
              .value=${this.namespace}
              @change=${this.handleNamespaceChange}
              aria-label="Select namespace"
            >
              ${this.namespaces.map(
                ns => html` <option value=${ns} ?selected=${ns === this.namespace}>${ns}</option> `
              )}
            </select>
          </div>
          <!-- Navigation Links -->
          <div class="nav-section">
            <rh-navigation-vertical>
              <rh-navigation-link
                href="/pipelines"
                ?current-page=${this.isActive('/pipelines')}
                @click=${(e: Event) => this.handleNavClick(e, '/pipelines')}
              >
                <rh-icon set="ui" icon="monitoring" loading="eager"></rh-icon>
                Pipelines
              </rh-navigation-link>
              <rh-navigation-link
                href="/storage"
                ?current-page=${this.isActive('/storage')}
                @click=${(e: Event) => this.handleNavClick(e, '/storage')}
              >
                <rh-icon set="ui" icon="data" loading="eager"></rh-icon>
                Storage
              </rh-navigation-link>
              <rh-navigation-link
                href="/secrets"
                ?current-page=${this.isActive('/secrets')}
                @click=${(e: Event) => this.handleNavClick(e, '/secrets')}
              >
                <rh-icon set="ui" icon="lock" loading="eager"></rh-icon>
                Secrets
              </rh-navigation-link>
            </rh-navigation-vertical>
          </div>
        </aside>
        <!-- Main Content -->
        <main id="main-content" role="main">
          <div class="main-container">
            <slot></slot>
          </div>
        </main>
      </div>
    `;
  }
}
declare global {
  // Register the custom-element tag so DOM APIs such as
  // document.createElement('app-shell') and querySelector('app-shell')
  // return the AppShell type instead of a plain HTMLElement.
  interface HTMLElementTagNameMap {
    'app-shell': AppShell;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/builder/global-settings.ts | TypeScript | /**
* Global Settings - Pipeline-wide configuration
* Following RHDS tab and form patterns
*/
import { LitElement, html, css } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import type { Pipeline, EnvVar } from '../../types/pipeline.js';
import { k8sClient } from '../../lib/k8s-client.js';
/** Summary of a PersistentVolumeClaim shown in the volume-tab dropdown. */
interface PVCInfo {
  // PVC metadata.name, used as the selectable claim name.
  name: string;
  // Requested storage size (e.g. "10Gi"); may be absent on the API object.
  storage?: string;
  // PVC status.phase (e.g. "Bound", "Pending"); non-Bound phases are flagged in the UI.
  phase: string;
}
@customElement('global-settings')
export class GlobalSettings extends LitElement {
  /** Pipeline under edit; all mutations copy-on-write and emit an 'update' event. */
  @property({ type: Object }) pipeline?: Pipeline;

  /** Index of the active tab: 0 General, 1 Pod Template, 2 Shared Volume, 3 Environment. */
  @state() private activeTab = 0;

  /** PVCs fetched for the pipeline's namespace, shown in the volume tab. */
  @state() private availablePVCs: PVCInfo[] = [];

  /** True while a PVC list request is in flight. */
  @state() private loadingPVCs = false;

  static styles = css`
    :host {
      display: block;
    }
    .tabs {
      display: flex;
      gap: var(--rh-space-xs, 4px);
      margin-block-end: var(--rh-space-lg, 24px);
      border-block-end: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .tab {
      padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
      background: none;
      border: none;
      border-block-end: 2px solid transparent;
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      cursor: pointer;
      transition: all 150ms ease;
    }
    .tab:hover {
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .tab:focus-visible {
      outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: -2px;
    }
    .tab.active {
      color: var(--rh-color-interactive-blue-darker, #0066cc);
      border-block-end-color: var(--rh-color-interactive-blue-darker, #0066cc);
    }
    .form-group {
      margin-block-end: var(--rh-space-md, 16px);
    }
    label {
      display: block;
      margin-block-end: var(--rh-space-xs, 4px);
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-weight: var(--rh-font-weight-body-text-medium, 500);
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .label-hint {
      font-weight: normal;
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      font-size: var(--rh-font-size-body-text-xs, 0.75rem);
    }
    input[type='text'],
    input[type='number'],
    select {
      width: 100%;
      padding: var(--rh-space-sm, 8px);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      transition: border-color 150ms ease;
    }
    input:focus,
    select:focus {
      outline: none;
      border-color: var(--rh-color-interactive-blue-darker, #0066cc);
      box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
    }
    .checkbox-row {
      display: flex;
      align-items: center;
      gap: var(--rh-space-sm, 8px);
      margin-block-end: var(--rh-space-md, 16px);
    }
    .checkbox-row input {
      width: auto;
    }
    .checkbox-row label {
      margin: 0;
      font-weight: normal;
    }
    .env-list {
      display: flex;
      flex-direction: column;
      gap: var(--rh-space-sm, 8px);
    }
    .env-row {
      display: grid;
      grid-template-columns: 1fr 1fr auto;
      gap: var(--rh-space-sm, 8px);
      align-items: start;
    }
    .icon-btn {
      display: inline-flex;
      align-items: center;
      justify-content: center;
      padding: var(--rh-space-sm, 8px);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      background: var(--rh-color-surface-lightest, #ffffff);
      cursor: pointer;
      transition: all 150ms ease;
    }
    .icon-btn:hover {
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    .icon-btn:focus-visible {
      outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: 2px;
    }
    .icon-btn.danger:hover {
      background: var(--rh-color-red-100, #fce8e6);
      border-color: var(--rh-color-red-500, #c9190b);
      color: var(--rh-color-red-700, #a30d05);
    }
    .icon-btn rh-icon {
      --rh-icon-size: 16px;
    }
    .section-info {
      padding: var(--rh-space-md, 16px);
      background: var(--rh-color-surface-lighter, #f5f5f5);
      border-radius: var(--rh-border-radius-default, 3px);
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      margin-block-end: var(--rh-space-lg, 24px);
    }
    .section-info rh-icon {
      --rh-icon-size: 16px;
      vertical-align: middle;
      margin-inline-end: var(--rh-space-xs, 4px);
    }
    .pvc-selector-row {
      display: flex;
      gap: var(--rh-space-sm, 8px);
      align-items: stretch;
    }
    .pvc-selector-row select,
    .pvc-selector-row input {
      flex: 1;
    }
    .pvc-selector-row .icon-btn {
      flex-shrink: 0;
    }
    .pvc-selector-row .icon-btn.loading {
      opacity: 0.6;
      pointer-events: none;
    }
    @keyframes spin {
      from {
        transform: rotate(0deg);
      }
      to {
        transform: rotate(360deg);
      }
    }
    .pvc-selector-row .icon-btn.loading rh-icon {
      animation: spin 1s linear infinite;
    }
  `;

  // Namespace whose PVC list was last fetched; avoids redundant refetches in updated().
  private lastFetchedNamespace = '';

  updated(changedProperties: Map<string, unknown>) {
    super.updated(changedProperties);
    // Fetch PVCs when switching to volume tab or when namespace changes
    const namespace = this.pipeline?.metadata.namespace || 'default';
    if (this.activeTab === 2 && namespace !== this.lastFetchedNamespace) {
      this.fetchPVCs(namespace);
    }
  }

  /** Load PVCs for the given namespace; on failure the list is cleared and the error logged. */
  private async fetchPVCs(namespace: string) {
    this.loadingPVCs = true;
    this.lastFetchedNamespace = namespace;
    try {
      const pvcs = await k8sClient.listPVCs(namespace);
      this.availablePVCs = pvcs.map(pvc => ({
        name: pvc.metadata.name,
        storage: pvc.spec.resources?.requests?.storage,
        phase: pvc.status.phase,
      }));
    } catch (error) {
      console.error('Failed to fetch PVCs:', error);
      this.availablePVCs = [];
    } finally {
      this.loadingPVCs = false;
    }
  }

  /** Notify the parent that the pipeline object changed (non-bubbling 'update' event). */
  private dispatchUpdate() {
    this.dispatchEvent(
      new CustomEvent('update', {
        detail: { pipeline: this.pipeline },
      })
    );
  }

  /** Immutably set a top-level spec field and emit an update. */
  private updateSpec(field: string, value: unknown) {
    if (!this.pipeline) return;
    this.pipeline = {
      ...this.pipeline,
      spec: {
        ...this.pipeline.spec,
        [field]: value,
      },
    };
    this.dispatchUpdate();
    this.requestUpdate();
  }

  /** Immutably set a spec.podTemplate field; empty values are stored as undefined. */
  private updatePodTemplate(field: string, value: unknown) {
    if (!this.pipeline) return;
    this.pipeline = {
      ...this.pipeline,
      spec: {
        ...this.pipeline.spec,
        podTemplate: {
          ...this.pipeline.spec.podTemplate,
          [field]: value || undefined,
        },
      },
    };
    this.dispatchUpdate();
    this.requestUpdate();
  }

  /** Immutably set a spec.sharedVolume field, creating the volume object if absent. */
  private updateSharedVolume(field: string, value: unknown) {
    if (!this.pipeline) return;
    const currentVolume = this.pipeline.spec.sharedVolume || {};
    this.pipeline = {
      ...this.pipeline,
      spec: {
        ...this.pipeline.spec,
        sharedVolume: {
          ...currentVolume,
          [field]: value,
        },
      },
    };
    this.dispatchUpdate();
    this.requestUpdate();
  }

  /** Enable the shared volume with emptyDir defaults, or remove it entirely. */
  private toggleSharedVolume(enabled: boolean) {
    if (!this.pipeline) return;
    if (enabled) {
      this.updateSpec('sharedVolume', {
        name: 'workspace',
        mountPath: '/workspace',
        emptyDir: {},
      });
    } else {
      this.updateSpec('sharedVolume', undefined);
    }
  }

  /** Current pipeline-wide env vars, or [] when none are set. */
  private getEnvVars(): EnvVar[] {
    return this.pipeline?.spec.podTemplate?.env || [];
  }

  /** Persist the env var list; an empty list clears the field to undefined. */
  private updateEnvVars(envVars: EnvVar[]) {
    this.updatePodTemplate('env', envVars.length > 0 ? envVars : undefined);
  }

  /** Append a blank name/value row for the user to fill in. */
  private addEnvVar() {
    const envVars = [...this.getEnvVars(), { name: '', value: '' }];
    this.updateEnvVars(envVars);
  }

  /** Remove the env var at the given index. */
  private removeEnvVar(index: number) {
    const envVars = this.getEnvVars().filter((_, i) => i !== index);
    this.updateEnvVars(envVars);
  }

  /** Update one field (name or value) of the env var at the given index. */
  private updateEnvVar(index: number, field: 'name' | 'value', value: string) {
    const envVars = this.getEnvVars().map((env, i) =>
      i === index ? { ...env, [field]: value } : env
    );
    this.updateEnvVars(envVars);
  }

  render() {
    if (!this.pipeline) {
      return html`<p>No pipeline</p>`;
    }
    return html`
      <nav class="tabs" role="tablist">
        <button
          class="tab ${this.activeTab === 0 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 0}
          @click=${() => (this.activeTab = 0)}
        >
          General
        </button>
        <button
          class="tab ${this.activeTab === 1 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 1}
          @click=${() => (this.activeTab = 1)}
        >
          Pod Template
        </button>
        <button
          class="tab ${this.activeTab === 2 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 2}
          @click=${() => (this.activeTab = 2)}
        >
          Shared Volume
        </button>
        <button
          class="tab ${this.activeTab === 3 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 3}
          @click=${() => (this.activeTab = 3)}
        >
          Environment
        </button>
      </nav>
      <div role="tabpanel">${this.renderTabContent()}</div>
    `;
  }

  /** Render the panel for the active tab. */
  private renderTabContent() {
    switch (this.activeTab) {
      case 0:
        return this.renderGeneralTab();
      case 1:
        return this.renderPodTab();
      case 2:
        return this.renderVolumeTab();
      case 3:
        return this.renderEnvTab();
      default:
        // Fix: the original had no default, implicitly returning undefined for
        // an out-of-range tab index (and failing under noImplicitReturns).
        return html``;
    }
  }

  private renderGeneralTab() {
    return html`
      <div class="section-info">
        <rh-icon set="ui" icon="information"></rh-icon>
        Configure general pipeline settings that apply to all steps.
      </div>
      <div class="form-group">
        <label for="namespace">Namespace</label>
        <input
          type="text"
          id="namespace"
          .value=${this.pipeline?.metadata.namespace || 'default'}
          @input=${(e: Event) => {
            if (this.pipeline) {
              this.pipeline = {
                ...this.pipeline,
                metadata: {
                  ...this.pipeline.metadata,
                  namespace: (e.target as HTMLInputElement).value,
                },
              };
              this.dispatchUpdate();
            }
          }}
        />
      </div>
      <div class="form-group">
        <label for="service-account">
          Service Account
          <span class="label-hint">(optional)</span>
        </label>
        <input
          type="text"
          id="service-account"
          .value=${this.pipeline?.spec.serviceAccountName || ''}
          placeholder="default"
          @input=${(e: Event) => {
            const value = (e.target as HTMLInputElement).value;
            this.updateSpec('serviceAccountName', value || undefined);
          }}
        />
      </div>
    `;
  }

  private renderPodTab() {
    const podTemplate = this.pipeline?.spec.podTemplate;
    return html`
      <div class="section-info">
        <rh-icon set="ui" icon="information"></rh-icon>
        Default pod settings applied to all steps unless overridden.
      </div>
      <div class="form-group">
        <label for="default-image">
          Default Image
          <span class="label-hint">(used when step doesn't specify one)</span>
        </label>
        <input
          type="text"
          id="default-image"
          .value=${podTemplate?.image || ''}
          placeholder="e.g., registry.access.redhat.com/ubi9/ubi-minimal:latest"
          @input=${(e: Event) => {
            this.updatePodTemplate('image', (e.target as HTMLInputElement).value);
          }}
        />
      </div>
      <div class="form-group">
        <label for="priority-class">
          Priority Class
          <span class="label-hint">(optional)</span>
        </label>
        <input
          type="text"
          id="priority-class"
          .value=${podTemplate?.priorityClassName || ''}
          placeholder="e.g., high-priority"
          @input=${(e: Event) => {
            this.updatePodTemplate('priorityClassName', (e.target as HTMLInputElement).value);
          }}
        />
      </div>
      <div class="form-group">
        <label for="scheduler">
          Scheduler Name
          <span class="label-hint">(optional)</span>
        </label>
        <input
          type="text"
          id="scheduler"
          .value=${podTemplate?.schedulerName || ''}
          placeholder="default-scheduler"
          @input=${(e: Event) => {
            this.updatePodTemplate('schedulerName', (e.target as HTMLInputElement).value);
          }}
        />
      </div>
    `;
  }

  private renderVolumeTab() {
    const sharedVolume = this.pipeline?.spec.sharedVolume;
    const hasVolume = !!sharedVolume;
    return html`
      <div class="section-info">
        <rh-icon set="ui" icon="information"></rh-icon>
        A shared volume is mounted to all steps, allowing them to share files.
      </div>
      <div class="checkbox-row">
        <input
          type="checkbox"
          id="enable-volume"
          ?checked=${hasVolume}
          @change=${(e: Event) => {
            this.toggleSharedVolume((e.target as HTMLInputElement).checked);
          }}
        />
        <label for="enable-volume">Enable shared volume</label>
      </div>
      ${hasVolume
        ? html`
            <div class="form-group">
              <label for="volume-name">Volume Name</label>
              <input
                type="text"
                id="volume-name"
                .value=${sharedVolume?.name || 'workspace'}
                @input=${(e: Event) => {
                  this.updateSharedVolume('name', (e.target as HTMLInputElement).value);
                }}
              />
            </div>
            <div class="form-group">
              <label for="mount-path">Mount Path</label>
              <input
                type="text"
                id="mount-path"
                .value=${sharedVolume?.mountPath || '/workspace'}
                @input=${(e: Event) => {
                  this.updateSharedVolume('mountPath', (e.target as HTMLInputElement).value);
                }}
              />
            </div>
            <div class="form-group">
              <label for="volume-type">Volume Type</label>
              <select
                id="volume-type"
                @change=${(e: Event) => {
                  const type = (e.target as HTMLSelectElement).value;
                  if (type === 'emptyDir') {
                    this.updateSharedVolume('emptyDir', {});
                    this.updateSharedVolume('persistentVolumeClaim', undefined);
                  } else {
                    this.updateSharedVolume('emptyDir', undefined);
                    this.updateSharedVolume('persistentVolumeClaim', { claimName: '' });
                  }
                }}
              >
                <option value="emptyDir" ?selected=${!!sharedVolume?.emptyDir}>
                  EmptyDir (temporary)
                </option>
                <option value="pvc" ?selected=${!!sharedVolume?.persistentVolumeClaim}>
                  PersistentVolumeClaim
                </option>
              </select>
            </div>
            ${sharedVolume?.persistentVolumeClaim !== undefined
              ? html`
                  <div class="form-group">
                    <label for="pvc-name">PVC Name</label>
                    <div class="pvc-selector-row">
                      ${this.loadingPVCs
                        ? html`
                            <select id="pvc-name" disabled>
                              <option>Loading PVCs...</option>
                            </select>
                          `
                        : this.availablePVCs.length > 0
                          ? html`
                              <select
                                id="pvc-name"
                                @change=${(e: Event) => {
                                  this.updateSharedVolume('persistentVolumeClaim', {
                                    claimName: (e.target as HTMLSelectElement).value,
                                  });
                                }}
                              >
                                <option
                                  value=""
                                  ?selected=${!sharedVolume.persistentVolumeClaim?.claimName}
                                >
                                  -- Select a PVC --
                                </option>
                                ${this.availablePVCs.map(
                                  pvc => html`
                                    <option
                                      value=${pvc.name}
                                      ?selected=${sharedVolume.persistentVolumeClaim?.claimName ===
                                      pvc.name}
                                    >
                                      ${pvc.name}${pvc.storage
                                        ? ` (${pvc.storage})`
                                        : ''}${pvc.phase !== 'Bound' ? ` [${pvc.phase}]` : ''}
                                    </option>
                                  `
                                )}
                              </select>
                            `
                          : html`
                              <input
                                type="text"
                                id="pvc-name"
                                .value=${sharedVolume.persistentVolumeClaim?.claimName || ''}
                                placeholder="No PVCs found - enter name manually"
                                @input=${(e: Event) => {
                                  this.updateSharedVolume('persistentVolumeClaim', {
                                    claimName: (e.target as HTMLInputElement).value,
                                  });
                                }}
                              />
                            `}
                      <button
                        class="icon-btn ${this.loadingPVCs ? 'loading' : ''}"
                        @click=${() =>
                          this.fetchPVCs(this.pipeline?.metadata.namespace || 'default')}
                        title="Refresh PVC list"
                        aria-label="Refresh PVC list"
                        ?disabled=${this.loadingPVCs}
                      >
                        <rh-icon set="ui" icon="sync"></rh-icon>
                      </button>
                    </div>
                  </div>
                `
              : ''}
          `
        : ''}
    `;
  }

  private renderEnvTab() {
    const envVars = this.getEnvVars();
    return html`
      <div class="section-info">
        <rh-icon set="ui" icon="information"></rh-icon>
        Environment variables injected into all step containers.
      </div>
      <div class="env-list">
        ${envVars.map(
          (env, index) => html`
            <div class="env-row">
              <input
                type="text"
                placeholder="Name"
                .value=${env.name}
                @input=${(e: Event) =>
                  this.updateEnvVar(index, 'name', (e.target as HTMLInputElement).value)}
                aria-label="Variable name"
              />
              <input
                type="text"
                placeholder="Value"
                .value=${env.value || ''}
                @input=${(e: Event) =>
                  this.updateEnvVar(index, 'value', (e.target as HTMLInputElement).value)}
                aria-label="Variable value"
              />
              <button
                class="icon-btn danger"
                @click=${() => this.removeEnvVar(index)}
                title="Remove variable"
                aria-label="Remove variable"
              >
                <rh-icon set="ui" icon="trash"></rh-icon>
              </button>
            </div>
          `
        )}
        <rh-button variant="secondary" @click=${this.addEnvVar}>
          <rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
          Add Variable
        </rh-button>
      </div>
    `;
  }
}
declare global {
  // Register the custom-element tag so DOM APIs such as
  // document.createElement('global-settings') return the GlobalSettings type.
  interface HTMLElementTagNameMap {
    'global-settings': GlobalSettings;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/builder/pipeline-canvas.ts | TypeScript | /**
* Pipeline Canvas - Visual pipeline builder with drag-and-drop
* Following RHDS patterns for cards and interactions
*/
import { LitElement, html, css } from 'lit';
import { customElement, state } from 'lit/decorators.js';
import { stringify as yamlStringify, parse as yamlParse } from 'yaml';
import type { Pipeline, PipelineStep } from '../../types/pipeline.js';
import { k8sClient } from '../../lib/k8s-client.js';
import { navigate } from '../../lib/router.js';
import {
createEmptyPipeline,
createDefaultStep,
validateStepName,
} from '../../lib/graph-layout.js';
// The two presentation modes of the builder: visual canvas or raw YAML editing.
type ViewMode = 'builder' | 'yaml';
/** A pipeline step augmented with its position on the visual canvas. */
interface CanvasStep extends PipelineStep {
  // Canvas coordinates (pixels) where the step card is placed.
  x: number;
  y: number;
}
@customElement('pipeline-canvas')
export class PipelineCanvas extends LitElement {
@state() private pipeline: Pipeline = createEmptyPipeline('new-pipeline');
@state() private canvasSteps: CanvasStep[] = [];
@state() private selectedStep: string | null = null;
@state() private showStepEditor = false;
@state() private showGlobalSettings = false;
@state() private saving = false;
@state() private error: string | null = null;
@state() private draggedStepIndex: number | null = null;
@state() private existingPipelineNames: Set<string> = new Set();
@state() private nameError: string | null = null;
@state() private viewMode: ViewMode = 'builder';
@state() private yamlContent = '';
@state() private yamlError: string | null = null;
private boundNamespaceHandler = this.handleNamespaceChange.bind(this);
static styles = css`
:host {
display: block;
}
.error-banner {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-md, 16px);
background: var(--rh-color-red-100, #fce8e6);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-red-700, #a30d05);
margin-block-end: var(--rh-space-lg, 24px);
}
.error-banner rh-icon {
--rh-icon-size: 20px;
}
.header {
display: flex;
justify-content: space-between;
align-items: center;
margin-block-end: var(--rh-space-xl, 32px);
flex-wrap: wrap;
gap: var(--rh-space-md, 16px);
}
.header-left {
display: flex;
align-items: center;
gap: var(--rh-space-md, 16px);
}
.pipeline-name-group {
display: flex;
flex-direction: column;
gap: var(--rh-space-xs, 4px);
}
.pipeline-name-label {
display: flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.required-indicator {
color: var(--rh-color-red-500, #c9190b);
font-weight: var(--rh-font-weight-body-text-bold, 700);
}
.pipeline-name-input {
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-md, 1rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
font-weight: normal;
min-width: 250px;
transition: border-color 150ms ease;
}
.pipeline-name-input:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.pipeline-name-input.error {
border-color: var(--rh-color-red-500, #c9190b);
}
.pipeline-name-input.error:focus {
box-shadow: 0 0 0 1px var(--rh-color-red-500, #c9190b);
}
.name-error-text {
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-red-700, #a30d05);
}
.header-actions {
display: flex;
gap: var(--rh-space-sm, 8px);
align-items: center;
}
.view-switch {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
margin-inline-start: var(--rh-space-lg, 24px);
}
.view-switch-label {
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.yaml-editor-container {
flex: 1;
display: flex;
flex-direction: column;
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
overflow: hidden;
}
.yaml-editor-container code-editor {
flex: 1;
min-height: 500px;
}
.yaml-error-banner {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
background: var(--rh-color-orange-100, #fef3e2);
border-block-end: var(--rh-border-width-sm, 1px) solid var(--rh-color-orange-500, #f4b740);
color: var(--rh-color-orange-700, #8a5b00);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
}
.yaml-error-banner rh-icon {
--rh-icon-size: 16px;
flex-shrink: 0;
}
.workspace {
display: grid;
grid-template-columns: 280px 1fr;
gap: var(--rh-space-lg, 24px);
min-height: 600px;
}
@media (max-width: 900px) {
.workspace {
grid-template-columns: 1fr;
}
}
.sidebar {
background: var(--rh-color-surface-lighter, #f5f5f5);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
padding: var(--rh-space-lg, 24px);
}
.sidebar h3 {
margin: 0 0 var(--rh-space-md, 16px) 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
text-transform: uppercase;
letter-spacing: 0.5px;
}
.step-templates {
display: flex;
flex-direction: column;
gap: var(--rh-space-sm, 8px);
}
.step-template {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-md, 16px);
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) dashed var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: grab;
transition: all 150ms ease;
}
.step-template:hover {
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
background: var(--rh-color-blue-50, #e7f1fa);
}
.step-template:active {
cursor: grabbing;
}
.step-template:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.step-template-icon {
display: flex;
align-items: center;
justify-content: center;
width: 40px;
height: 40px;
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.step-template-icon rh-icon {
--rh-icon-size: 20px;
}
.step-template-info h4 {
margin: 0;
font-size: var(--rh-font-size-body-text-md, 1rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
}
.step-template-info p {
margin: 0;
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.canvas {
background: var(--rh-color-surface-lightest, #ffffff);
border: 2px dashed var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
padding: var(--rh-space-lg, 24px);
min-height: 500px;
position: relative;
transition:
border-color 150ms ease,
background-color 150ms ease;
}
.canvas.drag-over {
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
background: var(--rh-color-blue-50, #e7f1fa);
}
.canvas-empty {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100%;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
text-align: center;
gap: var(--rh-space-md, 16px);
}
.canvas-empty rh-icon {
--rh-icon-size: 48px;
color: var(--rh-color-gray-40, #8a8d90);
}
.steps-list {
display: flex;
flex-direction: column;
gap: var(--rh-space-md, 16px);
}
.step-card {
display: flex;
align-items: center;
gap: var(--rh-space-md, 16px);
padding: var(--rh-space-md, 16px);
background: var(--rh-color-surface-lightest, #ffffff);
border: 2px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
transition: all 150ms ease;
}
.step-card:hover {
box-shadow: var(--rh-box-shadow-md, 0 4px 6px -1px rgba(21, 21, 21, 0.1));
}
.step-card:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.step-card.selected {
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.step-card.dragging {
opacity: 0.5;
}
.step-drag-handle {
display: flex;
align-items: center;
justify-content: center;
cursor: grab;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
padding: var(--rh-space-xs, 4px);
}
.step-drag-handle:active {
cursor: grabbing;
}
.step-drag-handle rh-icon {
--rh-icon-size: 16px;
}
.step-info {
flex: 1;
}
.step-name {
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
margin-block-end: 2px;
}
.step-image {
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.step-actions {
display: flex;
gap: var(--rh-space-xs, 4px);
}
.step-action-btn {
display: inline-flex;
align-items: center;
justify-content: center;
padding: var(--rh-space-xs, 4px);
background: none;
border: none;
cursor: pointer;
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.step-action-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
color: var(--rh-color-text-primary-on-light, #151515);
}
.step-action-btn:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.step-action-btn.delete:hover {
background: var(--rh-color-red-100, #fce8e6);
color: var(--rh-color-red-700, #a30d05);
}
.step-action-btn rh-icon {
--rh-icon-size: 16px;
}
.step-connector {
display: flex;
justify-content: center;
padding: var(--rh-space-xs, 4px) 0;
}
.step-connector-line {
width: 2px;
height: 20px;
background: var(--rh-color-border-subtle-on-light, #d2d2d2);
}
`;
connectedCallback() {
  super.connectedCallback();
  // Get current namespace from app-shell's namespace selector
  const currentNamespace = this.getCurrentNamespaceFromShell();
  // Only re-create the empty pipeline when a non-default namespace is active;
  // createEmptyPipeline('new-pipeline') already targets 'default'.
  if (currentNamespace && currentNamespace !== 'default') {
    this.pipeline = createEmptyPipeline('new-pipeline', currentNamespace);
  }
  // Listen for namespace changes from the app-shell
  // (boundNamespaceHandler is a stable reference so it can be removed later).
  window.addEventListener('namespace-change', this.boundNamespaceHandler);
  // Restore a copied pipeline from sessionStorage, or start fresh.
  this.loadPipelineData();
}
/** Lifecycle: remove the namespace-change subscription added in connectedCallback. */
disconnectedCallback() {
super.disconnectedCallback();
window.removeEventListener('namespace-change', this.boundNamespaceHandler);
}
/**
 * Read the currently selected namespace out of the app-shell's shadow DOM
 * (its `.namespace-select` element). Falls back to 'default' when the shell
 * or its selector cannot be found.
 */
private getCurrentNamespaceFromShell(): string {
  const select = document
    .querySelector('app-shell')
    ?.shadowRoot?.querySelector<HTMLSelectElement>('.namespace-select');
  return select ? select.value : 'default';
}
/**
 * React to a namespace change broadcast by the app-shell. Only a pristine
 * builder (no steps placed yet) is retargeted; a pipeline the user has
 * already started building keeps its namespace.
 */
private handleNamespaceChange(e: Event) {
  const { namespace } = (e as CustomEvent<{ namespace: string }>).detail;
  if (this.canvasSteps.length !== 0) {
    return;
  }
  this.pipeline = {
    ...this.pipeline,
    metadata: { ...this.pipeline.metadata, namespace },
  };
  // Refresh the name-collision set for the newly selected namespace.
  this.loadExistingPipelines();
}
/**
 * Initial data load: if a "copy pipeline" action stashed a pipeline in
 * sessionStorage, load it (consuming the stash); otherwise just compute a
 * fresh unique default name for a new pipeline.
 */
private async loadPipelineData() {
  const stashed = sessionStorage.getItem('pipeline-copy');
  if (stashed) {
    // Consume the stash so a reload starts clean.
    sessionStorage.removeItem('pipeline-copy');
    try {
      await this.loadFromCopiedPipeline(JSON.parse(stashed) as Pipeline);
      return;
    } catch (e) {
      console.warn('Failed to parse copied pipeline:', e);
    }
  }
  // No copy pending (or it was unparseable): initialize a new pipeline.
  await this.loadExistingPipelines();
}
/**
 * Initialize the builder from a pipeline copied via sessionStorage.
 *
 * Picks a name that does not collide with pipelines already present in the
 * copied pipeline's namespace: the original name is tried first, then
 * "<base>-copy", "<base>-copy-2", ... (any existing "-copy"/"-copy-N"
 * suffix is stripped from the base so repeated copies don't accumulate
 * suffixes). The previous logic skipped "-copy" and jumped straight to
 * "-copy-2" on the first collision.
 *
 * If listing pipelines fails, the copy is still loaded under its original
 * name (best-effort behavior preserved from before).
 */
private async loadFromCopiedPipeline(copiedPipeline: Pipeline) {
  // Lay spec steps out vertically on the canvas (x/y are layout-only fields).
  const toCanvasSteps = (steps: PipelineStep[] | undefined): CanvasStep[] =>
    (steps ?? []).map((step, index) => ({ ...step, x: 0, y: index * 120 }));

  try {
    const namespace = copiedPipeline.metadata.namespace || 'default';
    const pipelines = await k8sClient.listPipelines(namespace);
    this.existingPipelineNames = new Set(pipelines.map(p => p.metadata.name));

    // Generate a unique name based on the copied pipeline's name.
    const baseName = copiedPipeline.metadata.name.replace(/-copy(-\d+)?$/, '');
    let uniqueName = copiedPipeline.metadata.name;
    let counter = 1;
    while (this.existingPipelineNames.has(uniqueName)) {
      uniqueName = counter === 1 ? `${baseName}-copy` : `${baseName}-copy-${counter}`;
      counter++;
    }

    this.pipeline = {
      ...copiedPipeline,
      metadata: {
        ...copiedPipeline.metadata,
        name: uniqueName,
      },
    };
    if (copiedPipeline.spec.steps && copiedPipeline.spec.steps.length > 0) {
      this.canvasSteps = toCanvasSteps(copiedPipeline.spec.steps);
    }
  } catch (e) {
    console.warn('Failed to load pipeline names:', e);
    // Still load the copied pipeline even if we can't check names.
    this.pipeline = copiedPipeline;
    if (copiedPipeline.spec.steps && copiedPipeline.spec.steps.length > 0) {
      this.canvasSteps = toCanvasSteps(copiedPipeline.spec.steps);
    }
  }
}
/**
 * Fetch the names of pipelines already present in the active namespace and
 * assign the first free default name ("new-pipeline", "new-pipeline-2", ...).
 * On failure the default name is kept unchanged.
 */
private async loadExistingPipelines() {
  try {
    // The pipeline's namespace wins; fall back to the shell's selector.
    const namespace = this.pipeline.metadata.namespace || this.getCurrentNamespaceFromShell();
    const existing = await k8sClient.listPipelines(namespace);
    this.existingPipelineNames = new Set(existing.map(p => p.metadata.name));

    let uniqueName = 'new-pipeline';
    for (let counter = 2; this.existingPipelineNames.has(uniqueName); counter++) {
      uniqueName = `new-pipeline-${counter}`;
    }

    this.pipeline = {
      ...this.pipeline,
      metadata: {
        ...this.pipeline.metadata,
        name: uniqueName,
        namespace,
      },
    };
  } catch (e) {
    // If we can't load pipelines (e.g. offline), just use the default name.
    console.warn('Failed to load existing pipelines:', e);
  }
}
/**
 * Validate a pipeline name. Returns a human-readable error message when the
 * name is syntactically invalid (per validateStepName) or already taken in
 * the current namespace; returns null when the name is usable.
 */
private validatePipelineName(name: string): string | null {
  const { valid, error } = validateStepName(name);
  if (!valid) {
    return error || 'Invalid name';
  }
  return this.existingPipelineNames.has(name)
    ? `A pipeline named "${name}" already exists in this namespace`
    : null;
}
/** Apply a new pipeline name from the input, recording any validation error for the UI. */
private updatePipelineName(name: string) {
  this.nameError = this.validatePipelineName(name);
  const metadata = { ...this.pipeline.metadata, name };
  this.pipeline = { ...this.pipeline, metadata };
}
// Image label shown on a step card; the literal 'default' stands in when the
// first container is missing or its image is empty.
private getStepImage(step: PipelineStep): string {
return step.jobSpec.template.spec.containers[0]?.image || 'default';
}
/**
 * Add a new step of the given template type to the canvas, pre-filling its
 * container from the template, and open its editor drawer.
 */
private addStep(type: 'bash' | 'python' | 'kubectl' | 'custom') {
  // Pick the first unused "step-N" name. Using canvasSteps.length + 1 alone
  // can collide after a deletion (delete "step-1" of two steps, then add:
  // length + 1 === 2, but "step-2" still exists) and duplicate names break
  // selection/removal, which are keyed by name.
  const usedNames = new Set(this.canvasSteps.map(s => s.name));
  let stepNumber = this.canvasSteps.length + 1;
  while (usedNames.has(`step-${stepNumber}`)) {
    stepNumber++;
  }
  const name = `step-${stepNumber}`;
  const newStep = createDefaultStep(name);
  const container = newStep.jobSpec.template.spec.containers[0];
  // Customize image/command/args based on the chosen template type.
  switch (type) {
    case 'bash':
      container.image = 'registry.access.redhat.com/ubi9/ubi-minimal:latest';
      container.command = ['sh', '-c'];
      container.args = ['echo "Hello from bash step"'];
      break;
    case 'python':
      container.image = 'registry.access.redhat.com/ubi9/python-311:latest';
      container.command = ['python', '-c'];
      container.args = ['print("Hello from Python step")'];
      break;
    case 'kubectl':
      container.image = 'bitnami/kubectl:latest';
      container.command = ['sh', '-c'];
      container.args = ['kubectl version --client'];
      break;
    case 'custom':
      // Leave everything blank for the user to fill in.
      container.image = '';
      container.command = [];
      container.args = [];
      break;
  }
  const canvasStep: CanvasStep = {
    ...newStep,
    x: 100,
    y: this.canvasSteps.length * 120 + 50,
  };
  this.canvasSteps = [...this.canvasSteps, canvasStep];
  this.updatePipelineFromCanvas();
  // Immediately open the editor drawer for the new step.
  this.selectedStep = name;
  this.showStepEditor = true;
}
/** Delete a step from the canvas, closing its editor drawer if it was selected. */
private removeStep(name: string) {
  this.canvasSteps = this.canvasSteps.filter(step => step.name !== name);
  this.updatePipelineFromCanvas();
  if (this.selectedStep !== name) {
    return;
  }
  this.selectedStep = null;
  this.showStepEditor = false;
}
/** Select a step and open its editor drawer (closing the settings drawer). */
private selectStep(name: string) {
this.selectedStep = name;
this.showStepEditor = true;
this.showGlobalSettings = false;
}
/** Close both side drawers and clear the step selection. */
private closeDrawer() {
this.showStepEditor = false;
this.showGlobalSettings = false;
this.selectedStep = null;
}
/** Mirror the canvas state back into the pipeline spec, dropping the layout-only x/y fields. */
private updatePipelineFromCanvas() {
  const steps = this.canvasSteps.map(({ x: _x, y: _y, ...step }) => step);
  this.pipeline = {
    ...this.pipeline,
    spec: { ...this.pipeline.spec, steps },
  };
}
/** Merge partial updates into the named canvas step and sync the pipeline spec. */
private updateStep(name: string, updates: Partial<PipelineStep>) {
  this.canvasSteps = this.canvasSteps.map(step => {
    if (step.name !== name) return step;
    return { ...step, ...updates };
  });
  this.updatePipelineFromCanvas();
}
/**
 * Begin a step-reorder drag: remember which card is being dragged and mark
 * it visually.
 */
private handleDragStart(e: DragEvent, index: number) {
  this.draggedStepIndex = index;
  // Firefox will not initiate a drag unless some data is set on the event;
  // the payload itself is unused (reordering tracks draggedStepIndex).
  // Note: no 'template-type' key is set, so handleCanvasDrop ignores this drag.
  if (e.dataTransfer) {
    e.dataTransfer.setData('text/plain', String(index));
    e.dataTransfer.effectAllowed = 'move';
  }
  (e.target as HTMLElement).classList.add('dragging');
}
/** End of a card drag: clear the tracked index and the 'dragging' style. */
private handleDragEnd(e: DragEvent) {
this.draggedStepIndex = null;
(e.target as HTMLElement).classList.remove('dragging');
}
/**
 * Live-reorder: while a card is dragged over another card's slot, move the
 * dragged step to that slot and keep tracking it at its new index.
 */
private handleDragOver(e: DragEvent, index: number) {
e.preventDefault();
if (this.draggedStepIndex === null || this.draggedStepIndex === index) return;
const steps = [...this.canvasSteps];
const [draggedStep] = steps.splice(this.draggedStepIndex, 1);
steps.splice(index, 0, draggedStep);
this.canvasSteps = steps;
this.draggedStepIndex = index;
this.updatePipelineFromCanvas();
}
/** Highlight the canvas as a drop target; preventDefault() is required for the drop to be allowed. */
private handleCanvasDragOver(e: DragEvent) {
e.preventDefault();
(e.currentTarget as HTMLElement).classList.add('drag-over');
}
/** Remove the drop-target highlight when the drag leaves the canvas. */
private handleCanvasDragLeave(e: DragEvent) {
(e.currentTarget as HTMLElement).classList.remove('drag-over');
}
/** Drop handler for the canvas: instantiate a dragged template, if the payload carries one. */
private handleCanvasDrop(e: DragEvent) {
  e.preventDefault();
  (e.currentTarget as HTMLElement).classList.remove('drag-over');
  const templateType = e.dataTransfer?.getData('template-type');
  if (!templateType) {
    return; // Not a template drag (e.g. a card reorder) — nothing to add.
  }
  this.addStep(templateType as 'bash' | 'python' | 'kubectl' | 'custom');
}
// Tag the drag payload so handleCanvasDrop knows which template to instantiate.
private handleTemplateDragStart(e: DragEvent, type: string) {
e.dataTransfer?.setData('template-type', type);
}
/** Serialize the current pipeline to YAML, syncing canvas state into the spec first. */
private pipelineToYaml(): string {
  this.updatePipelineFromCanvas();
  const options = {
    indent: 2,
    lineWidth: 0, // no wrapping — keep long strings on one line
    defaultKeyType: 'PLAIN',
    defaultStringType: 'QUOTE_DOUBLE',
  } as const;
  return yamlStringify(this.pipeline, options);
}
/**
 * Parse YAML text into a Pipeline, backfilling required top-level fields so a
 * partial document still loads. Returns null (and sets this.yamlError) when
 * the text is not valid YAML or not an object; clears the error on success.
 */
private yamlToPipeline(yamlStr: string): Pipeline | null {
  try {
    const parsed = yamlParse(yamlStr) as Pipeline;
    if (!parsed || typeof parsed !== 'object') {
      throw new Error('Invalid YAML: must be an object');
    }
    // Fill in anything the user omitted with sensible defaults.
    parsed.apiVersion ||= 'pipeline.yaacov.io/v1';
    parsed.kind ||= 'Pipeline';
    parsed.metadata ||= { name: 'new-pipeline' };
    parsed.spec ||= { steps: [] };
    parsed.spec.steps ||= [];
    this.yamlError = null;
    return parsed;
  } catch (e) {
    this.yamlError = e instanceof Error ? e.message : 'Invalid YAML';
    return null;
  }
}
/** Switch to the YAML editor: close any open drawers and serialize the builder state. */
private switchToYamlView() {
  this.showStepEditor = false;
  this.showGlobalSettings = false;
  this.yamlContent = this.pipelineToYaml();
  this.yamlError = null;
  this.viewMode = 'yaml';
}
/**
 * Switch back to the visual builder. If the YAML is invalid the user stays
 * in the YAML view with yamlError set (by yamlToPipeline).
 */
private switchToBuilderView() {
  const parsed = this.yamlToPipeline(this.yamlContent);
  if (!parsed) {
    return;
  }
  this.pipeline = parsed;
  // Lay the parsed steps out vertically on the canvas.
  this.canvasSteps = (parsed.spec.steps || []).map((step, index) => ({
    ...step,
    x: 0,
    y: index * 120,
  }));
  this.viewMode = 'builder';
}
/** Keep yamlContent in sync with the editor and re-validate on every change. */
private handleYamlChange(e: CustomEvent) {
this.yamlContent = e.detail.value;
// Try to parse to validate and clear/set error
this.yamlToPipeline(this.yamlContent);
}
/**
 * Validate the pipeline and create it via the Kubernetes API, then navigate
 * to the new pipeline's detail page. On any validation or API failure,
 * this.error (and this.nameError for name problems) is set and the user
 * stays on the builder.
 */
private async savePipeline() {
let pipelineToSave: Pipeline;
// If in YAML mode, parse YAML as source of truth
if (this.viewMode === 'yaml') {
const parsed = this.yamlToPipeline(this.yamlContent);
if (!parsed) {
this.error = `Cannot save: ${this.yamlError}`;
return;
}
pipelineToSave = parsed;
} else {
// In builder mode, use the current pipeline state
this.updatePipelineFromCanvas();
pipelineToSave = this.pipeline;
}
// Validate name
const nameValidationError = this.validatePipelineName(pipelineToSave.metadata.name);
if (nameValidationError) {
this.nameError = nameValidationError;
this.error = `Pipeline name: ${nameValidationError}`;
return;
}
// An empty pipeline is never valid.
if (!pipelineToSave.spec.steps || pipelineToSave.spec.steps.length === 0) {
this.error = 'Pipeline must have at least one step';
return;
}
this.saving = true;
this.error = null;
try {
const namespace = pipelineToSave.metadata.namespace || 'default';
await k8sClient.createPipeline(namespace, pipelineToSave);
// Success: jump to the detail view of the newly created pipeline.
navigate(`/pipelines/${namespace}/${pipelineToSave.metadata.name}`);
} catch (e) {
this.error = e instanceof Error ? e.message : 'Failed to save pipeline';
} finally {
this.saving = false;
}
}
/**
 * Render the builder UI: an optional error banner, a header (pipeline name
 * input, Builder/YAML switch, Settings/Save actions), then either the YAML
 * editor or the drag-and-drop workspace (template sidebar + step canvas),
 * plus the global-settings and step-editor side drawers.
 */
render() {
return html`
${this.error
? html`
<div class="error-banner" role="alert">
<rh-icon set="ui" icon="error-fill"></rh-icon>
${this.error}
</div>
`
: ''}
<header class="header">
<div class="header-left">
<div class="pipeline-name-group">
<label class="pipeline-name-label" for="pipeline-name">
Pipeline Name <span class="required-indicator">*</span>
</label>
<input
type="text"
id="pipeline-name"
class="pipeline-name-input ${this.nameError ? 'error' : ''}"
.value=${this.pipeline.metadata.name}
@input=${(e: Event) => this.updatePipelineName((e.target as HTMLInputElement).value)}
placeholder="my-pipeline"
required
aria-required="true"
aria-invalid=${this.nameError ? 'true' : 'false'}
aria-describedby=${this.nameError ? 'name-error' : ''}
/>
${this.nameError
? html`
<span id="name-error" class="name-error-text" role="alert"
>${this.nameError}</span
>
`
: ''}
</div>
<div class="view-switch">
<rh-switch
?checked=${this.viewMode === 'yaml'}
@change=${(e: Event) => {
const checked = (e.target as HTMLInputElement).checked;
if (checked) {
this.switchToYamlView();
} else {
this.switchToBuilderView();
}
}}
aria-label="Toggle between Builder and YAML view"
></rh-switch>
<span class="view-switch-label">YAML</span>
</div>
</div>
<div class="header-actions">
${this.viewMode === 'builder'
? html`
<rh-button
variant="secondary"
@click=${() => {
this.showGlobalSettings = true;
this.showStepEditor = false;
}}
>
<rh-icon set="ui" icon="settings" slot="icon"></rh-icon>
Settings
</rh-button>
`
: ''}
<rh-button
?disabled=${this.saving || (this.viewMode === 'yaml' && !!this.yamlError)}
@click=${this.savePipeline}
>
<rh-icon set="ui" icon="save" slot="icon"></rh-icon>
${this.saving ? 'Saving...' : 'Save Pipeline'}
</rh-button>
</div>
</header>
${this.viewMode === 'yaml'
? html`
<div class="yaml-editor-container">
${this.yamlError
? html`
<div class="yaml-error-banner">
<rh-icon set="ui" icon="warning"></rh-icon>
<span>${this.yamlError}</span>
</div>
`
: ''}
<code-editor
.value=${this.yamlContent}
language="yaml"
.showLanguageSelector=${false}
minHeight="600px"
@change=${this.handleYamlChange}
></code-editor>
</div>
`
: html`
<div class="workspace">
<!-- Left sidebar - Step templates -->
<aside class="sidebar">
<h3>Add Step</h3>
<div class="step-templates">
<div
class="step-template"
draggable="true"
tabindex="0"
role="button"
@dragstart=${(e: DragEvent) => this.handleTemplateDragStart(e, 'bash')}
@click=${() => this.addStep('bash')}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.addStep('bash');
}
}}
>
<span class="step-template-icon">
<rh-icon set="standard" icon="command-line"></rh-icon>
</span>
<div class="step-template-info">
<h4>Bash Runner</h4>
<p>UBI Minimal</p>
</div>
</div>
<div
class="step-template"
draggable="true"
tabindex="0"
role="button"
@dragstart=${(e: DragEvent) => this.handleTemplateDragStart(e, 'python')}
@click=${() => this.addStep('python')}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.addStep('python');
}
}}
>
<span class="step-template-icon">
<rh-icon set="ui" icon="code"></rh-icon>
</span>
<div class="step-template-info">
<h4>Python Runner</h4>
<p>UBI Python 3.11</p>
</div>
</div>
<div
class="step-template"
draggable="true"
tabindex="0"
role="button"
@dragstart=${(e: DragEvent) => this.handleTemplateDragStart(e, 'kubectl')}
@click=${() => this.addStep('kubectl')}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.addStep('kubectl');
}
}}
>
<span class="step-template-icon">
<rh-icon set="ui" icon="kubernetes-service"></rh-icon>
</span>
<div class="step-template-info">
<h4>Kubectl Step</h4>
<p>Kubernetes CLI</p>
</div>
</div>
<div
class="step-template"
draggable="true"
tabindex="0"
role="button"
@dragstart=${(e: DragEvent) => this.handleTemplateDragStart(e, 'custom')}
@click=${() => this.addStep('custom')}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.addStep('custom');
}
}}
>
<span class="step-template-icon">
<rh-icon set="ui" icon="puzzle-piece"></rh-icon>
</span>
<div class="step-template-info">
<h4>Custom Step</h4>
<p>Set your own image</p>
</div>
</div>
</div>
</aside>
<!-- Center - Canvas -->
<div
class="canvas"
@dragover=${this.handleCanvasDragOver}
@dragleave=${this.handleCanvasDragLeave}
@drop=${this.handleCanvasDrop}
>
${this.canvasSteps.length === 0
? html`
<div class="canvas-empty">
<rh-icon set="standard" icon="data"></rh-icon>
<p>Drag steps here or click to add</p>
</div>
`
: html`
<div class="steps-list" role="list">
${this.canvasSteps.map(
(step, index) => html`
${index > 0
? html`
<div class="step-connector" aria-hidden="true">
<div class="step-connector-line"></div>
</div>
`
: ''}
<article
class="step-card ${this.selectedStep === step.name ? 'selected' : ''}"
role="listitem"
tabindex="0"
draggable="true"
@dragstart=${(e: DragEvent) => this.handleDragStart(e, index)}
@dragend=${this.handleDragEnd}
@dragover=${(e: DragEvent) => this.handleDragOver(e, index)}
@click=${() => this.selectStep(step.name)}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.selectStep(step.name);
}
}}
>
<span class="step-drag-handle" aria-label="Drag to reorder">
<rh-icon set="ui" icon="grip-horizontal"></rh-icon>
</span>
<div class="step-info">
<div class="step-name">${step.name}</div>
<div class="step-image">${this.getStepImage(step)}</div>
</div>
<div class="step-actions">
<button
class="step-action-btn"
title="Edit step"
aria-label="Edit ${step.name}"
@click=${(e: Event) => {
e.stopPropagation();
this.selectStep(step.name);
}}
>
<rh-icon set="ui" icon="edit"></rh-icon>
</button>
<button
class="step-action-btn delete"
title="Delete step"
aria-label="Delete ${step.name}"
@click=${(e: Event) => {
e.stopPropagation();
this.removeStep(step.name);
}}
>
<rh-icon set="ui" icon="trash"></rh-icon>
</button>
</div>
</article>
`
)}
</div>
`}
</div>
</div>
`}
<!-- Side Drawer for Global Settings -->
<side-drawer
?open=${this.showGlobalSettings}
heading="Pipeline Settings"
@close=${this.closeDrawer}
>
<global-settings
.pipeline=${this.pipeline}
@update=${(e: CustomEvent) => {
this.pipeline = e.detail.pipeline;
}}
></global-settings>
</side-drawer>
<!-- Side Drawer for Step Editor -->
<side-drawer
?open=${this.showStepEditor && !!this.selectedStep}
heading="Edit Step"
.showWidthToggle=${true}
@close=${this.closeDrawer}
>
${this.selectedStep
? html`
<step-editor
.step=${this.canvasSteps.find(s => s.name === this.selectedStep)}
.allSteps=${this.canvasSteps.map(s => s.name)}
.namespace=${this.pipeline.metadata.namespace || 'default'}
@update=${(e: CustomEvent) => this.updateStep(this.selectedStep!, e.detail)}
@delete=${() => {
this.removeStep(this.selectedStep!);
this.closeDrawer();
}}
@close=${this.closeDrawer}
></step-editor>
`
: ''}
</side-drawer>
`;
}
}
}
// Register the custom element's type with TypeScript so DOM APIs like
// document.createElement('pipeline-canvas') return the correct class type.
declare global {
interface HTMLElementTagNameMap {
'pipeline-canvas': PipelineCanvas;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/builder/step-editor.ts | TypeScript | /**
* Step Editor - Edit step configuration
* Following RHDS form patterns
*/
import { LitElement, html, css } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import type { PipelineStep, EnvVar, EnvFromSource } from '../../types/pipeline.js';
import { validateStepName } from '../../lib/graph-layout.js';
import { k8sClient } from '../../lib/k8s-client.js';
import type { EditorLanguage } from '../shared/code-editor.js';
/** Name and data keys of an Opaque secret offered in the envFrom picker. */
interface SecretInfo {
name: string;
keys: string[];
}
@customElement('step-editor')
export class StepEditor extends LitElement {
// The step being edited; undefined until the parent assigns one.
@property({ type: Object }) step?: PipelineStep;
// Names of all steps in the pipeline (used for runIf dependency checkboxes).
@property({ type: Array }) allSteps: string[] = [];
// Namespace used when listing secrets for the envFrom picker.
@property({ type: String }) namespace = 'default';
// Validation error for the step-name input, or null when the name is valid.
@state() private nameError: string | null = null;
// Whether the advanced-settings accordion is expanded.
@state() private showAdvanced = false;
// Opaque secrets available in the current namespace.
@state() private availableSecrets: SecretInfo[] = [];
// True while listSecrets is in flight (drives the refresh-button spinner).
@state() private loadingSecrets = false;
// Language shown in the args code editor; auto-detected from the command.
@state() private editorLanguage: EditorLanguage = 'bash';
// Scoped component styles (RHDS design tokens with hard-coded fallbacks).
static styles = css`
:host {
display: flex;
flex-direction: column;
height: 100%;
width: 100%;
min-height: 0;
box-sizing: border-box;
}
/* ===== Form layout ===== */
.form-group {
margin-block-end: var(--rh-space-md, 16px);
width: 100%;
box-sizing: border-box;
}
.form-group.flex-grow {
flex: 1;
display: flex;
flex-direction: column;
min-height: 0;
}
.form-group.flex-grow textarea {
flex: 1;
min-height: 200px;
}
label {
display: block;
margin-block-end: var(--rh-space-xs, 4px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.label-optional {
font-weight: normal;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
input[type='text'],
input[type='number'],
textarea,
select {
width: 100%;
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
transition: border-color 150ms ease;
box-sizing: border-box;
}
input:focus,
textarea:focus,
select:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
input.error {
border-color: var(--rh-color-red-500, #c9190b);
}
input.error:focus {
box-shadow: 0 0 0 1px var(--rh-color-red-500, #c9190b);
}
.error-text {
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-red-700, #a30d05);
margin-block-start: var(--rh-space-xs, 4px);
}
textarea {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
min-height: 120px;
resize: vertical;
}
.section-header {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
margin: var(--rh-space-lg, 24px) 0 var(--rh-space-md, 16px) 0;
padding-block-end: var(--rh-space-sm, 8px);
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.section-header h4 {
margin: 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-body-text-md, 1rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
}
/* ===== Environment variable editor rows ===== */
.env-list {
display: flex;
flex-direction: column;
gap: var(--rh-space-sm, 8px);
}
.env-row {
display: grid;
grid-template-columns: 1fr 1fr auto;
gap: var(--rh-space-sm, 8px);
align-items: start;
}
.env-row input {
width: 100%;
}
.icon-btn {
display: inline-flex;
align-items: center;
justify-content: center;
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
background: var(--rh-color-surface-lightest, #ffffff);
cursor: pointer;
transition: all 150ms ease;
}
.icon-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.icon-btn:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.icon-btn.danger:hover {
background: var(--rh-color-red-100, #fce8e6);
border-color: var(--rh-color-red-500, #c9190b);
color: var(--rh-color-red-700, #a30d05);
}
.icon-btn rh-icon {
--rh-icon-size: 16px;
}
/* Custom accordion styling */
.accordion {
margin-block-start: var(--rh-space-lg, 24px);
width: 100%;
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
overflow: hidden;
box-sizing: border-box;
}
.accordion-header {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
width: 100%;
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
background: var(--rh-color-surface-lighter, #f5f5f5);
border: none;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
cursor: pointer;
text-align: left;
}
.accordion-header:hover {
background: var(--rh-color-surface-light, #e0e0e0);
}
.accordion-header:focus {
outline: none;
}
/* CSS-triangle chevron; rotates 90deg when the accordion is open */
.accordion-header .chevron {
display: inline-block;
width: 0;
height: 0;
border-top: 5px solid transparent;
border-bottom: 5px solid transparent;
border-left: 6px solid currentColor;
transition: transform 150ms ease;
}
.accordion-header .chevron.open {
transform: rotate(90deg);
}
.accordion-content {
padding: var(--rh-space-md, 16px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.accordion-content .form-group {
margin-block-end: var(--rh-space-md, 16px);
}
.accordion-content .form-group:last-child {
margin-block-end: 0;
}
/* ===== runIf (step dependency) controls ===== */
.runif-section {
padding: var(--rh-space-md, 16px);
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: var(--rh-border-radius-default, 3px);
margin-block-start: var(--rh-space-sm, 8px);
}
.checkbox-group {
display: flex;
flex-wrap: wrap;
gap: var(--rh-space-sm, 8px);
}
.checkbox-label {
display: inline-flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
padding: var(--rh-space-xs, 4px) var(--rh-space-sm, 8px);
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
transition: all 150ms ease;
}
.checkbox-label:hover {
background: var(--rh-color-surface-light, #e0e0e0);
}
.checkbox-label:has(input:checked) {
background: var(--rh-color-blue-50, #e7f1fa);
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.checkbox-label input {
width: auto;
margin: 0;
}
.radio-group {
display: flex;
gap: var(--rh-space-md, 16px);
}
.radio-label {
display: inline-flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
cursor: pointer;
}
.radio-label input {
width: auto;
margin: 0;
}
.actions-section {
display: flex;
justify-content: space-between;
gap: var(--rh-space-md, 16px);
margin-block-start: var(--rh-space-xl, 32px);
padding-block-start: var(--rh-space-lg, 24px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
/* ===== Secret (envFrom) picker ===== */
.secret-selector-row {
display: flex;
gap: var(--rh-space-sm, 8px);
align-items: stretch;
}
.secret-selector-row select {
flex: 1;
}
.secret-selector-row .icon-btn {
flex-shrink: 0;
}
.secret-selector-row .icon-btn.loading {
opacity: 0.6;
pointer-events: none;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
.secret-selector-row .icon-btn.loading rh-icon {
animation: spin 1s linear infinite;
}
.envfrom-list {
display: flex;
flex-direction: column;
gap: var(--rh-space-sm, 8px);
}
.envfrom-item {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-sm, 8px);
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: var(--rh-border-radius-default, 3px);
}
.envfrom-item rh-tag {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
.envfrom-item .secret-name {
flex: 1;
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
}
`;
/** Lifecycle: subscribe to app-wide namespace changes and load selectable secrets. */
connectedCallback() {
  super.connectedCallback();
  window.addEventListener('namespace-change', this.handleNamespaceChange);
  // Fire-and-forget; fetchSecrets manages its own loading/error state.
  this.fetchSecrets();
}
/** Lifecycle: unsubscribe from namespace changes. */
disconnectedCallback() {
super.disconnectedCallback();
window.removeEventListener('namespace-change', this.handleNamespaceChange);
}
/**
 * Lit lifecycle hook: when a different step is assigned, re-detect the
 * language shown in the args code editor from that step's command.
 */
updated(changedProperties: Map<string, unknown>) {
// Auto-detect language when step changes
if (changedProperties.has('step') && this.step) {
this.editorLanguage = this.detectLanguageFromCommand();
}
}
// Declared as a class field (bound arrow) so add/removeEventListener receive
// the same function reference.
private handleNamespaceChange = ((e: CustomEvent) => {
this.namespace = e.detail.namespace;
this.fetchSecrets();
}) as EventListener;
/**
 * Load the Opaque secrets of the current namespace for the envFrom picker.
 * Non-Opaque (system/service-account) secrets are filtered out; on failure
 * the list is cleared and the error logged.
 */
private async fetchSecrets() {
  this.loadingSecrets = true;
  try {
    const secrets = await k8sClient.listSecrets(this.namespace);
    const opaque = secrets.filter(secret => secret.type === 'Opaque');
    this.availableSecrets = opaque.map(secret => ({
      name: secret.metadata.name,
      keys: secret.data ? Object.keys(secret.data) : [],
    }));
  } catch (error) {
    console.error('Failed to fetch secrets:', error);
    this.availableSecrets = [];
  } finally {
    this.loadingSecrets = false;
  }
}
// First (and only) container of the step's Job pod template; undefined when no step is set.
private getContainer() {
return this.step?.jobSpec.template.spec.containers[0];
}
/** True when the command ends with "-c" (e.g. "sh -c"), i.e. args hold one whole script. */
private isScriptMode(): boolean {
  const command = this.getContainer()?.command ?? [];
  return command[command.length - 1] === '-c';
}
/**
 * Guess the script language for the args code editor from the container
 * command. Anything that is not recognizably Python or JavaScript maps to
 * bash, which also covers sh/bash/kubectl wrappers — the previous explicit
 * sh/bash/kubectl check was redundant with the default.
 */
private detectLanguageFromCommand(): EditorLanguage {
  const commandStr = (this.getContainer()?.command || []).join(' ').toLowerCase();
  if (commandStr.includes('python')) return 'python';
  if (commandStr.includes('node') || commandStr.includes('javascript')) return 'javascript';
  return 'bash'; // default (covers sh, bash, kubectl, and unknown commands)
}
// User manually picked a language in the code editor's selector.
private handleEditorLanguageChange(e: CustomEvent) {
this.editorLanguage = e.detail.language;
}
// Bubble a partial step update to the owning pipeline builder.
private dispatchUpdate(updates: Partial<PipelineStep>) {
this.dispatchEvent(new CustomEvent('update', { detail: updates }));
}
/** Validate a step rename and propagate it to the parent. */
private updateName(name: string) {
  const { valid, error } = validateStepName(name);
  this.nameError = valid ? null : error || null;
  this.dispatchUpdate({ name });
}
/** Propagate a container image change through an immutable jobSpec update. */
private updateImage(image: string) {
  if (!this.step) return;
  const { jobSpec } = this.step;
  const containers = [...(jobSpec.template.spec.containers || [])];
  if (containers.length > 0) {
    containers[0] = { ...containers[0], image };
  }
  this.dispatchUpdate({
    jobSpec: {
      ...jobSpec,
      template: {
        ...jobSpec.template,
        spec: { ...jobSpec.template.spec, containers },
      },
    },
  });
}
/**
 * Update the container command from a whitespace-separated string.
 * The input is trimmed before splitting so leading/trailing whitespace no
 * longer produces empty-string command tokens (" sh -c" previously became
 * ["", "sh", "-c"]). Quoted arguments are still not supported.
 */
private updateCommand(commandStr: string) {
  if (!this.step) return;
  const trimmed = commandStr.trim();
  const command = trimmed ? trimmed.split(/\s+/) : [];
  const containers = [...(this.step.jobSpec.template.spec.containers || [])];
  if (containers.length > 0) {
    containers[0] = {
      ...containers[0],
      command,
    };
  }
  this.dispatchUpdate({
    jobSpec: {
      ...this.step.jobSpec,
      template: {
        ...this.step.jobSpec.template,
        spec: {
          ...this.step.jobSpec.template.spec,
          containers,
        },
      },
    },
  });
}
/**
 * Update container args from the editor text. In script mode (command ends
 * with "-c", e.g. "sh -c" or "python -c") the whole text becomes a single
 * argument so newlines are preserved; otherwise each non-blank line becomes
 * one argument.
 */
private updateArgs(argsStr: string) {
  if (!this.step) return;
  const command = this.getContainer()?.command || [];
  const scriptMode = command.length > 0 && command[command.length - 1] === '-c';

  let args: string[];
  if (scriptMode) {
    const script = argsStr.trim();
    args = script ? [script] : [];
  } else {
    args = argsStr.split('\n').filter(line => line.trim());
  }

  const { jobSpec } = this.step;
  const containers = [...(jobSpec.template.spec.containers || [])];
  if (containers.length > 0) {
    containers[0] = { ...containers[0], args };
  }
  this.dispatchUpdate({
    jobSpec: {
      ...jobSpec,
      template: {
        ...jobSpec.template,
        spec: { ...jobSpec.template.spec, containers },
      },
    },
  });
}
// Current env vars on the container (empty list when unset).
private getEnvVars(): EnvVar[] {
return this.getContainer()?.env || [];
}
// Current envFrom sources on the container (empty list when unset).
private getEnvFrom(): EnvFromSource[] {
return this.getContainer()?.envFrom || [];
}
/** Write the envFrom list onto the container, omitting the key entirely when empty. */
private updateEnvFrom(envFrom: EnvFromSource[]) {
  if (!this.step) return;
  const { jobSpec } = this.step;
  const containers = [...(jobSpec.template.spec.containers || [])];
  if (containers.length > 0) {
    containers[0] = {
      ...containers[0],
      envFrom: envFrom.length > 0 ? envFrom : undefined,
    };
  }
  this.dispatchUpdate({
    jobSpec: {
      ...jobSpec,
      template: {
        ...jobSpec.template,
        spec: { ...jobSpec.template.spec, containers },
      },
    },
  });
}
/** Append a secretRef to envFrom unless that secret is already referenced. */
private addSecretEnvFrom(secretName: string) {
  if (!secretName) return;
  const current = this.getEnvFrom();
  const alreadyAdded = current.some(entry => entry.secretRef?.name === secretName);
  if (alreadyAdded) return;
  this.updateEnvFrom([...current, { secretRef: { name: secretName } }]);
}
/** Remove the envFrom entry referencing the given secret. */
private removeSecretEnvFrom(secretName: string) {
  const remaining = this.getEnvFrom().filter(entry => entry.secretRef?.name !== secretName);
  this.updateEnvFrom(remaining);
}
/** Write the env var list onto the container via an immutable jobSpec update. */
private updateEnvVars(envVars: EnvVar[]) {
  if (!this.step) return;
  const { jobSpec } = this.step;
  const containers = [...(jobSpec.template.spec.containers || [])];
  if (containers.length > 0) {
    containers[0] = { ...containers[0], env: envVars };
  }
  this.dispatchUpdate({
    jobSpec: {
      ...jobSpec,
      template: {
        ...jobSpec.template,
        spec: { ...jobSpec.template.spec, containers },
      },
    },
  });
}
/** Append a blank name/value pair so the user can fill in a new variable. */
private addEnvVar() {
  const next = this.getEnvVars().concat({ name: '', value: '' });
  this.updateEnvVars(next);
}
/** Remove the env var at position `index`. */
private removeEnvVar(index: number) {
  const remaining = this.getEnvVars().filter((_, position) => position !== index);
  this.updateEnvVars(remaining);
}
/** Update one field ('name' or 'value') of the env var at `index`. */
private updateEnvVar(index: number, field: 'name' | 'value', value: string) {
  const next = this.getEnvVars().map((entry, position) => {
    if (position !== index) return entry;
    return { ...entry, [field]: value };
  });
  this.updateEnvVars(next);
}
/** Patch a single field on the step's runIf clause, creating the clause if absent. */
private updateRunIf(field: string, value: unknown) {
  if (!this.step) return;
  const base = this.step.runIf ?? { steps: [] };
  this.dispatchUpdate({ runIf: { ...base, [field]: value } });
}
/**
 * Toggle `stepName` in the runIf dependency list.
 * Removing the last dependency drops the runIf clause entirely.
 */
private toggleRunIfStep(stepName: string) {
  if (!this.step) return;
  const current = this.step.runIf?.steps || [];
  let next: string[];
  if (current.includes(stepName)) {
    next = current.filter(name => name !== stepName);
  } else {
    next = [...current, stepName];
  }
  if (next.length === 0) {
    // Remove runIf entirely
    this.dispatchUpdate({ runIf: undefined });
  } else {
    this.updateRunIf('steps', next);
  }
}
/** Notify the parent editor that this step should be deleted. */
private dispatchDelete() {
  this.dispatchEvent(new CustomEvent('delete'));
}
/** Notify the parent editor that this step panel should be closed. */
private dispatchClose() {
  this.dispatchEvent(new CustomEvent('close'));
}
render() {
if (!this.step) {
return html`<p>No step selected</p>`;
}
const container = this.getContainer();
const command = container?.command?.join(' ') || '';
const args = container?.args?.join('\n') || '';
const envVars = this.getEnvVars();
const otherSteps = this.allSteps.filter(s => s !== this.step?.name);
return html`
<!-- Name -->
<div class="form-group">
<label for="step-name">Step Name *</label>
<input
type="text"
id="step-name"
class="${this.nameError ? 'error' : ''}"
.value=${this.step.name}
@input=${(e: Event) => this.updateName((e.target as HTMLInputElement).value)}
aria-describedby=${this.nameError ? 'name-error' : ''}
aria-invalid=${this.nameError ? 'true' : 'false'}
/>
${this.nameError
? html` <div id="name-error" class="error-text" role="alert">${this.nameError}</div> `
: ''}
</div>
<!-- Image -->
<div class="form-group">
<label for="step-image">Container Image</label>
<input
type="text"
id="step-image"
.value=${container?.image || ''}
@input=${(e: Event) => this.updateImage((e.target as HTMLInputElement).value)}
placeholder="e.g., registry.access.redhat.com/ubi9/ubi-minimal:latest"
/>
</div>
<!-- Command -->
<div class="form-group">
<label for="step-command">Command</label>
<input
type="text"
id="step-command"
.value=${command}
@input=${(e: Event) => this.updateCommand((e.target as HTMLInputElement).value)}
placeholder="e.g., sh -c or python -c or kubectl"
/>
</div>
<!-- Arguments / Script -->
<div class="form-group flex-grow">
<label for="step-args">
${this.isScriptMode() ? 'Script' : 'Arguments'}
<span class="label-optional"
>${this.isScriptMode() ? '(passed as single argument to -c)' : '(one per line)'}</span
>
</label>
<code-editor
.value=${args}
.language=${this.editorLanguage}
.showLanguageSelector=${this.isScriptMode()}
.minHeight=${'200px'}
@change=${(e: CustomEvent) => this.updateArgs(e.detail.value)}
@language-change=${this.handleEditorLanguageChange}
></code-editor>
</div>
<!-- Environment Variables -->
<div class="section-header">
<rh-icon set="ui" icon="list"></rh-icon>
<h4>Environment Variables</h4>
</div>
<div class="env-list">
${envVars.map(
(env, index) => html`
<div class="env-row">
<input
type="text"
placeholder="Name"
.value=${env.name}
@input=${(e: Event) =>
this.updateEnvVar(index, 'name', (e.target as HTMLInputElement).value)}
aria-label="Variable name"
/>
<input
type="text"
placeholder="Value"
.value=${env.value || ''}
@input=${(e: Event) =>
this.updateEnvVar(index, 'value', (e.target as HTMLInputElement).value)}
aria-label="Variable value"
/>
<button
class="icon-btn danger"
@click=${() => this.removeEnvVar(index)}
title="Remove variable"
aria-label="Remove variable"
>
<rh-icon set="ui" icon="trash"></rh-icon>
</button>
</div>
`
)}
<rh-button variant="secondary" @click=${this.addEnvVar}>
<rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
Add Variable
</rh-button>
</div>
<!-- Conditional Execution -->
${otherSteps.length > 0
? html`
<div class="section-header">
<rh-icon set="ui" icon="link"></rh-icon>
<h4>Conditional Execution</h4>
<span class="label-optional">(optional)</span>
</div>
<div class="runif-section">
<div class="form-group">
<label>Run after these steps:</label>
<div class="checkbox-group">
${otherSteps.map(
stepName => html`
<label class="checkbox-label">
<input
type="checkbox"
?checked=${this.step?.runIf?.steps?.includes(stepName)}
@change=${() => this.toggleRunIfStep(stepName)}
/>
${stepName}
</label>
`
)}
</div>
</div>
${this.step.runIf?.steps?.length
? html`
<div class="form-group">
<label>Condition:</label>
<div class="radio-group">
<label class="radio-label">
<input
type="radio"
name="condition"
value="success"
?checked=${(this.step.runIf?.condition || 'success') === 'success'}
@change=${() => this.updateRunIf('condition', 'success')}
/>
On Success
</label>
<label class="radio-label">
<input
type="radio"
name="condition"
value="fail"
?checked=${this.step.runIf?.condition === 'fail'}
@change=${() => this.updateRunIf('condition', 'fail')}
/>
On Failure
</label>
</div>
</div>
<div class="form-group">
<label>Operator:</label>
<div class="radio-group">
<label class="radio-label">
<input
type="radio"
name="operator"
value="and"
?checked=${(this.step.runIf?.operator || 'and') === 'and'}
@change=${() => this.updateRunIf('operator', 'and')}
/>
ALL (AND)
</label>
<label class="radio-label">
<input
type="radio"
name="operator"
value="or"
?checked=${this.step.runIf?.operator === 'or'}
@change=${() => this.updateRunIf('operator', 'or')}
/>
ANY (OR)
</label>
</div>
</div>
`
: ''}
</div>
`
: ''}
<!-- Advanced Settings -->
<div class="accordion">
<button
class="accordion-header"
@click=${() => (this.showAdvanced = !this.showAdvanced)}
aria-expanded=${this.showAdvanced}
>
<span class="chevron ${this.showAdvanced ? 'open' : ''}"></span>
Advanced Settings
</button>
${this.showAdvanced
? html`
<div class="accordion-content">
<div class="form-group">
<label for="backoff-limit">Backoff Limit</label>
<input
type="number"
id="backoff-limit"
min="0"
.value=${String(this.step.jobSpec.backoffLimit ?? 6)}
@input=${(e: Event) => {
const value = parseInt((e.target as HTMLInputElement).value) || 6;
this.dispatchUpdate({
jobSpec: { ...this.step!.jobSpec, backoffLimit: value },
});
}}
/>
</div>
<div class="form-group">
<label for="active-deadline">Active Deadline (seconds)</label>
<input
type="number"
id="active-deadline"
min="0"
.value=${String(this.step.jobSpec.activeDeadlineSeconds || '')}
placeholder="No limit"
@input=${(e: Event) => {
const value = parseInt((e.target as HTMLInputElement).value);
this.dispatchUpdate({
jobSpec: {
...this.step!.jobSpec,
activeDeadlineSeconds: value || undefined,
},
});
}}
/>
</div>
<div class="form-group">
<label for="secret-envfrom">
Secret as Environment Variables
<span class="label-optional">(mount all keys from a secret)</span>
</label>
<div class="secret-selector-row">
${this.loadingSecrets
? html`
<select id="secret-envfrom" disabled>
<option>Loading secrets...</option>
</select>
`
: html`
<select
id="secret-envfrom"
@change=${(e: Event) => {
const value = (e.target as HTMLSelectElement).value;
if (value) {
this.addSecretEnvFrom(value);
(e.target as HTMLSelectElement).value = '';
}
}}
>
<option value="">-- Select a secret to add --</option>
${this.availableSecrets
.filter(
s => !this.getEnvFrom().some(e => e.secretRef?.name === s.name)
)
.map(
secret => html`
<option value=${secret.name}>
${secret.name} (${secret.keys.length} keys)
</option>
`
)}
</select>
`}
<button
class="icon-btn ${this.loadingSecrets ? 'loading' : ''}"
@click=${() => this.fetchSecrets()}
title="Refresh secrets list"
aria-label="Refresh secrets list"
?disabled=${this.loadingSecrets}
>
<rh-icon set="ui" icon="sync"></rh-icon>
</button>
</div>
${this.getEnvFrom().filter(e => e.secretRef).length > 0
? html`
<div class="envfrom-list" style="margin-top: var(--rh-space-sm, 8px);">
${this.getEnvFrom()
.filter(e => e.secretRef)
.map(
envFrom => html`
<div class="envfrom-item">
<rh-tag compact color="orange">sec</rh-tag>
<span class="secret-name">${envFrom.secretRef!.name}</span>
<button
class="icon-btn danger"
@click=${() =>
this.removeSecretEnvFrom(envFrom.secretRef!.name)}
title="Remove secret"
aria-label="Remove ${envFrom.secretRef!.name}"
>
<rh-icon set="ui" icon="trash"></rh-icon>
</button>
</div>
`
)}
</div>
`
: ''}
</div>
</div>
`
: ''}
</div>
<!-- Actions -->
<div class="actions-section">
<rh-button variant="danger" @click=${this.dispatchDelete}>
<rh-icon set="ui" icon="trash" slot="icon"></rh-icon>
Delete Step
</rh-button>
<rh-button variant="secondary" @click=${this.dispatchClose}>
<rh-icon set="ui" icon="close" slot="icon"></rh-icon>
Close
</rh-button>
</div>
`;
}
}
// Register the element in the global tag-name map so document.createElement('step-editor')
// and querySelector results are typed as StepEditor.
declare global {
  interface HTMLElementTagNameMap {
    'step-editor': StepEditor;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/monitor/pipeline-detail.ts | TypeScript | /**
* Pipeline Detail - Shows detailed view of a single pipeline
* Following RHDS patterns for tabs and data display
*/
import { LitElement, html, css, svg } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import { stringify as yamlStringify } from 'yaml';
import type {
Pipeline,
StepStatus,
PipelineStep,
PipelineGraph,
PipelineEdge,
PipelineNode,
} from '../../types/pipeline.js';
import { k8sClient } from '../../lib/k8s-client.js';
import { navigate } from '../../lib/router.js';
import { pipelineToGraph, layoutGraph } from '../../lib/graph-layout.js';
interface RouteLocation {
params: { namespace?: string; name?: string };
}
@customElement('pipeline-detail')
export class PipelineDetail extends LitElement {
@property({ type: Object }) location?: RouteLocation;
@state() private pipeline: Pipeline | null = null;
@state() private loading = true;
@state() private error: string | null = null;
@state() private selectedStep: string | null = null;
@state() private activeTab = 0;
@state() private graph: PipelineGraph | null = null;
@state() private openMenuId: string | null = null;
// Cached step data to prevent unnecessary re-renders of step-detail
private cachedStep: PipelineStep | null = null;
private cachedStatus: StepStatus | null = null;
private cachedStepName: string | null = null;
private pollInterval?: ReturnType<typeof setInterval>;
static styles = css`
:host {
display: block;
}
rh-breadcrumb {
display: block;
margin-block-end: var(--rh-space-lg, 24px);
}
/* Breadcrumb light DOM styles (needed inside shadow DOM) */
rh-breadcrumb ol {
display: flex;
flex-wrap: wrap;
align-items: center;
gap: var(--rh-space-xs, 4px);
list-style: none;
padding: 0;
margin: 0;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
}
rh-breadcrumb li {
display: flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
}
rh-breadcrumb li:not(:last-child)::after {
content: '/';
color: var(--rh-color-text-secondary-on-light, #6a6e73);
margin-inline-start: var(--rh-space-xs, 4px);
}
rh-breadcrumb a {
color: var(--rh-color-interactive-blue-darker, #0066cc);
text-decoration: none;
transition: color 150ms ease;
}
rh-breadcrumb a:hover {
color: var(--rh-color-interactive-blue-darkest, #004d99);
text-decoration: underline;
}
rh-breadcrumb a[aria-current='page'] {
color: var(--rh-color-text-primary-on-light, #151515);
pointer-events: none;
}
.header {
display: flex;
justify-content: space-between;
align-items: flex-start;
margin-block-end: var(--rh-space-xl, 32px);
flex-wrap: wrap;
gap: var(--rh-space-md, 16px);
}
.header-left {
display: flex;
flex-direction: column;
gap: var(--rh-space-sm, 8px);
}
.header-left h1 {
display: flex;
align-items: center;
gap: var(--rh-space-md, 16px);
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-lg, 1.75rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
margin: 0;
}
.header-meta {
display: flex;
gap: var(--rh-space-xl, 32px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
}
.header-meta-item {
display: flex;
flex-direction: column;
gap: 2px;
}
.header-meta-label {
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.header-actions {
display: flex;
gap: var(--rh-space-sm, 8px);
}
.content {
display: block;
}
.graph-container {
background: var(--rh-color-surface-lighter, #f5f5f5);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
padding: var(--rh-space-lg, 24px);
position: relative;
}
.graph-canvas {
position: relative;
}
.graph-node {
position: absolute;
background: var(--rh-color-surface-lightest, #ffffff);
border: 2px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
padding: var(--rh-space-md, 16px);
width: 240px;
height: 100px;
box-sizing: border-box;
cursor: pointer;
transition: all 150ms ease;
overflow: hidden;
}
.graph-node:hover {
box-shadow: var(--rh-box-shadow-md, 0 4px 6px -1px rgba(21, 21, 21, 0.1));
}
.graph-node:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.graph-node.selected {
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 2px rgba(0, 102, 204, 0.2);
}
.graph-node.succeeded {
border-color: var(--rh-color-green-500, #3e8635);
}
.graph-node.running {
border-color: var(--rh-color-teal-500, #009596);
}
.graph-node.failed {
border-color: var(--rh-color-red-500, #c9190b);
}
.node-header {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
margin-block-end: var(--rh-space-xs, 4px);
}
.node-name {
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
}
.node-image {
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.graph-edges {
position: absolute;
top: 0;
left: 0;
pointer-events: none;
overflow: visible;
}
.graph-edge {
fill: none;
stroke: var(--rh-color-gray-40, #8a8d90);
stroke-width: 2;
}
.graph-edge.success {
stroke: var(--rh-color-green-500, #3e8635);
}
.graph-edge.failure {
stroke: var(--rh-color-red-500, #c9190b);
}
.graph-arrow {
fill: var(--rh-color-gray-40, #8a8d90);
}
.graph-arrow.success {
fill: var(--rh-color-green-500, #3e8635);
}
.graph-arrow.failure {
fill: var(--rh-color-red-500, #c9190b);
}
rh-table {
overflow: visible;
}
.timeline-table {
width: 100%;
border-collapse: collapse;
overflow: visible;
}
.timeline-table th,
.timeline-table td {
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
text-align: start;
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.timeline-table th {
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.timeline-table tbody tr {
cursor: pointer;
transition: background-color 150ms ease;
}
.timeline-table tbody tr:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
/* Kebab menu styles for timeline */
.actions-cell {
position: relative;
width: 48px;
text-align: center;
overflow: visible;
}
.timeline-table tbody {
overflow: visible;
}
.timeline-table tbody tr {
overflow: visible;
}
.kebab-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: none;
border: none;
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.kebab-btn:hover {
background: var(--rh-color-surface-light, #e0e0e0);
color: var(--rh-color-text-primary-on-light, #151515);
}
.kebab-btn:focus {
outline: none;
}
.kebab-menu {
position: absolute;
top: 100%;
right: 0;
z-index: 1000;
min-width: 150px;
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
box-shadow: var(--rh-box-shadow-md, 0 4px 6px -1px rgba(21, 21, 21, 0.1));
padding: var(--rh-space-xs, 4px) 0;
}
.kebab-menu-item {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
width: 100%;
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
background: none;
border: none;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
cursor: pointer;
text-align: left;
transition: background-color 150ms ease;
}
.kebab-menu-item:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.kebab-menu-item:focus {
outline: none;
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.kebab-menu-item rh-icon {
--rh-icon-size: 16px;
}
.yaml-container {
width: 100%;
max-width: 100%;
box-sizing: border-box;
}
.yaml-container code-editor {
display: block;
width: 100%;
}
.loading-container,
.error-container {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding: var(--rh-space-2xl, 48px);
gap: var(--rh-space-md, 16px);
}
.error-container {
color: var(--rh-color-red-700, #a30d05);
}
rh-tabs {
margin-block-end: var(--rh-space-lg, 24px);
/* Override RHDS focus custom properties */
--rh-tabs-link-focus-outline: none;
--rh-tabs-focus-outline: none;
}
/* Remove blue focus outline from tabs and panels */
rh-tabs,
rh-tabs::part(tabs),
rh-tabs::part(panels) {
outline: none !important;
}
rh-tab,
rh-tab-panel {
outline: none !important;
/* Override any RHDS focus variables */
--rh-tab-focus-outline: none;
--rh-focus-outline-color: transparent;
--rh-focus-outline-width: 0;
}
rh-tab:focus,
rh-tab:focus-within,
rh-tab-panel:focus {
outline: none !important;
box-shadow: none !important;
}
rh-tab:focus-visible {
outline: none !important;
box-shadow: none !important;
}
/* Target internal button if exposed via ::part */
rh-tab::part(button),
rh-tab::part(tab) {
outline: none !important;
}
rh-tab::part(button):focus,
rh-tab::part(button):focus-visible,
rh-tab::part(tab):focus,
rh-tab::part(tab):focus-visible {
outline: none !important;
box-shadow: none !important;
}
`;
/** Load immediately, poll every 3s, and listen for document clicks to close menus. */
connectedCallback() {
  super.connectedCallback();
  void this.loadPipeline();
  this.pollInterval = setInterval(() => void this.loadPipeline(), 3000);
  document.addEventListener('click', this.handleDocumentClick);
}
/** Stop polling and detach the document-level click listener. */
disconnectedCallback() {
  super.disconnectedCallback();
  if (this.pollInterval !== undefined) {
    clearInterval(this.pollInterval);
  }
  document.removeEventListener('click', this.handleDocumentClick);
}
// Arrow-function field so the same bound reference works for both
// addEventListener and removeEventListener; any click reaching the
// document closes the open kebab menu.
private handleDocumentClick = () => {
  this.closeMenu();
};
/** Toggle the kebab menu for `stepName`; stop the click so the document handler doesn't re-close it. */
private toggleMenu(e: Event, stepName: string) {
  e.stopPropagation();
  const isOpen = this.openMenuId === stepName;
  this.openMenuId = isOpen ? null : stepName;
}
/** Close any open kebab menu. */
private closeMenu() {
  this.openMenuId = null;
}
/** Route parameters with defaults: namespace falls back to 'default', name to ''. */
private get params(): { namespace: string; name: string } {
  const routeParams = this.location?.params;
  return {
    namespace: routeParams?.namespace || 'default',
    name: routeParams?.name || '',
  };
}
/**
 * Fetch the pipeline named in the route and recompute the graph layout.
 * Runs once on connect and then every 3s from the poll timer; on failure the
 * error message is surfaced while `loading` is always cleared.
 * NOTE(review): invocations are not serialized — a slow response can race a
 * later poll's response; confirm whether an in-flight guard is needed.
 */
private async loadPipeline() {
  const { namespace, name } = this.params;
  if (!name) return;
  try {
    this.pipeline = await k8sClient.getPipeline(namespace, name);
    this.error = null;
    // Generate graph layout
    const rawGraph = pipelineToGraph(this.pipeline);
    this.graph = await layoutGraph(rawGraph);
  } catch (e) {
    this.error = e instanceof Error ? e.message : 'Failed to load pipeline';
  } finally {
    this.loading = false;
  }
}
/** Ask for confirmation, delete the pipeline server-side, then return to the list view. */
private async deletePipeline() {
  if (!this.pipeline) return;
  const { name, namespace } = this.pipeline.metadata;
  const confirmed = confirm(`Are you sure you want to delete pipeline "${name}"?`);
  if (!confirmed) return;
  try {
    await k8sClient.deletePipeline(namespace || 'default', name);
    navigate('/pipelines');
  } catch (e) {
    alert(`Failed to delete pipeline: ${e instanceof Error ? e.message : 'Unknown error'}`);
  }
}
/**
 * Human-readable elapsed time between two ISO timestamps.
 *
 * Returns '-' when startTime is missing and treats a missing endTime as
 * "now" (still running). Fixes: negative deltas caused by client/server
 * clock skew previously rendered as e.g. "-2m -5s" — now clamped to "0s";
 * runs of an hour or more now include an hours component.
 */
private formatDuration(startTime?: string, endTime?: string): string {
  if (!startTime) return '-';
  const start = new Date(startTime);
  const end = endTime ? new Date(endTime) : new Date();
  // Clamp to zero so clock skew never produces negative components.
  const totalSecs = Math.max(0, Math.floor((end.getTime() - start.getTime()) / 1000));
  const hours = Math.floor(totalSecs / 3600);
  const mins = Math.floor((totalSecs % 3600) / 60);
  const secs = totalSecs % 60;
  if (hours > 0) {
    return `${hours}h ${mins}m ${secs}s`;
  }
  if (mins > 0) {
    return `${mins}m ${secs}s`;
  }
  return `${secs}s`;
}
/** Image of the step's first container, or 'default' when unset. */
private getStepImage(step: PipelineStep): string {
  const [firstContainer] = step.jobSpec.template.spec.containers;
  return firstContainer?.image || 'default';
}
/** Open the side drawer for the given step. */
private selectStep(stepName: string) {
  this.selectedStep = stepName;
}
/** Close the step-details side drawer. */
private closeDrawer() {
  this.selectedStep = null;
}
render() {
if (this.loading) {
return html`
<div class="loading-container">
<rh-spinner size="lg"></rh-spinner>
<span>Loading pipeline...</span>
</div>
`;
}
if (this.error || !this.pipeline) {
return html`
<div class="error-container">
<rh-icon set="ui" icon="error-fill" style="--rh-icon-size: 32px"></rh-icon>
<span>${this.error || 'Pipeline not found'}</span>
<rh-button @click=${() => navigate('/pipelines')}>
<rh-icon set="ui" icon="arrow-left" slot="icon"></rh-icon>
Back to Pipelines
</rh-button>
</div>
`;
}
const phase = this.pipeline.status?.phase || 'Pending';
return html`
<rh-breadcrumb>
<ol>
<li>
<a
href="/pipelines"
@click=${(e: Event) => {
e.preventDefault();
navigate('/pipelines');
}}
>Pipelines</a
>
</li>
<li><a href="" aria-current="page">${this.pipeline.metadata.name}</a></li>
</ol>
</rh-breadcrumb>
<header class="header">
<div class="header-left">
<h1>
${this.pipeline.metadata.name}
<status-badge status=${phase}></status-badge>
</h1>
<div class="header-meta">
<div class="header-meta-item">
<span class="header-meta-label">Namespace</span>
<span>${this.pipeline.metadata.namespace || 'default'}</span>
</div>
<div class="header-meta-item">
<span class="header-meta-label">Duration</span>
<span
>${this.formatDuration(
this.pipeline.status?.startTime,
this.pipeline.status?.completionTime
)}</span
>
</div>
<div class="header-meta-item">
<span class="header-meta-label">Steps</span>
<span
>${this.pipeline.status?.steps?.filter(s => s.phase === 'Succeeded').length ||
0}/${this.pipeline.spec.steps.length}</span
>
</div>
</div>
</div>
<div class="header-actions">
<rh-button variant="secondary" @click=${this.loadPipeline}>
<rh-icon set="ui" icon="refresh" slot="icon"></rh-icon>
Refresh
</rh-button>
<rh-button variant="danger" @click=${this.deletePipeline}>
<rh-icon set="ui" icon="trash" slot="icon"></rh-icon>
Delete
</rh-button>
</div>
</header>
<rh-tabs
@click=${(e: Event) => {
const tab = (e.target as HTMLElement).closest('rh-tab');
if (tab) {
const tabs = this.shadowRoot?.querySelectorAll('rh-tab');
tabs?.forEach((t, i) => {
if (t === tab) this.activeTab = i;
});
}
}}
>
<rh-tab slot="tab">
<rh-icon set="ui" icon="network" slot="icon"></rh-icon>
Graph
</rh-tab>
<rh-tab-panel> ${this.renderGraph()} </rh-tab-panel>
<rh-tab slot="tab">
<rh-icon set="ui" icon="list" slot="icon"></rh-icon>
Timeline
</rh-tab>
<rh-tab-panel> ${this.renderTimeline()} </rh-tab-panel>
<rh-tab slot="tab">
<rh-icon set="ui" icon="code" slot="icon"></rh-icon>
YAML
</rh-tab>
<rh-tab-panel> ${this.renderYaml()} </rh-tab-panel>
</rh-tabs>
<div class="content">
<div id="tab-content">
<!-- Content rendered by tabs above -->
</div>
</div>
<!-- Side Drawer for Step Details -->
<side-drawer
?open=${!!this.selectedStep}
heading=${this.selectedStep ? `Step: ${this.selectedStep}` : 'Step Details'}
.showWidthToggle=${true}
@close=${this.closeDrawer}
>
${this.selectedStep ? this.renderStepDetails() : ''}
</side-drawer>
`;
}
/**
 * Render <step-detail> for the currently selected step.
 * The step/status objects passed down are cached and only swapped when the
 * selection changes or a UI-relevant status field changes, so the 3s poll
 * does not force needless re-renders of the child component.
 */
private renderStepDetails() {
  const step = this.pipeline?.spec.steps.find(s => s.name === this.selectedStep);
  const status = this.pipeline?.status?.steps?.find(s => s.name === this.selectedStep);
  if (!step) return '';
  // Only update cached values if step changed or status meaningfully changed
  const stepChanged = this.cachedStepName !== this.selectedStep;
  const statusChanged = this.hasStatusChanged(this.cachedStatus, status);
  if (stepChanged || statusChanged || !this.cachedStep) {
    this.cachedStep = step;
    this.cachedStatus = status || null;
    this.cachedStepName = this.selectedStep;
  }
  return html`
    <step-detail
      .step=${this.cachedStep}
      .status=${this.cachedStatus}
      .namespace=${this.pipeline?.metadata.namespace || 'default'}
    ></step-detail>
  `;
}
/**
 * True when the two step statuses differ in any field that affects the UI:
 * phase, job name, or the job's succeeded/failed/completionTime fields.
 */
private hasStatusChanged(
  oldStatus: StepStatus | null | undefined,
  newStatus: StepStatus | null | undefined
): boolean {
  if (!oldStatus && !newStatus) return false;
  if (!oldStatus || !newStatus) return true;
  // Compare only the fields the drawer actually displays.
  if (oldStatus.phase !== newStatus.phase) return true;
  if (oldStatus.jobName !== newStatus.jobName) return true;
  const prevJob = oldStatus.jobStatus;
  const nextJob = newStatus.jobStatus;
  return (
    prevJob?.succeeded !== nextJob?.succeeded ||
    prevJob?.failed !== nextJob?.failed ||
    prevJob?.completionTime !== nextJob?.completionTime
  );
}
private renderGraph() {
if (!this.graph) return html`<div class="graph-container">Loading graph...</div>`;
// Calculate canvas dimensions based on node positions
// Node dimensions from graph-layout.ts: width=240, height=100
const nodeWidth = 240;
const nodeHeight = 100;
const padding = 20;
let maxX = 0;
let maxY = 0;
for (const node of this.graph.nodes) {
maxX = Math.max(maxX, node.position.x + nodeWidth);
maxY = Math.max(maxY, node.position.y + nodeHeight);
}
const canvasWidth = maxX + padding;
const canvasHeight = maxY + padding;
// Build a map of node positions for edge rendering
const nodePositions = new Map<string, { x: number; y: number; width: number }>();
for (const node of this.graph.nodes) {
nodePositions.set(node.id, {
x: node.position.x,
y: node.position.y,
width: nodeWidth,
});
}
// Count outgoing edges per source and incoming edges per target
const outgoingEdges = new Map<string, PipelineEdge[]>();
const incomingEdges = new Map<string, PipelineEdge[]>();
for (const edge of this.graph.edges) {
if (!outgoingEdges.has(edge.source)) outgoingEdges.set(edge.source, []);
if (!incomingEdges.has(edge.target)) incomingEdges.set(edge.target, []);
outgoingEdges.get(edge.source)!.push(edge);
incomingEdges.get(edge.target)!.push(edge);
}
return html`
<div class="graph-container">
<div class="graph-canvas" style="width: ${canvasWidth}px; height: ${canvasHeight}px;">
<!-- Render edges first (behind nodes) -->
<svg class="graph-edges" width="${canvasWidth}" height="${canvasHeight}">
<defs>
<marker id="arrow" markerWidth="6" markerHeight="6" refX="6" refY="3" orient="auto">
<path d="M0,0 L6,3 L0,6 z" class="graph-arrow" />
</marker>
<marker
id="arrow-success"
markerWidth="6"
markerHeight="6"
refX="6"
refY="3"
orient="auto"
>
<path d="M0,0 L6,3 L0,6 z" class="graph-arrow success" />
</marker>
<marker
id="arrow-failure"
markerWidth="6"
markerHeight="6"
refX="6"
refY="3"
orient="auto"
>
<path d="M0,0 L6,3 L0,6 z" class="graph-arrow failure" />
</marker>
</defs>
${this.graph.edges.map((edge: PipelineEdge) => {
const sourceNode = nodePositions.get(edge.source);
const targetNode = nodePositions.get(edge.target);
if (!sourceNode || !targetNode) return '';
// Calculate offset for this edge among all outgoing edges from source
const sourceEdges = outgoingEdges.get(edge.source) || [];
const sourceIndex = sourceEdges.indexOf(edge);
const sourceCount = sourceEdges.length;
// Calculate offset for this edge among all incoming edges to target
const targetEdges = incomingEdges.get(edge.target) || [];
const targetIndex = targetEdges.indexOf(edge);
const targetCount = targetEdges.length;
// Spread connection points around center (max 120px total spread)
const maxSpread = 120;
const centerX = nodeWidth / 2;
// Calculate X position for source (bottom of node)
const sourceSpread = Math.min(maxSpread, (sourceCount - 1) * 25);
const sourceStartX = centerX - sourceSpread / 2;
const sourceSpacing = sourceCount > 1 ? sourceSpread / (sourceCount - 1) : 0;
const sourceOffsetX =
sourceCount > 1 ? sourceStartX + sourceIndex * sourceSpacing : centerX;
const x1 = sourceNode.x + sourceOffsetX;
const y1 = sourceNode.y + nodeHeight; // Start at bottom of source node
// Calculate X position for target (top of node)
const targetSpread = Math.min(maxSpread, (targetCount - 1) * 25);
const targetStartX = centerX - targetSpread / 2;
const targetSpacing = targetCount > 1 ? targetSpread / (targetCount - 1) : 0;
const targetOffsetX =
targetCount > 1 ? targetStartX + targetIndex * targetSpacing : centerX;
const x2 = targetNode.x + targetOffsetX;
const y2 = targetNode.y - 2; // End at top of target node (arrow will point to it)
// Create a curved path with control points offset for smoother curves
const deltaY = y2 - y1;
const controlOffset = Math.max(Math.abs(deltaY) * 0.4, 20);
const path = `M ${x1} ${y1} C ${x1} ${y1 + controlOffset}, ${x2} ${y2 - controlOffset}, ${x2} ${y2}`;
const edgeClass =
edge.type === 'failure' ? 'failure' : edge.type === 'success' ? 'success' : '';
const markerId =
edge.type === 'failure'
? 'arrow-failure'
: edge.type === 'success'
? 'arrow-success'
: 'arrow';
return svg`
<path
class="graph-edge ${edgeClass}"
d="${path}"
marker-end="url(#${markerId})"
/>
`;
})}
</svg>
<!-- Render nodes -->
${this.graph.nodes.map((node: PipelineNode) => {
const phase = node.data.status?.phase?.toLowerCase() || 'pending';
return html`
<div
class="graph-node ${phase} ${this.selectedStep === node.id ? 'selected' : ''}"
style="left: ${node.position.x}px; top: ${node.position.y}px"
tabindex="0"
role="button"
aria-pressed=${this.selectedStep === node.id}
@click=${() => this.selectStep(node.id)}
@keydown=${(e: KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
this.selectStep(node.id);
}
}}
>
<div class="node-header">
<status-badge
status=${node.data.status?.phase || 'Pending'}
size="sm"
></status-badge>
<span class="node-name">${node.data.step.name}</span>
</div>
<div class="node-image">${this.getStepImage(node.data.step)}</div>
</div>
`;
})}
</div>
</div>
`;
}
private renderTimeline() {
if (!this.pipeline) return '';
const steps = this.pipeline.spec.steps.map(specStep => {
const status = this.pipeline!.status?.steps?.find(s => s.name === specStep.name);
return { spec: specStep, status };
});
return html`
<rh-table>
<table class="timeline-table">
<thead>
<tr>
<th scope="col">Step</th>
<th scope="col">Status</th>
<th scope="col">Started</th>
<th scope="col">Duration</th>
<th scope="col">Actions</th>
</tr>
</thead>
<tbody>
${steps.map(
({ spec, status }) => html`
<tr @click=${() => this.selectStep(spec.name)}>
<td><strong>${spec.name}</strong></td>
<td>
<status-badge status=${status?.phase || 'Pending'} size="sm"></status-badge>
</td>
<td>
${status?.jobStatus?.startTime
? new Date(status.jobStatus.startTime).toLocaleTimeString()
: '-'}
</td>
<td>
${this.formatDuration(
status?.jobStatus?.startTime,
status?.jobStatus?.completionTime
)}
</td>
<td class="actions-cell">
<button
class="kebab-btn"
@click=${(e: Event) => this.toggleMenu(e, spec.name)}
aria-label="Actions for ${spec.name}"
aria-haspopup="true"
aria-expanded=${this.openMenuId === spec.name}
>
<rh-icon set="ui" icon="ellipsis-vertical"></rh-icon>
</button>
${this.openMenuId === spec.name
? html`
<div class="kebab-menu" role="menu">
<button
class="kebab-menu-item"
role="menuitem"
@click=${(e: Event) => {
e.stopPropagation();
this.closeMenu();
this.selectStep(spec.name);
}}
>
<rh-icon set="ui" icon="view"></rh-icon>
View Details
</button>
</div>
`
: ''}
</td>
</tr>
`
)}
</tbody>
</table>
</rh-table>
`;
}
private renderYaml() {
if (!this.pipeline) return '';
// Convert to properly formatted YAML
const yamlContent = yamlStringify(this.pipeline, {
indent: 2,
lineWidth: 0, // Don't wrap lines
defaultKeyType: 'PLAIN',
defaultStringType: 'QUOTE_DOUBLE',
});
return html`
<div class="yaml-container">
<code-editor
.value=${yamlContent}
language="yaml"
.readonly=${true}
.showLanguageSelector=${false}
minHeight="400px"
></code-editor>
</div>
`;
}
}
// Register the custom element in the global tag-name map so DOM APIs such as
// document.createElement('pipeline-detail') and querySelector are typed.
declare global {
  interface HTMLElementTagNameMap {
    'pipeline-detail': PipelineDetail;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/monitor/pipeline-list.ts | TypeScript | /**
* Pipeline List - Displays all pipelines with status
* Following RHDS card and list patterns
*/
import { LitElement, html, css } from 'lit';
import { customElement, state } from 'lit/decorators.js';
import type { Pipeline, StepStatus } from '../../types/pipeline.js';
import { k8sClient } from '../../lib/k8s-client.js';
import { navigate } from '../../lib/router.js';
type SortColumn = 'name' | 'status' | 'created';
type SortDirection = 'asc' | 'desc';
@customElement('pipeline-list')
export class PipelineList extends LitElement {
  // Pipelines fetched for the current namespace (refreshed by polling).
  @state() private pipelines: Pipeline[] = [];
  @state() private loading = true;
  @state() private error: string | null = null;
  @state() private searchQuery = '';
  @state() private namespace = 'default';
  @state() private sortColumn: SortColumn = 'created';
  @state() private sortDirection: SortDirection = 'desc';
  // Name of the pipeline whose kebab menu is open, or null when closed.
  @state() private openMenuId: string | null = null;
  @state() private currentPage = 1;
  @state() private pageSize = 10;
  private pollInterval?: ReturnType<typeof setInterval>;
  static styles = css`
    :host {
      display: block;
    }
    .header {
      display: flex;
      justify-content: space-between;
      align-items: center;
      margin-block-end: var(--rh-space-xl, 32px);
      flex-wrap: wrap;
      gap: var(--rh-space-md, 16px);
    }
    h1 {
      font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
      font-size: var(--rh-font-size-heading-lg, 1.75rem);
      font-weight: var(--rh-font-weight-heading-medium, 500);
      margin: 0;
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .controls {
      display: flex;
      gap: var(--rh-space-md, 16px);
      align-items: center;
    }
    .search-input {
      padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
      padding-inline-start: var(--rh-space-xl, 32px);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      font-size: var(--rh-font-size-body-text-md, 1rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      min-width: 280px;
      background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16' viewBox='0 0 24 24' fill='none' stroke='%236a6e73' stroke-width='2'%3E%3Ccircle cx='11' cy='11' r='8'/%3E%3Cpath d='m21 21-4.35-4.35'/%3E%3C/svg%3E");
      background-repeat: no-repeat;
      background-position: var(--rh-space-sm, 8px) center;
      transition:
        border-color 150ms ease,
        box-shadow 150ms ease;
    }
    .search-input:focus {
      outline: none;
      border-color: var(--rh-color-interactive-blue-darker, #0066cc);
      box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
    }
    .pipeline-table-container {
      background: var(--rh-color-surface-lightest, #ffffff);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
    }
    .pipeline-table {
      width: 100%;
      border-collapse: collapse;
    }
    .pipeline-table th,
    .pipeline-table td {
      padding: var(--rh-space-md, 16px);
      text-align: start;
      border-block-end: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .pipeline-table th {
      background: var(--rh-color-surface-lighter, #f5f5f5);
      font-weight: var(--rh-font-weight-body-text-medium, 500);
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      white-space: nowrap;
    }
    .pipeline-table th.sortable {
      cursor: pointer;
      user-select: none;
      transition: background-color 150ms ease;
    }
    .pipeline-table th.sortable:hover {
      background: var(--rh-color-surface-light, #e0e0e0);
    }
    .pipeline-table th.sortable .sort-header {
      display: inline-flex;
      align-items: center;
      gap: var(--rh-space-xs, 4px);
    }
    .pipeline-table th.sortable rh-icon {
      --rh-icon-size: 14px;
      opacity: 0.5;
    }
    .pipeline-table th.sortable.active rh-icon {
      opacity: 1;
      color: var(--rh-color-interactive-blue-darker, #0066cc);
    }
    .pipeline-table tbody tr {
      cursor: pointer;
      transition: background-color 150ms ease;
    }
    .pipeline-table tbody tr:hover {
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    .pipeline-table tbody tr:focus-visible {
      outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: -2px;
    }
    .pipeline-table tbody tr:last-child td {
      border-block-end: none;
    }
    .pipeline-name-cell {
      display: flex;
      align-items: center;
      gap: var(--rh-space-sm, 8px);
    }
    .pipeline-name-cell rh-tag {
      text-transform: uppercase;
      font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
    }
    .pipeline-name {
      font-weight: var(--rh-font-weight-body-text-medium, 500);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .steps-inline {
      display: flex;
      align-items: center;
      gap: var(--rh-space-xs, 4px);
      flex-wrap: wrap;
    }
    .steps-inline rh-tag {
      font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
    }
    @keyframes pulse {
      0%,
      100% {
        opacity: 1;
      }
      50% {
        opacity: 0.4;
      }
    }
    .step-running {
      animation: pulse 1.5s ease-in-out infinite;
    }
    .created-time {
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .loading-container {
      display: flex;
      flex-direction: column;
      justify-content: center;
      align-items: center;
      padding: var(--rh-space-2xl, 48px);
      gap: var(--rh-space-md, 16px);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .error-container {
      padding: var(--rh-space-lg, 24px);
      background: var(--rh-color-red-100, #fce8e6);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
      border-radius: var(--rh-border-radius-default, 3px);
    }
    .error-container h4 {
      margin: 0 0 var(--rh-space-sm, 8px) 0;
      font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
      color: var(--rh-color-red-700, #a30d05);
    }
    .error-container p {
      margin: 0 0 var(--rh-space-md, 16px) 0;
      color: var(--rh-color-red-700, #a30d05);
    }
    .empty-state {
      text-align: center;
      padding: var(--rh-space-2xl, 48px);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .empty-state rh-icon {
      --rh-icon-size: 48px;
      color: var(--rh-color-gray-40, #8a8d90);
      margin-block-end: var(--rh-space-md, 16px);
    }
    .empty-state h3 {
      margin: 0 0 var(--rh-space-md, 16px) 0;
      font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .empty-state p {
      margin: 0 0 var(--rh-space-lg, 24px) 0;
    }
    /* Kebab menu styles */
    .actions-cell {
      position: relative;
      width: 48px;
      text-align: center;
    }
    .kebab-btn {
      display: inline-flex;
      align-items: center;
      justify-content: center;
      width: 32px;
      height: 32px;
      padding: 0;
      background: none;
      border: none;
      border-radius: var(--rh-border-radius-default, 3px);
      cursor: pointer;
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      transition:
        background-color 150ms ease,
        color 150ms ease;
    }
    .kebab-btn:hover {
      background: var(--rh-color-surface-light, #e0e0e0);
      color: var(--rh-color-text-primary-on-light, #151515);
    }
    .kebab-btn:focus {
      outline: none;
    }
    .kebab-btn:focus-visible {
      outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: 2px;
    }
    .kebab-btn rh-icon {
      --rh-icon-size: 18px;
    }
    .kebab-menu {
      position: absolute;
      top: 100%;
      right: 0;
      z-index: 100;
      min-width: 150px;
      background: var(--rh-color-surface-lightest, #ffffff);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      box-shadow: var(--rh-box-shadow-md, 0 4px 6px -1px rgba(21, 21, 21, 0.1));
      padding: var(--rh-space-xs, 4px) 0;
    }
    .kebab-menu-item {
      display: flex;
      align-items: center;
      gap: var(--rh-space-sm, 8px);
      width: 100%;
      padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
      background: none;
      border: none;
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      color: var(--rh-color-text-primary-on-light, #151515);
      cursor: pointer;
      text-align: left;
      transition: background-color 150ms ease;
    }
    .kebab-menu-item:hover {
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    .kebab-menu-item:focus {
      outline: none;
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    .kebab-menu-item.danger {
      color: var(--rh-color-red-700, #a30d05);
    }
    .kebab-menu-item.danger:hover {
      background: var(--rh-color-red-100, #fce8e6);
    }
    .kebab-menu-item rh-icon {
      --rh-icon-size: 16px;
    }
    /* Pagination styles */
    .pagination {
      display: flex;
      justify-content: space-between;
      align-items: center;
      padding: var(--rh-space-md, 16px);
      border-block-start: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
      background: var(--rh-color-surface-lighter, #f5f5f5);
      border-radius: 0 0 var(--rh-border-radius-default, 3px) var(--rh-border-radius-default, 3px);
    }
    .pagination-info {
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .pagination-controls {
      display: flex;
      align-items: center;
      gap: var(--rh-space-xs, 4px);
    }
    .pagination-btn {
      display: inline-flex;
      align-items: center;
      justify-content: center;
      min-width: 32px;
      height: 32px;
      padding: 0 var(--rh-space-sm, 8px);
      background: var(--rh-color-surface-lightest, #ffffff);
      border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
      border-radius: var(--rh-border-radius-default, 3px);
      cursor: pointer;
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      color: var(--rh-color-text-primary-on-light, #151515);
      transition: all 150ms ease;
    }
    .pagination-btn:hover:not(:disabled) {
      background: var(--rh-color-surface-light, #e0e0e0);
      border-color: var(--rh-color-border-strong-on-light, #8a8d90);
    }
    .pagination-btn:disabled {
      opacity: 0.5;
      cursor: not-allowed;
    }
    .pagination-btn.active {
      background: var(--rh-color-interactive-blue-darker, #0066cc);
      border-color: var(--rh-color-interactive-blue-darker, #0066cc);
      color: var(--rh-color-text-primary-on-dark, #ffffff);
    }
    .pagination-btn rh-icon {
      --rh-icon-size: 16px;
    }
    .pagination-ellipsis {
      padding: 0 var(--rh-space-xs, 4px);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
  `;
  connectedCallback() {
    super.connectedCallback();
    this.loadPipelines();
    // Poll for updates every 5 seconds
    this.pollInterval = setInterval(() => this.loadPipelines(), 5000);
    // Listen for namespace changes (removed again in disconnectedCallback)
    window.addEventListener('namespace-change', this.handleNamespaceChange);
    // Close menu when clicking outside
    document.addEventListener('click', this.handleDocumentClick);
  }
  disconnectedCallback() {
    super.disconnectedCallback();
    if (this.pollInterval) {
      clearInterval(this.pollInterval);
    }
    // Remove both document/window listeners so a detached element is not
    // kept alive (and does not keep reacting) after removal.
    window.removeEventListener('namespace-change', this.handleNamespaceChange);
    document.removeEventListener('click', this.handleDocumentClick);
  }
  // Bound as a class field so the exact same reference can be passed to both
  // addEventListener and removeEventListener.
  private handleNamespaceChange = (e: Event) => {
    this.namespace = (e as CustomEvent).detail.namespace;
    // A new namespace is a new list; start from the first page.
    this.currentPage = 1;
    this.loadPipelines();
  };
  private handleDocumentClick = () => {
    this.closeMenu();
  };
  /** Fetch pipelines for the current namespace; called on load and by polling. */
  private async loadPipelines() {
    try {
      this.pipelines = await k8sClient.listPipelines(this.namespace);
      this.error = null;
      // Keep the current page in range if the list shrank (e.g. pipelines
      // were deleted between polls); otherwise the table could show an
      // empty page with no way to recover except clicking another page.
      const maxPage = Math.max(1, this.totalPages);
      if (this.currentPage > maxPage) {
        this.currentPage = maxPage;
      }
    } catch (e) {
      this.error = e instanceof Error ? e.message : 'Failed to load pipelines';
    } finally {
      this.loading = false;
    }
  }
  private navigateToPipeline(pipeline: Pipeline) {
    const namespace = pipeline.metadata.namespace || 'default';
    const name = pipeline.metadata.name;
    navigate(`/pipelines/${namespace}/${name}`);
  }
  // NOTE(review): getCompletedSteps/getStepState are not referenced by this
  // component's templates — confirm they are unused before removing.
  private getCompletedSteps(steps: StepStatus[]): number {
    return steps.filter(
      s => s.phase === 'Succeeded' || s.phase === 'Failed' || s.phase === 'Skipped'
    ).length;
  }
  private getStepState(phase: string): 'inactive' | 'active' | 'complete' | 'warn' | 'fail' {
    switch (phase) {
      case 'Succeeded':
        return 'complete';
      case 'Running':
        return 'active';
      case 'Failed':
        return 'fail';
      case 'Skipped':
        return 'inactive';
      case 'Pending':
      default:
        return 'inactive';
    }
  }
  /** Relative "time ago" for recent timestamps; falls back to a locale date. */
  private formatTime(isoString: string): string {
    const date = new Date(isoString);
    const now = new Date();
    const diffMs = now.getTime() - date.getTime();
    const diffMins = Math.floor(diffMs / 60000);
    const diffHours = Math.floor(diffMins / 60);
    const diffDays = Math.floor(diffHours / 24);
    if (diffMins < 1) return 'just now';
    if (diffMins < 60) return `${diffMins}m ago`;
    if (diffHours < 24) return `${diffHours}h ago`;
    if (diffDays < 7) return `${diffDays}d ago`;
    return date.toLocaleDateString();
  }
  /** Pipelines after applying the search filter and current sort order. */
  private get filteredPipelines(): Pipeline[] {
    let result = this.pipelines;
    // Filter by search query (case-insensitive substring match on name)
    if (this.searchQuery) {
      const query = this.searchQuery.toLowerCase();
      result = result.filter(p => p.metadata.name.toLowerCase().includes(query));
    }
    // Sort a copy so the underlying state array is never mutated.
    result = [...result].sort((a, b) => {
      let comparison = 0;
      switch (this.sortColumn) {
        case 'name':
          comparison = a.metadata.name.localeCompare(b.metadata.name);
          break;
        case 'status':
          comparison = (a.status?.phase || 'Pending').localeCompare(b.status?.phase || 'Pending');
          break;
        case 'created': {
          const aTime = a.metadata.creationTimestamp
            ? new Date(a.metadata.creationTimestamp).getTime()
            : 0;
          const bTime = b.metadata.creationTimestamp
            ? new Date(b.metadata.creationTimestamp).getTime()
            : 0;
          comparison = aTime - bTime;
          break;
        }
      }
      return this.sortDirection === 'asc' ? comparison : -comparison;
    });
    return result;
  }
  private get totalPages(): number {
    return Math.ceil(this.filteredPipelines.length / this.pageSize);
  }
  /** The slice of filtered pipelines shown on the current page. */
  private get paginatedPipelines(): Pipeline[] {
    const start = (this.currentPage - 1) * this.pageSize;
    const end = start + this.pageSize;
    return this.filteredPipelines.slice(start, end);
  }
  /**
   * Page buttons to render: all pages when there are 7 or fewer, otherwise
   * first/last plus a window around the current page with ellipses.
   */
  private get paginationRange(): (number | 'ellipsis')[] {
    const total = this.totalPages;
    const current = this.currentPage;
    const range: (number | 'ellipsis')[] = [];
    if (total <= 7) {
      for (let i = 1; i <= total; i++) range.push(i);
    } else {
      range.push(1);
      if (current > 3) range.push('ellipsis');
      for (let i = Math.max(2, current - 1); i <= Math.min(total - 1, current + 1); i++) {
        range.push(i);
      }
      if (current < total - 2) range.push('ellipsis');
      range.push(total);
    }
    return range;
  }
  private goToPage(page: number) {
    if (page >= 1 && page <= this.totalPages) {
      this.currentPage = page;
    }
  }
  private handleSearchInput(e: Event) {
    this.searchQuery = (e.target as HTMLInputElement).value;
    this.currentPage = 1; // Reset to first page on search
  }
  /** Toggle sort direction on the active column, or switch columns. */
  private toggleSort(column: SortColumn) {
    if (this.sortColumn === column) {
      this.sortDirection = this.sortDirection === 'asc' ? 'desc' : 'asc';
    } else {
      this.sortColumn = column;
      // Newest-first is the natural default for the created column.
      this.sortDirection = column === 'created' ? 'desc' : 'asc';
    }
  }
  private getSortIcon(column: SortColumn): string {
    if (this.sortColumn !== column) return 'arrow-up-down';
    return this.sortDirection === 'asc' ? 'arrow-up' : 'arrow-down';
  }
  private toggleMenu(e: Event, pipelineId: string) {
    // Stop propagation so the document-level click handler does not
    // immediately close the menu we just opened.
    e.stopPropagation();
    this.openMenuId = this.openMenuId === pipelineId ? null : pipelineId;
  }
  private closeMenu() {
    this.openMenuId = null;
  }
  /** Delete a pipeline after user confirmation, then refresh the list. */
  private async deletePipeline(e: Event, pipeline: Pipeline) {
    e.stopPropagation();
    this.closeMenu();
    const confirmed = confirm(
      `Are you sure you want to delete pipeline "${pipeline.metadata.name}"?`
    );
    if (!confirmed) return;
    try {
      await k8sClient.deletePipeline(
        pipeline.metadata.namespace || 'default',
        pipeline.metadata.name
      );
      // Reload the list
      await this.loadPipelines();
    } catch (err) {
      alert(`Failed to delete pipeline: ${err instanceof Error ? err.message : 'Unknown error'}`);
    }
  }
  /**
   * Stash a metadata-stripped copy of the pipeline in sessionStorage and
   * navigate to the builder, which picks it up as a starting point.
   */
  private copyPipeline(e: Event, pipeline: Pipeline) {
    e.stopPropagation();
    this.closeMenu();
    // Create a deep copy of the pipeline, removing K8s metadata
    const copiedPipeline: Pipeline = {
      apiVersion: pipeline.apiVersion,
      kind: pipeline.kind,
      metadata: {
        name: `${pipeline.metadata.name}-copy`,
        namespace: pipeline.metadata.namespace,
        labels: pipeline.metadata.labels ? { ...pipeline.metadata.labels } : undefined,
        annotations: pipeline.metadata.annotations
          ? { ...pipeline.metadata.annotations }
          : undefined,
        // Explicitly exclude uid, resourceVersion, creationTimestamp
      },
      spec: JSON.parse(JSON.stringify(pipeline.spec)), // Deep copy the spec
      // Exclude status - we're creating a new pipeline
    };
    // Store in sessionStorage for the builder to pick up
    sessionStorage.setItem('pipeline-copy', JSON.stringify(copiedPipeline));
    // Navigate to builder
    navigate('/builder');
  }
  render() {
    if (this.loading) {
      return html`
        <div class="loading-container">
          <rh-spinner size="lg"></rh-spinner>
          <span>Loading pipelines...</span>
        </div>
      `;
    }
    if (this.error) {
      return html`
        <div class="error-container">
          <h4>
            <rh-icon set="ui" icon="error-fill"></rh-icon>
            Error loading pipelines
          </h4>
          <p>${this.error}</p>
          <rh-button @click=${this.loadPipelines}>
            <rh-icon set="ui" icon="refresh" slot="icon"></rh-icon>
            Retry
          </rh-button>
        </div>
      `;
    }
    return html`
      <div class="header">
        <h1>Pipelines</h1>
        <div class="controls">
          <input
            type="search"
            class="search-input"
            placeholder="Search pipelines..."
            .value=${this.searchQuery}
            @input=${this.handleSearchInput}
            aria-label="Search pipelines"
          />
          <rh-button @click=${() => navigate('/builder')}>
            <rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
            New Pipeline
          </rh-button>
        </div>
      </div>
      ${this.filteredPipelines.length === 0
        ? html`
            <div class="empty-state">
              <rh-icon set="standard" icon="data"></rh-icon>
              <h3>No pipelines found</h3>
              <p>Create your first pipeline to get started.</p>
              <rh-cta>
                <a href="/builder">Create Pipeline</a>
              </rh-cta>
            </div>
          `
        : html`
            <div class="pipeline-table-container">
              <table class="pipeline-table">
                <thead>
                  <tr>
                    <th
                      class="sortable ${this.sortColumn === 'name' ? 'active' : ''}"
                      @click=${() => this.toggleSort('name')}
                    >
                      <span class="sort-header">
                        Name
                        <rh-icon set="ui" icon=${this.getSortIcon('name')}></rh-icon>
                      </span>
                    </th>
                    <th
                      class="sortable ${this.sortColumn === 'status' ? 'active' : ''}"
                      @click=${() => this.toggleSort('status')}
                    >
                      <span class="sort-header">
                        Status
                        <rh-icon set="ui" icon=${this.getSortIcon('status')}></rh-icon>
                      </span>
                    </th>
                    <th>Steps</th>
                    <th
                      class="sortable ${this.sortColumn === 'created' ? 'active' : ''}"
                      @click=${() => this.toggleSort('created')}
                    >
                      <span class="sort-header">
                        Created
                        <rh-icon set="ui" icon=${this.getSortIcon('created')}></rh-icon>
                      </span>
                    </th>
                    <th class="actions-cell">Actions</th>
                  </tr>
                </thead>
                <tbody>
                  ${this.paginatedPipelines.map(pipeline => this.renderPipelineRow(pipeline))}
                </tbody>
              </table>
              ${this.totalPages > 1 ? this.renderPagination() : ''}
            </div>
          `}
    `;
  }
  /** Footer with "showing X-Y of Z" plus prev/page-number/next controls. */
  private renderPagination() {
    const start = (this.currentPage - 1) * this.pageSize + 1;
    const end = Math.min(this.currentPage * this.pageSize, this.filteredPipelines.length);
    const total = this.filteredPipelines.length;
    return html`
      <div class="pagination">
        <span class="pagination-info"> Showing ${start}-${end} of ${total} pipelines </span>
        <div class="pagination-controls">
          <button
            class="pagination-btn"
            @click=${() => this.goToPage(this.currentPage - 1)}
            ?disabled=${this.currentPage === 1}
            aria-label="Previous page"
          >
            <rh-icon set="ui" icon="caret-left"></rh-icon>
          </button>
          ${this.paginationRange.map(item =>
            item === 'ellipsis'
              ? html`<span class="pagination-ellipsis">...</span>`
              : html`
                  <button
                    class="pagination-btn ${item === this.currentPage ? 'active' : ''}"
                    @click=${() => this.goToPage(item)}
                    aria-label="Page ${item}"
                    aria-current=${item === this.currentPage ? 'page' : 'false'}
                  >
                    ${item}
                  </button>
                `
          )}
          <button
            class="pagination-btn"
            @click=${() => this.goToPage(this.currentPage + 1)}
            ?disabled=${this.currentPage === this.totalPages}
            aria-label="Next page"
          >
            <rh-icon set="ui" icon="caret-right"></rh-icon>
          </button>
        </div>
      </div>
    `;
  }
  /** One table row: name, phase badge, per-step tags, created time, kebab menu. */
  private renderPipelineRow(pipeline: Pipeline) {
    const phase = pipeline.status?.phase || 'Pending';
    const steps = pipeline.status?.steps || [];
    const specSteps = pipeline.spec.steps || [];
    // Merge spec steps with status so steps that haven't run yet still show
    // as Pending tags.
    const mergedSteps = specSteps.map(specStep => {
      const statusStep = steps.find(s => s.name === specStep.name);
      return {
        name: specStep.name,
        phase: statusStep?.phase || 'Pending',
      };
    });
    // Map a step phase to an rh-tag color.
    const stepColor = (stepPhase: string): string => {
      switch (stepPhase) {
        case 'Succeeded':
          return 'green';
        case 'Running':
          return 'cyan';
        case 'Failed':
          return 'red';
        case 'Skipped':
          return 'gray';
        default:
          return 'gray';
      }
    };
    return html`
      <tr
        tabindex="0"
        @click=${() => this.navigateToPipeline(pipeline)}
        @keydown=${(e: KeyboardEvent) => {
          if (e.key === 'Enter' || e.key === ' ') {
            e.preventDefault();
            this.navigateToPipeline(pipeline);
          }
        }}
      >
        <td>
          <div class="pipeline-name-cell">
            <rh-tag compact color="purple">pl</rh-tag>
            <span class="pipeline-name">${pipeline.metadata.name}</span>
          </div>
        </td>
        <td>
          <status-badge status=${phase} size="sm"></status-badge>
        </td>
        <td>
          <div class="steps-inline">
            ${mergedSteps.map(
              step => html`
                <rh-tag
                  compact
                  color=${stepColor(step.phase)}
                  class="${step.phase === 'Running' ? 'step-running' : ''}"
                  title="${step.name}: ${step.phase}"
                >
                  ${step.name}
                </rh-tag>
              `
            )}
          </div>
        </td>
        <td>
          <span class="created-time">
            ${pipeline.metadata.creationTimestamp
              ? this.formatTime(pipeline.metadata.creationTimestamp)
              : '-'}
          </span>
        </td>
        <td class="actions-cell">
          <button
            class="kebab-btn"
            @click=${(e: Event) => this.toggleMenu(e, pipeline.metadata.name)}
            aria-label="Actions for ${pipeline.metadata.name}"
            aria-haspopup="true"
            aria-expanded=${this.openMenuId === pipeline.metadata.name}
          >
            <rh-icon set="ui" icon="ellipsis-vertical"></rh-icon>
          </button>
          ${this.openMenuId === pipeline.metadata.name
            ? html`
                <div class="kebab-menu" role="menu">
                  <button
                    class="kebab-menu-item"
                    role="menuitem"
                    @click=${(e: Event) => this.copyPipeline(e, pipeline)}
                  >
                    <rh-icon set="ui" icon="copy"></rh-icon>
                    Copy
                  </button>
                  <button
                    class="kebab-menu-item danger"
                    role="menuitem"
                    @click=${(e: Event) => this.deletePipeline(e, pipeline)}
                  >
                    <rh-icon set="ui" icon="trash"></rh-icon>
                    Delete
                  </button>
                </div>
              `
            : ''}
        </td>
      </tr>
    `;
  }
}
// Register the custom element in the global tag-name map so DOM APIs such as
// document.createElement('pipeline-list') and querySelector are typed.
declare global {
  interface HTMLElementTagNameMap {
    'pipeline-list': PipelineList;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/monitor/step-detail.ts | TypeScript | /**
* Step Detail - Shows detailed information about a pipeline step
* Following RHDS patterns for panels and data display
*/
import { LitElement, html, css, nothing } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import { guard } from 'lit/directives/guard.js';
import type { PipelineStep, StepStatus } from '../../types/pipeline.js';
import { k8sClient } from '../../lib/k8s-client.js';
// Number of tail lines to show in logs
const LOG_TAIL_LINES = 50;
@customElement('step-detail')
export class StepDetail extends LitElement {
  // Step spec being displayed (supplied by the parent detail view).
  @property({ type: Object }) step?: PipelineStep;
  // Runtime status for the step, if the controller has reported any.
  @property({ type: Object }) status?: StepStatus;
  // Namespace used for pod/log/event lookups.
  @property({ type: String }) namespace = 'default';
  @state() private logs = '';
  @state() private logsLoading = false;
  @state() private logsError: string | null = null;
  // Selected tab index: 0 = Logs, 1 = Spec, 2 = Debug.
  @state() private activeTab = 0;
  // Kubernetes events associated with the step's Job.
  @state() private events: Array<{
    type: string;
    reason: string;
    message: string;
    lastTimestamp: string;
  }> = [];
  // Track if this is the initial load (controls the loading spinner).
  private initialLogsLoad = true;
  // Track the current job name to avoid unnecessary reloads
  private currentJobName: string | null = null;
  // Polling interval for auto-refresh
  private pollInterval: ReturnType<typeof setInterval> | null = null;
  // Component stylesheet: summary grid, tab strip, dark log viewer,
  // spec key/value list, event cards, and the footer action bar.
  static styles = css`
    :host {
      display: flex;
      flex-direction: column;
      height: 100%;
      min-height: 0;
    }
    .summary {
      display: grid;
      grid-template-columns: repeat(2, 1fr);
      gap: var(--rh-space-md, 16px);
      padding-block-end: var(--rh-space-md, 16px);
      margin-block-end: var(--rh-space-md, 16px);
      border-block-end: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .summary-item {
      display: flex;
      flex-direction: column;
      gap: 2px;
    }
    .summary-label {
      font-size: var(--rh-font-size-body-text-xs, 0.75rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      text-transform: uppercase;
      letter-spacing: 0.5px;
      font-weight: var(--rh-font-weight-body-text-medium, 500);
    }
    .summary-value {
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-weight: var(--rh-font-weight-body-text-medium, 500);
    }
    .tabs {
      display: flex;
      margin-block-end: var(--rh-space-md, 16px);
      border-block-end: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .tab {
      display: inline-flex;
      align-items: center;
      gap: var(--rh-space-xs, 4px);
      padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
      background: none;
      border: none;
      border-block-end: 2px solid transparent;
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      cursor: pointer;
      transition: all 150ms ease;
    }
    .tab:hover {
      color: var(--rh-color-text-primary-on-light, #151515);
      background: var(--rh-color-surface-lighter, #f5f5f5);
    }
    .tab:focus-visible {
      outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
      outline-offset: -2px;
    }
    .tab.active {
      color: var(--rh-color-interactive-blue-darker, #0066cc);
      border-block-end-color: var(--rh-color-interactive-blue-darker, #0066cc);
    }
    .tab rh-icon {
      --rh-icon-size: 14px;
    }
    .tab-content {
      flex: 1;
      display: flex;
      flex-direction: column;
      min-height: 0;
      overflow: auto;
    }
    .logs-wrapper {
      flex: 1;
      display: flex;
      flex-direction: column;
      min-height: 0;
    }
    .logs-header {
      display: flex;
      justify-content: space-between;
      align-items: center;
      margin-block-end: var(--rh-space-xs, 4px);
      font-size: var(--rh-font-size-body-text-xs, 0.75rem);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      flex-shrink: 0;
    }
    .logs-container {
      flex: 1;
      background: var(--rh-color-gray-90, #1e1e1e);
      color: var(--rh-color-gray-10, #f0f0f0);
      border-radius: var(--rh-border-radius-default, 3px);
      padding: var(--rh-space-md, 16px);
      font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
      font-size: var(--rh-font-size-body-text-xs, 0.75rem);
      white-space: pre-wrap;
      word-break: break-all;
      min-height: 200px;
      overflow: auto;
    }
    .logs-empty {
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      gap: var(--rh-space-sm, 8px);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      font-style: italic;
      text-align: center;
      min-height: 200px;
      padding: var(--rh-space-lg, 24px);
    }
    .logs-loading {
      display: flex;
      align-items: center;
      justify-content: center;
      gap: var(--rh-space-sm, 8px);
      min-height: 200px;
      padding: var(--rh-space-lg, 24px);
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .spec-list {
      list-style: none;
      padding: 0;
      margin: 0;
    }
    .spec-item {
      display: flex;
      justify-content: space-between;
      padding: var(--rh-space-sm, 8px) 0;
      border-block-end: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
    .spec-item:last-child {
      border-block-end: none;
    }
    .spec-key {
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
    }
    .spec-value {
      font-size: var(--rh-font-size-body-text-sm, 0.875rem);
      font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
      text-align: end;
      word-break: break-all;
      max-width: 200px;
    }
    .events-list {
      display: flex;
      flex-direction: column;
      gap: var(--rh-space-sm, 8px);
    }
    .event-item {
      padding: var(--rh-space-sm, 8px);
      background: var(--rh-color-surface-lighter, #f5f5f5);
      border-radius: var(--rh-border-radius-default, 3px);
      font-size: var(--rh-font-size-body-text-xs, 0.75rem);
    }
    .event-header {
      display: flex;
      justify-content: space-between;
      margin-block-end: var(--rh-space-xs, 4px);
    }
    .event-type {
      font-weight: var(--rh-font-weight-body-text-medium, 500);
    }
    .event-type.normal {
      color: var(--rh-color-green-600, #3e8635);
    }
    .event-type.warning {
      color: var(--rh-color-yellow-600, #f0ab00);
    }
    .event-time {
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .event-message {
      color: var(--rh-color-text-secondary-on-light, #6a6e73);
    }
    .actions {
      display: flex;
      justify-content: space-between;
      gap: var(--rh-space-sm, 8px);
      padding-block-start: var(--rh-space-md, 16px);
      margin-block-start: var(--rh-space-md, 16px);
      border-block-start: var(--rh-border-width-sm, 1px) solid
        var(--rh-color-border-subtle-on-light, #d2d2d2);
    }
  `;
connectedCallback() {
super.connectedCallback();
// Start polling for logs every 5 seconds
this.pollInterval = setInterval(() => {
if (this.status?.jobName && this.activeTab === 0) {
this.loadLogs();
}
}, 5000);
}
disconnectedCallback() {
super.disconnectedCallback();
// Clean up polling
if (this.pollInterval) {
clearInterval(this.pollInterval);
this.pollInterval = null;
}
}
updated(_changedProps: Map<string, unknown>) {
// Only reload if the job name actually changed (compare strings, not object refs)
const newJobName = this.status?.jobName || null;
if (newJobName !== this.currentJobName) {
this.currentJobName = newJobName;
this.initialLogsLoad = true;
if (newJobName) {
this.loadLogs();
this.loadEvents();
}
}
}
  /**
   * Fetch the tail of the step's pod logs and write them into reactive state.
   * Every state write below is guarded by a change check because each
   * assignment to a @state field schedules a re-render; the 5s polling loop
   * would otherwise redraw the log pane even when nothing changed.
   */
  private async loadLogs() {
    if (!this.status?.jobName) return;
    // Only show loading spinner on initial load
    if (this.initialLogsLoad) {
      this.logsLoading = true;
      this.logsError = null;
    }
    try {
      // Find the pod for this job (Jobs label their pods with job-name)
      const pods = await k8sClient.listPods(this.namespace, `job-name=${this.status.jobName}`);
      if (pods.length > 0) {
        // Reads from the first matching pod only.
        const newLogs = await k8sClient.getPodLogs(this.namespace, pods[0].metadata.name, {
          tailLines: LOG_TAIL_LINES,
        });
        // Only update if logs have changed
        if (newLogs !== this.logs) {
          this.logs = newLogs;
          // Clear error on successful update
          if (this.logsError !== null) {
            this.logsError = null;
          }
        }
      } else if (this.logs !== '') {
        // No pod (yet or anymore): clear any stale log text.
        this.logs = '';
      }
    } catch (e) {
      const errorMsg = e instanceof Error ? e.message : 'Failed to load logs';
      // Only update error if it changed
      if (this.logsError !== errorMsg) {
        this.logsError = errorMsg;
      }
    } finally {
      // Only update loading state if it changed
      if (this.logsLoading) {
        this.logsLoading = false;
      }
      this.initialLogsLoad = false;
    }
  }
private async loadEvents() {
if (!this.status?.jobName) return;
try {
const events = await k8sClient.getEvents(
this.namespace,
`involvedObject.name=${this.status.jobName}`
);
this.events = events;
} catch (e) {
console.warn('Failed to load events:', e);
}
}
private formatDuration(): string {
const start = this.status?.jobStatus?.startTime;
const end = this.status?.jobStatus?.completionTime;
if (!start) return '-';
const startDate = new Date(start);
const endDate = end ? new Date(end) : new Date();
const diffMs = endDate.getTime() - startDate.getTime();
const diffSecs = Math.floor(diffMs / 1000);
const mins = Math.floor(diffSecs / 60);
const secs = diffSecs % 60;
if (mins > 0) return `${mins}m ${secs}s`;
return `${secs}s`;
}
private getImage(): string {
return this.step?.jobSpec.template.spec.containers[0]?.image || '-';
}
private async copyToClipboard(text: string) {
try {
await navigator.clipboard.writeText(text);
} catch (e) {
console.error('Failed to copy:', e);
}
}
  /**
   * Main layout: a summary grid (status, duration, job, image), a
   * Logs/Spec/Debug tab strip, the active tab's panel, and a footer with
   * Refresh and Copy Logs actions.
   */
  render() {
    if (!this.step) {
      return html`<div class="tab-content">No step selected</div>`;
    }
    return html`
      <div class="summary">
        <div class="summary-item">
          <span class="summary-label">Status</span>
          <span class="summary-value">
            <status-badge status=${this.status?.phase || 'Pending'} size="sm"></status-badge>
          </span>
        </div>
        <div class="summary-item">
          <span class="summary-label">Duration</span>
          <span class="summary-value">${this.formatDuration()}</span>
        </div>
        <div class="summary-item">
          <span class="summary-label">Job</span>
          <span class="summary-value">${this.status?.jobName || '-'}</span>
        </div>
        <div class="summary-item">
          <span class="summary-label">Image</span>
          <span class="summary-value" title=${this.getImage()}>
            ${this.getImage().split('/').pop()}
          </span>
        </div>
      </div>
      <nav class="tabs" role="tablist">
        <button
          class="tab ${this.activeTab === 0 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 0}
          @click=${() => (this.activeTab = 0)}
        >
          <rh-icon set="standard" icon="command-line"></rh-icon>
          Logs
        </button>
        <button
          class="tab ${this.activeTab === 1 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 1}
          @click=${() => (this.activeTab = 1)}
        >
          <rh-icon set="ui" icon="list"></rh-icon>
          Spec
        </button>
        <button
          class="tab ${this.activeTab === 2 ? 'active' : ''}"
          role="tab"
          aria-selected=${this.activeTab === 2}
          @click=${() => (this.activeTab = 2)}
        >
          <rh-icon set="ui" icon="information"></rh-icon>
          Debug
        </button>
      </nav>
      <div class="tab-content" role="tabpanel">${this.renderTabContent()}</div>
      <footer class="actions">
        <rh-button variant="secondary" @click=${this.loadLogs}>
          <rh-icon set="ui" icon="refresh" slot="icon"></rh-icon>
          Refresh
        </rh-button>
        <rh-button variant="secondary" @click=${() => this.copyToClipboard(this.logs)}>
          <rh-icon set="ui" icon="copy" slot="icon"></rh-icon>
          Copy Logs
        </rh-button>
      </footer>
    `;
  }
  /** Render the panel for the active tab index (0 = Logs, 1 = Spec, 2 = Debug). */
  private renderTabContent() {
    switch (this.activeTab) {
      case 0:
        // Guard logs rendering - only re-render when these values change
        return guard([this.logs, this.logsLoading, this.logsError], () => this.renderLogs());
      case 1:
        return this.renderSpec();
      case 2:
        return this.renderDebug();
      default:
        // Unknown tab index renders nothing (defensive; indices come from buttons above).
        return nothing;
    }
  }
  /**
   * Logs tab: spinner while loading, error banner on failure, empty state when
   * no logs exist, otherwise the tailed log text with a line-count header.
   */
  private renderLogs() {
    if (this.logsLoading) {
      return html`
        <div class="logs-loading">
          <rh-spinner size="md"></rh-spinner>
          <span>Loading logs...</span>
        </div>
      `;
    }
    if (this.logsError) {
      return html`
        <div class="logs-empty">
          <rh-icon set="ui" icon="error-fill"></rh-icon>
          Error: ${this.logsError}
        </div>
      `;
    }
    if (!this.logs) {
      return html`
        <div class="logs-empty">
          <rh-icon set="ui" icon="information"></rh-icon>
          No logs available yet
        </div>
      `;
    }
    // Count non-empty lines only, so a trailing newline is not reported as a line.
    const lineCount = this.logs.split('\n').filter(l => l).length;
    return html`
      <div class="logs-wrapper">
        <div class="logs-header">
          <span
            >Showing last ${lineCount} line${lineCount !== 1 ? 's' : ''} (tail
            ${LOG_TAIL_LINES})</span
          >
        </div>
        <div class="logs-container">${this.logs}</div>
      </div>
    `;
  }
  /**
   * Spec tab: key facts about the Job spec (image, command, restart policy,
   * backoff limit) plus the conditional run-if dependency when configured.
   * Backoff limit falls back to 6, the Kubernetes Job default.
   */
  private renderSpec() {
    const container = this.step?.jobSpec.template.spec.containers[0];
    return html`
      <ul class="spec-list">
        <li class="spec-item">
          <span class="spec-key">Image</span>
          <span class="spec-value">${container?.image || '-'}</span>
        </li>
        <li class="spec-item">
          <span class="spec-key">Command</span>
          <span class="spec-value">${container?.command?.join(' ') || '-'}</span>
        </li>
        <li class="spec-item">
          <span class="spec-key">Restart Policy</span>
          <span class="spec-value"
            >${this.step?.jobSpec.template.spec.restartPolicy || 'Never'}</span
          >
        </li>
        <li class="spec-item">
          <span class="spec-key">Backoff Limit</span>
          <span class="spec-value">${this.step?.jobSpec.backoffLimit ?? 6}</span>
        </li>
        ${this.step?.runIf
          ? html`
              <li class="spec-item">
                <span class="spec-key">Run If</span>
                <span class="spec-value">
                  ${this.step.runIf.condition || 'success'} of ${this.step.runIf.steps.join(', ')}
                </span>
              </li>
            `
          : ''}
      </ul>
    `;
  }
  /**
   * Debug tab: Kubernetes events for the step's Job, newest-loaded order,
   * or an empty state when no events were fetched.
   */
  private renderDebug() {
    // NOTE(review): assumes every event carries `lastTimestamp` — newer-style
    // k8s Events may only set `eventTime`; verify against k8sClient.getEvents.
    return html`
      <div class="events-list">
        ${this.events.length === 0
          ? html`
              <div class="logs-empty">
                <rh-icon set="ui" icon="information"></rh-icon>
                No events available
              </div>
            `
          : this.events.map(
              event => html`
                <article class="event-item">
                  <header class="event-header">
                    <span class="event-type ${event.type.toLowerCase()}">${event.reason}</span>
                    <time class="event-time"
                      >${new Date(event.lastTimestamp).toLocaleTimeString()}</time
                    >
                  </header>
                  <p class="event-message">${event.message}</p>
                </article>
              `
            )}
      </div>
    `;
  }
}
declare global {
interface HTMLElementTagNameMap {
'step-detail': StepDetail;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/shared/code-editor.ts | TypeScript | /**
* Code Editor - A CodeMirror-based editor with syntax highlighting
* Supports multiple languages: bash, python, yaml, javascript
*/
import { LitElement, html, css } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import { EditorState } from '@codemirror/state';
import {
EditorView,
keymap,
lineNumbers,
highlightActiveLineGutter,
drawSelection,
} from '@codemirror/view';
import { defaultKeymap, history, historyKeymap } from '@codemirror/commands';
import {
syntaxHighlighting,
defaultHighlightStyle,
StreamLanguage,
foldGutter,
indentOnInput,
bracketMatching,
foldKeymap,
} from '@codemirror/language';
import { python } from '@codemirror/lang-python';
import { javascript } from '@codemirror/lang-javascript';
import { yaml } from '@codemirror/lang-yaml';
import { shell } from '@codemirror/legacy-modes/mode/shell';
export type EditorLanguage = 'bash' | 'python' | 'yaml' | 'javascript' | 'text';
const languageLabels: Record<EditorLanguage, string> = {
bash: 'Bash/Shell',
python: 'Python',
yaml: 'YAML',
javascript: 'JavaScript',
text: 'Plain Text',
};
@customElement('code-editor')
export class CodeEditor extends LitElement {
@property({ type: String }) value = '';
@property({ type: String }) language: EditorLanguage = 'bash';
@property({ type: String }) placeholder = '';
@property({ type: Boolean }) readonly = false;
@property({ type: Boolean }) showLanguageSelector = true;
@property({ type: String }) minHeight = '150px';
@state() private editorView?: EditorView;
static styles = css`
:host {
display: block;
width: 100%;
}
.editor-container {
display: flex;
flex-direction: column;
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
overflow: hidden;
background: var(--rh-color-surface-lightest, #ffffff);
}
.editor-container:focus-within {
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.editor-toolbar {
display: flex;
align-items: center;
justify-content: flex-end;
padding: var(--rh-space-xs, 4px) var(--rh-space-sm, 8px);
background: var(--rh-color-surface-lighter, #f5f5f5);
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
gap: var(--rh-space-sm, 8px);
}
.language-select {
padding: var(--rh-space-xs, 4px) var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
background: var(--rh-color-surface-lightest, #ffffff);
cursor: pointer;
}
.language-select:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.editor-wrapper {
flex: 1;
min-height: var(--editor-min-height, 150px);
}
.cm-editor {
height: 100%;
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
}
.cm-editor.cm-focused {
outline: none;
}
.cm-scroller {
overflow: auto;
}
.cm-content {
padding: var(--rh-space-sm, 8px) 0;
}
.cm-line {
padding: 0 var(--rh-space-sm, 8px);
}
.cm-gutters {
background: var(--rh-color-surface-lighter, #f5f5f5);
border-inline-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.cm-activeLineGutter {
background: var(--rh-color-surface-light, #e0e0e0);
}
.cm-activeLine {
background: rgba(0, 102, 204, 0.05);
}
/* Fold gutter styling */
.cm-foldGutter {
width: 16px;
}
.cm-foldGutter .cm-gutterElement {
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
padding: 0 2px;
transition: color 150ms ease;
}
.cm-foldGutter .cm-gutterElement:hover {
color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.cm-foldPlaceholder {
background: var(--rh-color-surface-light, #e0e0e0);
border: none;
padding: 0 4px;
border-radius: 2px;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
cursor: pointer;
}
.cm-foldPlaceholder:hover {
background: var(--rh-color-blue-50, #e7f1fa);
color: var(--rh-color-interactive-blue-darker, #0066cc);
}
/* Syntax highlighting colors */
.cm-editor .cm-content {
caret-color: var(--rh-color-text-primary-on-light, #151515);
}
.cm-editor .cm-cursor {
border-left-color: var(--rh-color-text-primary-on-light, #151515);
}
.cm-editor .cm-selectionBackground,
.cm-editor.cm-focused .cm-selectionBackground {
background: rgba(0, 102, 204, 0.2);
}
`;
private getLanguageExtension() {
switch (this.language) {
case 'python':
return python();
case 'javascript':
return javascript();
case 'yaml':
return yaml();
case 'bash':
return StreamLanguage.define(shell);
case 'text':
default:
return [];
}
}
  /**
   * Build a fresh EditorState from the component's current properties.
   * Note: `readonly`, `language`, and the min-height theme are baked in here,
   * so changing them requires recreating the editor (see updated()).
   */
  private createEditorState() {
    return EditorState.create({
      doc: this.value,
      extensions: [
        // Gutter / selection / editing conveniences.
        lineNumbers(),
        highlightActiveLineGutter(),
        foldGutter(),
        drawSelection(),
        indentOnInput(),
        bracketMatching(),
        history(),
        keymap.of([...defaultKeymap, ...historyKeymap, ...foldKeymap]),
        syntaxHighlighting(defaultHighlightStyle),
        this.getLanguageExtension(),
        // Propagate user edits back out as a composed 'change' event; the
        // value check avoids re-dispatching for programmatic syncs.
        EditorView.updateListener.of(update => {
          if (update.docChanged) {
            const newValue = update.state.doc.toString();
            if (newValue !== this.value) {
              this.value = newValue;
              this.dispatchEvent(
                new CustomEvent('change', {
                  detail: { value: newValue },
                  bubbles: true,
                  composed: true,
                })
              );
            }
          }
        }),
        EditorState.readOnly.of(this.readonly),
        EditorView.theme({
          '&': {
            minHeight: this.minHeight,
          },
        }),
      ],
    });
  }
private initEditor() {
const wrapper = this.shadowRoot?.querySelector('.editor-wrapper');
if (!wrapper) return;
// Clean up existing editor
if (this.editorView) {
this.editorView.destroy();
}
this.editorView = new EditorView({
state: this.createEditorState(),
parent: wrapper as HTMLElement,
});
}
  /** Lit lifecycle: create the editor once the shadow DOM first exists. */
  firstUpdated() {
    this.style.setProperty('--editor-min-height', this.minHeight);
    this.initEditor();
  }
updated(changedProperties: Map<string, unknown>) {
if (changedProperties.has('language')) {
// Recreate editor with new language
this.initEditor();
}
if (changedProperties.has('value') && this.editorView) {
const currentValue = this.editorView.state.doc.toString();
if (currentValue !== this.value) {
this.editorView.dispatch({
changes: {
from: 0,
to: currentValue.length,
insert: this.value,
},
});
}
}
if (changedProperties.has('minHeight')) {
this.style.setProperty('--editor-min-height', this.minHeight);
}
}
disconnectedCallback() {
super.disconnectedCallback();
if (this.editorView) {
this.editorView.destroy();
}
}
private handleLanguageChange(e: Event) {
const select = e.target as HTMLSelectElement;
this.language = select.value as EditorLanguage;
this.dispatchEvent(
new CustomEvent('language-change', {
detail: { language: this.language },
bubbles: true,
composed: true,
})
);
}
  /**
   * Editor chrome: optional language-selector toolbar plus the wrapper div that
   * initEditor() mounts the CodeMirror view into.
   */
  render() {
    return html`
      <div class="editor-container">
        ${this.showLanguageSelector
          ? html`
              <div class="editor-toolbar">
                <select
                  class="language-select"
                  .value=${this.language}
                  @change=${this.handleLanguageChange}
                  aria-label="Select language"
                >
                  ${Object.entries(languageLabels).map(
                    ([value, label]) => html`
                      <option value=${value} ?selected=${value === this.language}>${label}</option>
                    `
                  )}
                </select>
              </div>
            `
          : ''}
        <div class="editor-wrapper"></div>
      </div>
    `;
  }
}
declare global {
interface HTMLElementTagNameMap {
'code-editor': CodeEditor;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/shared/side-drawer.ts | TypeScript | /**
* Side Drawer - A drawer that slides in from the right
* Used for editing step details and global settings
*/
import { LitElement, html, css } from 'lit';
import { customElement, property } from 'lit/decorators.js';
@customElement('side-drawer')
export class SideDrawer extends LitElement {
@property({ type: Boolean, reflect: true }) open = false;
@property({ type: Boolean, reflect: true }) wide = false;
@property({ type: String }) heading = '';
@property({ type: Boolean }) showWidthToggle = false;
static styles = css`
:host {
display: block;
}
.overlay {
position: fixed;
inset: 0;
background: rgba(0, 0, 0, 0.5);
opacity: 0;
visibility: hidden;
transition:
opacity 200ms ease,
visibility 200ms ease;
z-index: 200;
}
:host([open]) .overlay {
opacity: 1;
visibility: visible;
}
.drawer {
position: fixed;
top: 0;
right: 0;
bottom: 0;
width: 400px;
max-width: 100vw;
background: var(--rh-color-surface-lightest, #ffffff);
border-inline-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
box-shadow: var(--rh-box-shadow-lg, -4px 0 15px rgba(0, 0, 0, 0.15));
transform: translateX(100%);
transition:
transform 250ms ease,
width 250ms ease;
z-index: 201;
display: flex;
flex-direction: column;
}
:host([open]) .drawer {
transform: translateX(0);
}
:host([wide]) .drawer {
width: 800px;
}
.drawer-header {
display: flex;
align-items: center;
gap: var(--rh-space-md, 16px);
padding: var(--rh-space-md, 16px) var(--rh-space-lg, 24px);
border-block-end: 1px solid var(--rh-color-border-subtle-on-light, #d2d2d2);
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.close-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
border: none;
background: transparent;
cursor: pointer;
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.close-btn:hover {
background: var(--rh-color-surface-light, #e0e0e0);
color: var(--rh-color-text-primary-on-light, #151515);
}
.close-btn:focus-visible {
outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.close-btn rh-icon {
--rh-icon-size: 20px;
}
.width-toggle-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
border: none;
background: transparent;
cursor: pointer;
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.width-toggle-btn:hover {
background: var(--rh-color-surface-light, #e0e0e0);
color: var(--rh-color-text-primary-on-light, #151515);
}
.width-toggle-btn:focus-visible {
outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.width-toggle-btn.active {
background: var(--rh-color-blue-50, #e7f1fa);
color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.width-toggle-btn rh-icon {
--rh-icon-size: 18px;
}
.drawer-title {
flex: 1;
margin: 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-xs, 1.125rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.drawer-content {
flex: 1;
display: flex;
flex-direction: column;
padding: var(--rh-space-lg, 24px);
overflow-y: auto;
min-height: 0;
}
::slotted(*) {
flex: 1;
min-height: 0;
}
@media (max-width: 480px) {
.drawer {
width: 100vw;
}
}
`;
private handleClose() {
this.open = false;
this.dispatchEvent(new CustomEvent('close'));
}
  /** Flip between the normal (400px) and wide (800px) drawer widths. */
  private toggleWidth() {
    this.wide = !this.wide;
  }
private handleOverlayClick(e: Event) {
if (e.target === e.currentTarget) {
this.handleClose();
}
}
private handleKeyDown(e: KeyboardEvent) {
if (e.key === 'Escape') {
this.handleClose();
}
}
connectedCallback() {
super.connectedCallback();
document.addEventListener('keydown', this.handleKeyDown.bind(this));
}
disconnectedCallback() {
super.disconnectedCallback();
document.removeEventListener('keydown', this.handleKeyDown.bind(this));
}
  /**
   * Drawer markup: dimmed overlay (click-to-close), then the dialog itself with
   * a header (close button, heading, optional width toggle) and slotted body.
   */
  render() {
    return html`
      <div class="overlay" @click=${this.handleOverlayClick}></div>
      <aside class="drawer" role="dialog" aria-modal="true" aria-label=${this.heading}>
        <header class="drawer-header">
          <button class="close-btn" @click=${this.handleClose} aria-label="Close drawer">
            <rh-icon set="ui" icon="close"></rh-icon>
          </button>
          <h2 class="drawer-title">${this.heading}</h2>
          ${this.showWidthToggle
            ? html`
                <button
                  class="width-toggle-btn ${this.wide ? 'active' : ''}"
                  @click=${this.toggleWidth}
                  aria-label="${this.wide ? 'Collapse drawer' : 'Expand drawer'}"
                  title="${this.wide ? 'Collapse drawer' : 'Expand drawer'}"
                >
                  <rh-icon set="ui" icon="${this.wide ? 'caret-right' : 'caret-left'}"></rh-icon>
                </button>
              `
            : ''}
        </header>
        <div class="drawer-content">
          <slot></slot>
        </div>
      </aside>
    `;
  }
}
declare global {
interface HTMLElementTagNameMap {
'side-drawer': SideDrawer;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/shared/status-badge.ts | TypeScript | /**
* Status Badge - Displays pipeline/step status with consistent styling
* Uses RHDS rh-tag component
*/
import { LitElement, html, css } from 'lit';
import { customElement, property } from 'lit/decorators.js';
import type { PipelinePhase, StepPhase } from '../../types/pipeline.js';
// Union of all phases a badge can display (pipeline- or step-level).
type StatusType = PipelinePhase | StepPhase;
// Palette accepted by <rh-tag>'s `color` attribute.
type TagColor = 'gray' | 'blue' | 'green' | 'cyan' | 'orange' | 'red' | 'purple' | 'teal';
// Visual treatment (tag color + display label) for one status value.
interface StatusConfig {
  color: TagColor;
  label: string;
}
// Lookup table mapping each phase to its rendering configuration.
const STATUS_CONFIG: Record<StatusType, StatusConfig> = {
  Pending: {
    color: 'gray',
    label: 'Pending',
  },
  Running: {
    color: 'cyan',
    label: 'Running',
  },
  Succeeded: {
    color: 'green',
    label: 'Succeeded',
  },
  Failed: {
    color: 'red',
    label: 'Failed',
  },
  Skipped: {
    color: 'gray',
    label: 'Skipped',
  },
  Suspended: {
    color: 'orange',
    label: 'Suspended',
  },
};
@customElement('status-badge')
export class StatusBadge extends LitElement {
  // Phase to display; unknown values fall back to the Pending config.
  @property({ type: String }) status: StatusType = 'Pending';
  // 'sm' renders the compact rh-tag variant; 'md'/'lg' use the default.
  @property({ type: String }) size: 'sm' | 'md' | 'lg' = 'md';
  static styles = css`
    :host {
      display: inline-flex;
    }
    @keyframes pulse {
      0%,
      100% {
        opacity: 1;
      }
      50% {
        opacity: 0.6;
      }
    }
    .running {
      animation: pulse 1.5s ease-in-out infinite;
    }
  `;
  /** Resolve the color/label pair for the current status (Pending fallback). */
  private getConfig(): StatusConfig {
    return STATUS_CONFIG[this.status] || STATUS_CONFIG.Pending;
  }
  render() {
    const config = this.getConfig();
    const isCompact = this.size === 'sm';
    // Running badges pulse to signal in-progress work.
    const isRunning = this.status === 'Running';
    return html`
      <rh-tag
        ?compact=${isCompact}
        color=${config.color}
        class="${isRunning ? 'running' : ''}"
        role="status"
      >
        ${config.label}
      </rh-tag>
    `;
  }
}
declare global {
interface HTMLElementTagNameMap {
'status-badge': StatusBadge;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/components/storage/pvc-list.ts | TypeScript | /**
* PVC List - Displays all PersistentVolumeClaims with management
* Following RHDS card and list patterns
*/
import { LitElement, html, css } from 'lit';
import { customElement, state } from 'lit/decorators.js';
import { k8sClient } from '../../lib/k8s-client.js';
interface PVC {
metadata: { name: string; namespace: string; creationTimestamp?: string };
spec: {
accessModes?: string[];
storageClassName?: string;
volumeMode?: string;
resources?: { requests?: { storage?: string } };
};
status: { phase: string };
}
interface StorageClass {
metadata: { name: string };
provisioner: string;
}
type SortDirection = 'asc' | 'desc';
@customElement('pvc-list')
export class PVCList extends LitElement {
@state() private pvcs: PVC[] = [];
@state() private storageClasses: StorageClass[] = [];
@state() private loading = true;
@state() private error: string | null = null;
@state() private namespace = 'default';
@state() private showCreateModal = false;
@state() private creating = false;
@state() private createError: string | null = null;
@state() private searchQuery = '';
@state() private sortDirection: SortDirection = 'asc';
@state() private currentPage = 1;
@state() private pageSize = 10;
// Create form state
@state() private newPvcName = '';
@state() private newPvcSize = '1';
@state() private newPvcSizeUnit = 'Gi';
@state() private newPvcVolumeMode = 'Filesystem';
@state() private newPvcStorageClass = '';
@state() private newPvcAccessMode = 'ReadWriteOnce';
private pollInterval?: ReturnType<typeof setInterval>;
static styles = css`
:host {
display: block;
}
.header {
display: flex;
justify-content: space-between;
align-items: center;
margin-block-end: var(--rh-space-xl, 32px);
flex-wrap: wrap;
gap: var(--rh-space-md, 16px);
}
h1 {
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-lg, 1.75rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
margin: 0;
color: var(--rh-color-text-primary-on-light, #151515);
}
.controls {
display: flex;
gap: var(--rh-space-md, 16px);
align-items: center;
}
.search-input {
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
padding-inline-start: var(--rh-space-xl, 32px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-md, 1rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
min-width: 200px;
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16' viewBox='0 0 24 24' fill='none' stroke='%236a6e73' stroke-width='2'%3E%3Ccircle cx='11' cy='11' r='8'/%3E%3Cpath d='m21 21-4.35-4.35'/%3E%3C/svg%3E");
background-repeat: no-repeat;
background-position: var(--rh-space-sm, 8px) center;
transition:
border-color 150ms ease,
box-shadow 150ms ease;
}
.search-input:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.pvc-table-container {
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
}
.pvc-table {
width: 100%;
border-collapse: collapse;
}
.pvc-table th,
.pvc-table td {
padding: var(--rh-space-md, 16px);
text-align: start;
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.pvc-table th {
background: var(--rh-color-surface-lighter, #f5f5f5);
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
white-space: nowrap;
}
.pvc-table th.sortable {
cursor: pointer;
user-select: none;
transition: background-color 150ms ease;
}
.pvc-table th.sortable:hover {
background: var(--rh-color-surface-light, #e0e0e0);
}
.pvc-table th.sortable .sort-header {
display: inline-flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
}
.pvc-table th.sortable rh-icon {
--rh-icon-size: 14px;
opacity: 0.7;
}
.pvc-table tbody tr:last-child td {
border-block-end: none;
}
.pvc-name-cell {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
}
.pvc-name-cell rh-tag {
text-transform: uppercase;
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
.pvc-name {
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
}
.created-time {
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.loading-container {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding: var(--rh-space-2xl, 48px);
gap: var(--rh-space-md, 16px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.error-container {
padding: var(--rh-space-lg, 24px);
background: var(--rh-color-red-100, #fce8e6);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
border-radius: var(--rh-border-radius-default, 3px);
}
.error-container h4 {
margin: 0 0 var(--rh-space-sm, 8px) 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
color: var(--rh-color-red-700, #a30d05);
}
.error-container p {
margin: 0 0 var(--rh-space-md, 16px) 0;
color: var(--rh-color-red-700, #a30d05);
}
.empty-state {
text-align: center;
padding: var(--rh-space-2xl, 48px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.empty-state rh-icon {
--rh-icon-size: 48px;
color: var(--rh-color-gray-40, #8a8d90);
margin-block-end: var(--rh-space-md, 16px);
}
.empty-state h3 {
margin: 0 0 var(--rh-space-md, 16px) 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
}
.empty-state p {
margin: 0 0 var(--rh-space-lg, 24px) 0;
}
.actions-cell {
width: 48px;
text-align: center;
}
.delete-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: none;
border: none;
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.delete-btn:hover {
background: var(--rh-color-red-100, #fce8e6);
color: var(--rh-color-red-700, #a30d05);
}
.delete-btn:focus-visible {
outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.delete-btn rh-icon {
--rh-icon-size: 18px;
}
/* Modal styles */
.modal-overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(21, 21, 21, 0.5);
display: flex;
align-items: center;
justify-content: center;
z-index: 1000;
}
.modal {
background: var(--rh-color-surface-lightest, #ffffff);
border-radius: var(--rh-border-radius-default, 3px);
box-shadow: var(--rh-box-shadow-lg, 0 10px 15px -3px rgba(21, 21, 21, 0.1));
width: 100%;
max-width: 500px;
max-height: 90vh;
overflow-y: auto;
}
.modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: var(--rh-space-lg, 24px);
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.modal-header h2 {
margin: 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-md, 1.5rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
}
.close-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: none;
border: none;
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition: background-color 150ms ease;
}
.close-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.close-btn rh-icon {
--rh-icon-size: 20px;
}
.modal-body {
padding: var(--rh-space-lg, 24px);
}
.form-group {
margin-block-end: var(--rh-space-md, 16px);
}
.form-group:last-child {
margin-block-end: 0;
}
.form-group label {
display: block;
margin-block-end: var(--rh-space-xs, 4px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.form-group .label-hint {
font-weight: normal;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
}
.form-group input,
.form-group select {
width: 100%;
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
box-sizing: border-box;
}
.form-group input:focus,
.form-group select:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.size-row {
display: grid;
grid-template-columns: 1fr auto;
gap: var(--rh-space-sm, 8px);
}
.size-row select {
width: auto;
}
.modal-footer {
display: flex;
justify-content: flex-end;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-lg, 24px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.form-error {
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
background: var(--rh-color-red-100, #fce8e6);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-red-700, #a30d05);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
margin-block-end: var(--rh-space-md, 16px);
}
.storage-class-hint {
font-size: var(--rh-font-size-body-text-xs, 0.75rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
margin-block-start: var(--rh-space-xs, 4px);
}
/* Pagination styles */
.pagination {
display: flex;
justify-content: space-between;
align-items: center;
padding: var(--rh-space-md, 16px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: 0 0 var(--rh-border-radius-default, 3px) var(--rh-border-radius-default, 3px);
}
.pagination-info {
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.pagination-controls {
display: flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
}
.pagination-btn {
display: inline-flex;
align-items: center;
justify-content: center;
min-width: 32px;
height: 32px;
padding: 0 var(--rh-space-sm, 8px);
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
transition: all 150ms ease;
}
.pagination-btn:hover:not(:disabled) {
background: var(--rh-color-surface-light, #e0e0e0);
border-color: var(--rh-color-border-strong-on-light, #8a8d90);
}
.pagination-btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.pagination-btn.active {
background: var(--rh-color-interactive-blue-darker, #0066cc);
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
color: var(--rh-color-text-primary-on-dark, #ffffff);
}
.pagination-btn rh-icon {
--rh-icon-size: 16px;
}
.pagination-ellipsis {
padding: 0 var(--rh-space-xs, 4px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
`;
connectedCallback() {
super.connectedCallback();
this.loadData();
// Poll for updates every 10 seconds
this.pollInterval = setInterval(() => this.loadPVCs(), 10000);
// Listen for namespace changes
window.addEventListener('namespace-change', ((e: CustomEvent) => {
this.namespace = e.detail.namespace;
this.loadPVCs();
}) as EventListener);
}
disconnectedCallback() {
super.disconnectedCallback();
if (this.pollInterval) {
clearInterval(this.pollInterval);
}
}
private async loadData() {
await Promise.all([this.loadPVCs(), this.loadStorageClasses()]);
}
private async loadPVCs() {
try {
this.pvcs = await k8sClient.listPVCs(this.namespace);
this.error = null;
} catch (e) {
this.error = e instanceof Error ? e.message : 'Failed to load PVCs';
} finally {
this.loading = false;
}
}
private async loadStorageClasses() {
try {
this.storageClasses = await k8sClient.listStorageClasses();
} catch (e) {
console.warn('Could not load storage classes:', e);
}
}
private formatTime(isoString: string): string {
const date = new Date(isoString);
const now = new Date();
const diffMs = now.getTime() - date.getTime();
const diffMins = Math.floor(diffMs / 60000);
const diffHours = Math.floor(diffMins / 60);
const diffDays = Math.floor(diffHours / 24);
if (diffMins < 1) return 'just now';
if (diffMins < 60) return `${diffMins}m ago`;
if (diffHours < 24) return `${diffHours}h ago`;
if (diffDays < 7) return `${diffDays}d ago`;
return date.toLocaleDateString();
}
private getPhaseColor(phase: string): string {
switch (phase) {
case 'Bound':
return 'green';
case 'Pending':
return 'orange';
case 'Lost':
return 'red';
default:
return 'gray';
}
}
/** PVCs matching the search query, sorted by name per sortDirection. */
private get filteredPVCs(): PVC[] {
  const query = this.searchQuery.toLowerCase();
  const matches = this.searchQuery
    ? this.pvcs.filter(p => p.metadata.name.toLowerCase().includes(query))
    : this.pvcs;
  const dir = this.sortDirection === 'asc' ? 1 : -1;
  // Sort a copy so the backing array is never mutated.
  return [...matches].sort((a, b) => dir * a.metadata.name.localeCompare(b.metadata.name));
}
/** Number of pages needed for the filtered list at the current page size. */
private get totalPages(): number {
  return Math.ceil(this.filteredPVCs.length / this.pageSize);
}
/** The slice of filtered PVCs shown on the current page. */
private get paginatedPVCs(): PVC[] {
  const first = (this.currentPage - 1) * this.pageSize;
  return this.filteredPVCs.slice(first, first + this.pageSize);
}
/**
 * Page numbers to render, with 'ellipsis' markers when there are more than
 * 7 pages: the first and last page are always shown, plus a one-page window
 * around the current page.
 */
private get paginationRange(): (number | 'ellipsis')[] {
  const total = this.totalPages;
  const current = this.currentPage;
  if (total <= 7) {
    return Array.from({ length: total }, (_, i) => i + 1);
  }
  const range: (number | 'ellipsis')[] = [1];
  if (current > 3) range.push('ellipsis');
  for (let page = Math.max(2, current - 1); page <= Math.min(total - 1, current + 1); page++) {
    range.push(page);
  }
  if (current < total - 2) range.push('ellipsis');
  range.push(total);
  return range;
}
/** Navigate to a page if it is within bounds; out-of-range requests are ignored. */
private goToPage(page: number) {
  if (page < 1 || page > this.totalPages) return;
  this.currentPage = page;
}
/** Track the search box and reset pagination so results start on page 1. */
private handleSearchInput(e: Event) {
  this.searchQuery = (e.target as HTMLInputElement).value;
  this.currentPage = 1;
}
/** Flip the name-sort direction. */
private toggleSort() {
  this.sortDirection = this.sortDirection === 'asc' ? 'desc' : 'asc';
}
/** Open the create dialog with every form field reset to its default. */
private openCreateModal() {
  this.showCreateModal = true;
  this.createError = null;
  this.newPvcName = '';
  this.newPvcSize = '1';
  this.newPvcSizeUnit = 'Gi';
  this.newPvcVolumeMode = 'Filesystem';
  this.newPvcStorageClass = '';
  this.newPvcAccessMode = 'ReadWriteOnce';
}
/** Close the create dialog, clearing any error banner. */
private closeCreateModal() {
  this.showCreateModal = false;
  this.createError = null;
}
/**
 * Validate the create-PVC form and submit it to the cluster.
 * Validation errors are shown inline; on success the modal closes and the
 * list is reloaded.
 */
private async createPVC() {
  // Validate the TRIMMED name — it is what gets submitted below. (The
  // previous code tested the untrimmed value, wrongly rejecting names
  // with surrounding whitespace.)
  const name = this.newPvcName.trim();
  if (!name) {
    this.createError = 'PVC name is required';
    return;
  }
  // Kubernetes object names must be DNS subdomain labels.
  const nameRegex = /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/;
  if (!nameRegex.test(name)) {
    this.createError =
      'Name must consist of lowercase alphanumeric characters or "-", and must start and end with an alphanumeric character';
    return;
  }
  // Guard against an empty/zero/negative size before hitting the API
  // (the number input enforces min="1" but can still be cleared).
  const size = Number(this.newPvcSize);
  if (!Number.isFinite(size) || size < 1) {
    this.createError = 'Size must be a number of at least 1';
    return;
  }
  this.creating = true;
  this.createError = null;
  try {
    await k8sClient.createPVC(this.namespace, {
      name,
      // Empty string means "use the cluster default" — omit the field.
      storageClassName: this.newPvcStorageClass || undefined,
      accessModes: [this.newPvcAccessMode],
      volumeMode: this.newPvcVolumeMode,
      storage: `${this.newPvcSize}${this.newPvcSizeUnit}`,
    });
    this.closeCreateModal();
    await this.loadPVCs();
  } catch (e) {
    this.createError = e instanceof Error ? e.message : 'Failed to create PVC';
  } finally {
    this.creating = false;
  }
}
/** Ask the user for confirmation, then delete the PVC and refresh the list. */
private async deletePVC(pvc: PVC) {
  if (!confirm(`Are you sure you want to delete PVC "${pvc.metadata.name}"?`)) {
    return;
  }
  try {
    await k8sClient.deletePVC(pvc.metadata.namespace || this.namespace, pvc.metadata.name);
    await this.loadPVCs();
  } catch (err) {
    alert(`Failed to delete PVC: ${err instanceof Error ? err.message : 'Unknown error'}`);
  }
}
/**
 * Render the PVC list view: a spinner while the first fetch is in flight,
 * an error banner with a retry button on failure, otherwise the header with
 * search/create controls plus either an empty state or the paginated table.
 */
render() {
// Full-page spinner until the initial load settles (see loadPVCs).
if (this.loading) {
return html`
<div class="loading-container">
<rh-spinner size="lg"></rh-spinner>
<span>Loading PVCs...</span>
</div>
`;
}
// Error banner with a retry action that re-runs loadPVCs.
if (this.error) {
return html`
<div class="error-container">
<h4>
<rh-icon set="ui" icon="error-fill"></rh-icon>
Error loading PVCs
</h4>
<p>${this.error}</p>
<rh-button @click=${this.loadPVCs}>
<rh-icon set="ui" icon="refresh" slot="icon"></rh-icon>
Retry
</rh-button>
</div>
`;
}
// Main view: header + controls, then empty state or table, plus the
// create modal when open.
return html`
<div class="header">
<h1>Persistent Volume Claims</h1>
<div class="controls">
<input
type="search"
class="search-input"
placeholder="Search PVCs..."
.value=${this.searchQuery}
@input=${this.handleSearchInput}
aria-label="Search PVCs"
/>
<rh-button @click=${this.openCreateModal}>
<rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
Create PVC
</rh-button>
</div>
</div>
${this.filteredPVCs.length === 0
? html`
<div class="empty-state">
<rh-icon set="standard" icon="data"></rh-icon>
<h3>${this.searchQuery ? 'No matching PVCs' : 'No PVCs found'}</h3>
<p>
${this.searchQuery
? 'Try adjusting your search query.'
: 'Create a persistent volume claim to store data.'}
</p>
${!this.searchQuery
? html`
<rh-cta>
<a
href="#"
@click=${(e: Event) => {
e.preventDefault();
this.openCreateModal();
}}
>Create PVC</a
>
</rh-cta>
`
: ''}
</div>
`
: html`
<div class="pvc-table-container">
<table class="pvc-table">
<thead>
<tr>
<th class="sortable" @click=${this.toggleSort}>
<span class="sort-header">
Name
<rh-icon
set="ui"
icon="${this.sortDirection === 'asc' ? 'arrow-up' : 'arrow-down'}"
></rh-icon>
</span>
</th>
<th>Status</th>
<th>Size</th>
<th>Access Mode</th>
<th>Volume Mode</th>
<th>Storage Class</th>
<th>Created</th>
<th class="actions-cell">Actions</th>
</tr>
</thead>
<tbody>
${this.paginatedPVCs.map(pvc => this.renderPVCRow(pvc))}
</tbody>
</table>
${this.totalPages > 1 ? this.renderPagination() : ''}
</div>
`}
${this.showCreateModal ? this.renderCreateModal() : ''}
`;
}
/**
 * Render the table footer pagination: a "Showing X-Y of Z" summary plus
 * previous/next buttons and the page-number buttons from paginationRange
 * (with '...' placeholders for ellipsis entries).
 */
private renderPagination() {
// 1-based index of the first and last item visible on this page.
const start = (this.currentPage - 1) * this.pageSize + 1;
const end = Math.min(this.currentPage * this.pageSize, this.filteredPVCs.length);
const total = this.filteredPVCs.length;
return html`
<div class="pagination">
<span class="pagination-info"> Showing ${start}-${end} of ${total} PVCs </span>
<div class="pagination-controls">
<button
class="pagination-btn"
@click=${() => this.goToPage(this.currentPage - 1)}
?disabled=${this.currentPage === 1}
aria-label="Previous page"
>
<rh-icon set="ui" icon="caret-left"></rh-icon>
</button>
${this.paginationRange.map(item =>
item === 'ellipsis'
? html`<span class="pagination-ellipsis">...</span>`
: html`
<button
class="pagination-btn ${item === this.currentPage ? 'active' : ''}"
@click=${() => this.goToPage(item)}
aria-label="Page ${item}"
aria-current=${item === this.currentPage ? 'page' : 'false'}
>
${item}
</button>
`
)}
<button
class="pagination-btn"
@click=${() => this.goToPage(this.currentPage + 1)}
?disabled=${this.currentPage === this.totalPages}
aria-label="Next page"
>
<rh-icon set="ui" icon="caret-right"></rh-icon>
</button>
</div>
</div>
`;
}
/**
 * Render one table row for a PVC: kind tag + name, phase tag (colored via
 * getPhaseColor), requested size, access/volume modes, storage class,
 * relative creation time (formatTime), and a delete action button.
 */
private renderPVCRow(pvc: PVC) {
return html`
<tr>
<td>
<div class="pvc-name-cell">
<rh-tag compact color="teal">pvc</rh-tag>
<span class="pvc-name">${pvc.metadata.name}</span>
</div>
</td>
<td>
<rh-tag compact color=${this.getPhaseColor(pvc.status.phase)}>
${pvc.status.phase}
</rh-tag>
</td>
<td>${pvc.spec.resources?.requests?.storage || '-'}</td>
<td>${pvc.spec.accessModes?.join(', ') || '-'}</td>
<td>${pvc.spec.volumeMode || 'Filesystem'}</td>
<td>${pvc.spec.storageClassName || '(default)'}</td>
<td>
<span class="created-time">
${pvc.metadata.creationTimestamp
? this.formatTime(pvc.metadata.creationTimestamp)
: '-'}
</span>
</td>
<td class="actions-cell">
<button
class="delete-btn"
@click=${() => this.deletePVC(pvc)}
title="Delete PVC"
aria-label="Delete ${pvc.metadata.name}"
>
<rh-icon set="ui" icon="trash"></rh-icon>
</button>
</td>
</tr>
`;
}
/**
 * Render the create-PVC modal dialog: name, size + unit, volume mode,
 * access mode, and optional storage class, with Cancel/Create actions.
 * Clicking the overlay background (but not the dialog itself) closes it.
 * NOTE(review): the <select> elements both bind `.value` and mark an
 * <option> as `selected`; the two can disagree on first render — confirm
 * which should win and drop the redundant one.
 */
private renderCreateModal() {
return html`
<div
class="modal-overlay"
@click=${(e: Event) => {
if (e.target === e.currentTarget) this.closeCreateModal();
}}
>
<div class="modal" role="dialog" aria-modal="true" aria-labelledby="modal-title">
<div class="modal-header">
<h2 id="modal-title">Create Persistent Volume Claim</h2>
<button class="close-btn" @click=${this.closeCreateModal} aria-label="Close">
<rh-icon set="ui" icon="close"></rh-icon>
</button>
</div>
<div class="modal-body">
${this.createError ? html` <div class="form-error">${this.createError}</div> ` : ''}
<div class="form-group">
<label for="pvc-name">Name *</label>
<input
type="text"
id="pvc-name"
.value=${this.newPvcName}
@input=${(e: Event) => (this.newPvcName = (e.target as HTMLInputElement).value)}
placeholder="my-pvc"
/>
</div>
<div class="form-group">
<label for="pvc-size">Size *</label>
<div class="size-row">
<input
type="number"
id="pvc-size"
min="1"
.value=${this.newPvcSize}
@input=${(e: Event) => (this.newPvcSize = (e.target as HTMLInputElement).value)}
/>
<select
id="pvc-size-unit"
.value=${this.newPvcSizeUnit}
@change=${(e: Event) =>
(this.newPvcSizeUnit = (e.target as HTMLSelectElement).value)}
>
<option value="Mi">Mi</option>
<option value="Gi" selected>Gi</option>
<option value="Ti">Ti</option>
</select>
</div>
</div>
<div class="form-group">
<label for="pvc-volume-mode">Volume Mode</label>
<select
id="pvc-volume-mode"
.value=${this.newPvcVolumeMode}
@change=${(e: Event) =>
(this.newPvcVolumeMode = (e.target as HTMLSelectElement).value)}
>
<option value="Filesystem" selected>Filesystem</option>
<option value="Block">Block</option>
</select>
</div>
<div class="form-group">
<label for="pvc-access-mode">Access Mode</label>
<select
id="pvc-access-mode"
.value=${this.newPvcAccessMode}
@change=${(e: Event) =>
(this.newPvcAccessMode = (e.target as HTMLSelectElement).value)}
>
<option value="ReadWriteOnce" selected>ReadWriteOnce (RWO)</option>
<option value="ReadOnlyMany">ReadOnlyMany (ROX)</option>
<option value="ReadWriteMany">ReadWriteMany (RWX)</option>
<option value="ReadWriteOncePod">ReadWriteOncePod (RWOP)</option>
</select>
</div>
<div class="form-group">
<label for="pvc-storage-class">
Storage Class
<span class="label-hint">(optional)</span>
</label>
<select
id="pvc-storage-class"
.value=${this.newPvcStorageClass}
@change=${(e: Event) =>
(this.newPvcStorageClass = (e.target as HTMLSelectElement).value)}
>
<option value="">(cluster default)</option>
${this.storageClasses.map(
sc => html` <option value=${sc.metadata.name}>${sc.metadata.name}</option> `
)}
</select>
${this.storageClasses.length === 0
? html` <div class="storage-class-hint">No storage classes found in cluster</div> `
: ''}
</div>
</div>
<div class="modal-footer">
<rh-button
variant="secondary"
@click=${this.closeCreateModal}
?disabled=${this.creating}
>
Cancel
</rh-button>
<rh-button @click=${this.createPVC} ?disabled=${this.creating}>
${this.creating ? 'Creating...' : 'Create PVC'}
</rh-button>
</div>
</div>
</div>
`;
}
}
// Register <pvc-list> in the global tag-name map so DOM APIs such as
// document.createElement('pvc-list') and querySelector return a typed PVCList.
declare global {
interface HTMLElementTagNameMap {
'pvc-list': PVCList;
}
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
/**
* Secret List - Displays all Secrets with management
* Following RHDS card and list patterns
*/
import { LitElement, html, css } from 'lit';
import { customElement, state } from 'lit/decorators.js';
import { k8sClient } from '../../lib/k8s-client.js';
// Minimal shape of a Kubernetes Secret as consumed by this view.
// `data` values are base64-encoded strings (decoded with atob below).
interface Secret {
metadata: { name: string; namespace: string; creationTimestamp?: string };
type: string;
data?: Record<string, string>;
}
// One editable key/value row in the create/edit forms; `revealed`
// controls whether the value is displayed as plain text.
interface KeyValuePair {
key: string;
value: string;
revealed: boolean;
}
// Sort order for the name column.
type SortDirection = 'asc' | 'desc';
@customElement('secret-list')
export class SecretList extends LitElement {
// List data: secrets for the current namespace, refreshed by polling.
@state() private secrets: Secret[] = [];
// True only until the first load settles; gates the spinner view.
@state() private loading = true;
@state() private error: string | null = null;
@state() private namespace = 'default';
// Table search / sort / pagination state.
@state() private searchQuery = '';
@state() private sortDirection: SortDirection = 'asc';
@state() private currentPage = 1;
@state() private pageSize = 10;
// Create modal state
@state() private showCreateModal = false;
@state() private creating = false;
@state() private createError: string | null = null;
@state() private newSecretName = '';
@state() private newSecretPairs: KeyValuePair[] = [{ key: '', value: '', revealed: false }];
// View modal state
@state() private showViewModal = false;
@state() private viewingSecret: Secret | null = null;
// Keys whose values are currently shown as plain text in the view modal.
@state() private revealedKeys: Set<string> = new Set();
// Edit modal state
@state() private showEditModal = false;
@state() private editingSecret: Secret | null = null;
@state() private editSecretPairs: KeyValuePair[] = [];
@state() private editing = false;
@state() private editError: string | null = null;
// Handle for the 10-second refresh timer, cleared on disconnect.
private pollInterval?: ReturnType<typeof setInterval>;
static styles = css`
:host {
display: block;
}
.header {
display: flex;
justify-content: space-between;
align-items: center;
margin-block-end: var(--rh-space-xl, 32px);
flex-wrap: wrap;
gap: var(--rh-space-md, 16px);
}
h1 {
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-lg, 1.75rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
margin: 0;
color: var(--rh-color-text-primary-on-light, #151515);
}
.controls {
display: flex;
gap: var(--rh-space-md, 16px);
align-items: center;
}
.search-input {
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
padding-inline-start: var(--rh-space-xl, 32px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-md, 1rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
min-width: 200px;
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16' viewBox='0 0 24 24' fill='none' stroke='%236a6e73' stroke-width='2'%3E%3Ccircle cx='11' cy='11' r='8'/%3E%3Cpath d='m21 21-4.35-4.35'/%3E%3C/svg%3E");
background-repeat: no-repeat;
background-position: var(--rh-space-sm, 8px) center;
transition:
border-color 150ms ease,
box-shadow 150ms ease;
}
.search-input:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.secret-table-container {
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
}
.secret-table {
width: 100%;
border-collapse: collapse;
}
.secret-table th,
.secret-table td {
padding: var(--rh-space-md, 16px);
text-align: start;
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.secret-table th {
background: var(--rh-color-surface-lighter, #f5f5f5);
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
white-space: nowrap;
}
.secret-table th.sortable {
cursor: pointer;
user-select: none;
transition: background-color 150ms ease;
}
.secret-table th.sortable:hover {
background: var(--rh-color-surface-light, #e0e0e0);
}
.secret-table th.sortable .sort-header {
display: inline-flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
}
.secret-table th.sortable rh-icon {
--rh-icon-size: 14px;
opacity: 0.7;
}
.secret-table tbody tr {
cursor: pointer;
transition: background-color 150ms ease;
}
.secret-table tbody tr:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.secret-table tbody tr:last-child td {
border-block-end: none;
}
.secret-name-cell {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
}
.secret-name-cell rh-tag {
text-transform: uppercase;
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
.secret-name {
font-weight: var(--rh-font-weight-body-text-medium, 500);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
}
.keys-list {
display: flex;
flex-wrap: wrap;
gap: var(--rh-space-xs, 4px);
}
.keys-list rh-tag {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
.created-time {
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.loading-container {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding: var(--rh-space-2xl, 48px);
gap: var(--rh-space-md, 16px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.error-container {
padding: var(--rh-space-lg, 24px);
background: var(--rh-color-red-100, #fce8e6);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
border-radius: var(--rh-border-radius-default, 3px);
}
.error-container h4 {
margin: 0 0 var(--rh-space-sm, 8px) 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
color: var(--rh-color-red-700, #a30d05);
}
.error-container p {
margin: 0 0 var(--rh-space-md, 16px) 0;
color: var(--rh-color-red-700, #a30d05);
}
.empty-state {
text-align: center;
padding: var(--rh-space-2xl, 48px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.empty-state rh-icon {
--rh-icon-size: 48px;
color: var(--rh-color-gray-40, #8a8d90);
margin-block-end: var(--rh-space-md, 16px);
}
.empty-state h3 {
margin: 0 0 var(--rh-space-md, 16px) 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
}
.empty-state p {
margin: 0 0 var(--rh-space-lg, 24px) 0;
}
.actions-cell {
width: 48px;
text-align: center;
}
.delete-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: none;
border: none;
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition:
background-color 150ms ease,
color 150ms ease;
}
.delete-btn:hover {
background: var(--rh-color-red-100, #fce8e6);
color: var(--rh-color-red-700, #a30d05);
}
.delete-btn:focus-visible {
outline: 2px solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
.delete-btn rh-icon {
--rh-icon-size: 18px;
}
/* Modal styles */
.modal-overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(21, 21, 21, 0.5);
display: flex;
align-items: center;
justify-content: center;
z-index: 1000;
}
.modal {
background: var(--rh-color-surface-lightest, #ffffff);
border-radius: var(--rh-border-radius-default, 3px);
box-shadow: var(--rh-box-shadow-lg, 0 10px 15px -3px rgba(21, 21, 21, 0.1));
width: 100%;
max-width: 600px;
max-height: 90vh;
overflow-y: auto;
}
.modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: var(--rh-space-lg, 24px);
border-block-end: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.modal-header h2 {
margin: 0;
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
font-size: var(--rh-font-size-heading-md, 1.5rem);
font-weight: var(--rh-font-weight-heading-medium, 500);
}
.close-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: none;
border: none;
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition: background-color 150ms ease;
}
.close-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.close-btn rh-icon {
--rh-icon-size: 20px;
}
.modal-body {
padding: var(--rh-space-lg, 24px);
}
.form-group {
margin-block-end: var(--rh-space-md, 16px);
}
.form-group:last-child {
margin-block-end: 0;
}
.form-group label {
display: block;
margin-block-end: var(--rh-space-xs, 4px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
}
.form-group input {
width: 100%;
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
box-sizing: border-box;
}
.form-group input:focus {
outline: none;
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
box-shadow: 0 0 0 1px var(--rh-color-interactive-blue-darker, #0066cc);
}
.modal-footer {
display: flex;
justify-content: flex-end;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-lg, 24px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.form-error {
padding: var(--rh-space-sm, 8px) var(--rh-space-md, 16px);
background: var(--rh-color-red-100, #fce8e6);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-red-500, #c9190b);
border-radius: var(--rh-border-radius-default, 3px);
color: var(--rh-color-red-700, #a30d05);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
margin-block-end: var(--rh-space-md, 16px);
}
/* Key-value pairs styling */
.kv-list {
display: flex;
flex-direction: column;
gap: var(--rh-space-sm, 8px);
}
.kv-row {
display: grid;
grid-template-columns: 1fr 1fr auto auto;
gap: var(--rh-space-sm, 8px);
align-items: start;
}
.kv-row input {
width: 100%;
}
.icon-btn {
display: inline-flex;
align-items: center;
justify-content: center;
padding: var(--rh-space-sm, 8px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
background: var(--rh-color-surface-lightest, #ffffff);
cursor: pointer;
transition: all 150ms ease;
}
.icon-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
.icon-btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.icon-btn.danger:hover:not(:disabled) {
background: var(--rh-color-red-100, #fce8e6);
border-color: var(--rh-color-red-500, #c9190b);
color: var(--rh-color-red-700, #a30d05);
}
.icon-btn.revealed {
background: var(--rh-color-blue-50, #e7f1fa);
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.icon-btn rh-icon {
--rh-icon-size: 16px;
}
/* View secret modal */
.secret-data-list {
display: flex;
flex-direction: column;
gap: var(--rh-space-md, 16px);
}
.secret-data-item {
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: var(--rh-border-radius-default, 3px);
padding: var(--rh-space-md, 16px);
}
.secret-data-key {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-weight: var(--rh-font-weight-body-text-medium, 500);
color: var(--rh-color-text-primary-on-light, #151515);
margin-block-end: var(--rh-space-sm, 8px);
}
.secret-data-value-row {
display: flex;
align-items: center;
gap: var(--rh-space-sm, 8px);
}
.secret-data-value {
flex: 1;
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
word-break: break-all;
background: var(--rh-color-surface-lightest, #ffffff);
padding: var(--rh-space-sm, 8px);
border-radius: var(--rh-border-radius-default, 3px);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.secret-data-value.hidden {
letter-spacing: 0.2em;
}
.reveal-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 32px;
height: 32px;
padding: 0;
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
color: var(--rh-color-text-secondary-on-light, #6a6e73);
transition: all 150ms ease;
flex-shrink: 0;
}
.reveal-btn:hover {
background: var(--rh-color-surface-lighter, #f5f5f5);
color: var(--rh-color-text-primary-on-light, #151515);
}
.reveal-btn.revealed {
background: var(--rh-color-blue-50, #e7f1fa);
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
color: var(--rh-color-interactive-blue-darker, #0066cc);
}
.reveal-btn rh-icon {
--rh-icon-size: 16px;
}
.empty-secret {
color: var(--rh-color-text-secondary-on-light, #6a6e73);
font-style: italic;
}
.view-modal-footer {
display: flex;
justify-content: space-between;
gap: var(--rh-space-sm, 8px);
padding: var(--rh-space-lg, 24px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
}
.view-modal-footer .left-actions {
display: flex;
gap: var(--rh-space-sm, 8px);
}
/* Pagination styles */
.pagination {
display: flex;
justify-content: space-between;
align-items: center;
padding: var(--rh-space-md, 16px);
border-block-start: var(--rh-border-width-sm, 1px) solid
var(--rh-color-border-subtle-on-light, #d2d2d2);
background: var(--rh-color-surface-lighter, #f5f5f5);
border-radius: 0 0 var(--rh-border-radius-default, 3px) var(--rh-border-radius-default, 3px);
}
.pagination-info {
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.pagination-controls {
display: flex;
align-items: center;
gap: var(--rh-space-xs, 4px);
}
.pagination-btn {
display: inline-flex;
align-items: center;
justify-content: center;
min-width: 32px;
height: 32px;
padding: 0 var(--rh-space-sm, 8px);
background: var(--rh-color-surface-lightest, #ffffff);
border: var(--rh-border-width-sm, 1px) solid var(--rh-color-border-subtle-on-light, #d2d2d2);
border-radius: var(--rh-border-radius-default, 3px);
cursor: pointer;
font-size: var(--rh-font-size-body-text-sm, 0.875rem);
font-family: var(--rh-font-family-body-text, 'Red Hat Text', sans-serif);
color: var(--rh-color-text-primary-on-light, #151515);
transition: all 150ms ease;
}
.pagination-btn:hover:not(:disabled) {
background: var(--rh-color-surface-light, #e0e0e0);
border-color: var(--rh-color-border-strong-on-light, #8a8d90);
}
.pagination-btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.pagination-btn.active {
background: var(--rh-color-interactive-blue-darker, #0066cc);
border-color: var(--rh-color-interactive-blue-darker, #0066cc);
color: var(--rh-color-text-primary-on-dark, #ffffff);
}
.pagination-btn rh-icon {
--rh-icon-size: 16px;
}
.pagination-ellipsis {
padding: 0 var(--rh-space-xs, 4px);
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
`;
/** Lifecycle: load secrets, start the 10s polling timer, and subscribe to app-wide namespace changes. */
connectedCallback() {
  super.connectedCallback();
  this.loadSecrets();
  // Refresh periodically so externally-created secrets show up.
  this.pollInterval = setInterval(() => this.loadSecrets(), 10000);
  // NOTE(review): this listener is never removed in disconnectedCallback,
  // so it outlives the element if it is detached — confirm intended lifetime.
  const onNamespaceChange = ((e: CustomEvent) => {
    this.namespace = e.detail.namespace;
    this.loadSecrets();
  }) as EventListener;
  window.addEventListener('namespace-change', onNamespaceChange);
}
/** Lifecycle: stop polling when the element is removed from the DOM. */
disconnectedCallback() {
  super.disconnectedCallback();
  if (this.pollInterval !== undefined) {
    clearInterval(this.pollInterval);
  }
}
/**
 * Fetch secrets for the current namespace.
 * On failure the error message is surfaced in the UI; in all cases the
 * initial-loading flag is cleared.
 */
private async loadSecrets() {
  try {
    const secrets = await k8sClient.listSecrets(this.namespace);
    this.secrets = secrets;
    this.error = null;
  } catch (e) {
    this.error = e instanceof Error ? e.message : 'Failed to load secrets';
  } finally {
    this.loading = false;
  }
}
/**
 * Render an ISO timestamp as a short relative string ("5m ago", "2h ago",
 * "3d ago"), falling back to a locale date once it is a week old or more.
 * NOTE(review): duplicated in the PVC list component — consider a shared helper.
 */
private formatTime(isoString: string): string {
  const elapsedMs = Date.now() - new Date(isoString).getTime();
  const minutes = Math.floor(elapsedMs / 60000);
  if (minutes < 1) return 'just now';
  if (minutes < 60) return `${minutes}m ago`;
  const hours = Math.floor(minutes / 60);
  if (hours < 24) return `${hours}h ago`;
  const days = Math.floor(hours / 24);
  if (days < 7) return `${days}d ago`;
  return new Date(isoString).toLocaleDateString();
}
/** rh-tag color for a well-known secret type; neutral gray otherwise. */
private getTypeColor(type: string): string {
  const colors = new Map<string, string>([
    ['Opaque', 'purple'],
    ['kubernetes.io/service-account-token', 'blue'],
    ['kubernetes.io/dockerconfigjson', 'teal'],
    ['kubernetes.io/tls', 'green'],
  ]);
  return colors.get(type) ?? 'gray';
}
/** Short human label for a secret type; unknown types fall back to the path suffix. */
private getTypeLabel(type: string): string {
  const labels = new Map<string, string>([
    ['Opaque', 'Opaque'],
    ['kubernetes.io/service-account-token', 'SA Token'],
    ['kubernetes.io/dockerconfigjson', 'Docker'],
    ['kubernetes.io/tls', 'TLS'],
  ]);
  return labels.get(type) ?? (type.split('/').pop() || type);
}
/** Decode a base64 secret value; never throws on malformed input. */
private decodeBase64(encoded: string): string {
  try {
    return atob(encoded);
  } catch {
    return '[decode error]';
  }
}
/** Secrets matching the search query, sorted by name per sortDirection. */
private get filteredSecrets(): Secret[] {
  const query = this.searchQuery.toLowerCase();
  const matches = this.searchQuery
    ? this.secrets.filter(s => s.metadata.name.toLowerCase().includes(query))
    : this.secrets;
  const dir = this.sortDirection === 'asc' ? 1 : -1;
  // Sort a copy so the backing array is never mutated.
  return [...matches].sort((a, b) => dir * a.metadata.name.localeCompare(b.metadata.name));
}
/** Number of pages needed for the filtered list at the current page size. */
private get totalPages(): number {
  return Math.ceil(this.filteredSecrets.length / this.pageSize);
}
/** The slice of filtered secrets shown on the current page. */
private get paginatedSecrets(): Secret[] {
  const first = (this.currentPage - 1) * this.pageSize;
  return this.filteredSecrets.slice(first, first + this.pageSize);
}
/**
 * Page numbers to render, with 'ellipsis' markers when there are more than
 * 7 pages: the first and last page are always shown, plus a one-page window
 * around the current page.
 */
private get paginationRange(): (number | 'ellipsis')[] {
  const total = this.totalPages;
  const current = this.currentPage;
  if (total <= 7) {
    return Array.from({ length: total }, (_, i) => i + 1);
  }
  const range: (number | 'ellipsis')[] = [1];
  if (current > 3) range.push('ellipsis');
  for (let page = Math.max(2, current - 1); page <= Math.min(total - 1, current + 1); page++) {
    range.push(page);
  }
  if (current < total - 2) range.push('ellipsis');
  range.push(total);
  return range;
}
/** Navigate to a page if it is within bounds; out-of-range requests are ignored. */
private goToPage(page: number) {
  if (page < 1 || page > this.totalPages) return;
  this.currentPage = page;
}
/** Track the search box and reset pagination so results start on page 1. */
private handleSearchInput(e: Event) {
  this.searchQuery = (e.target as HTMLInputElement).value;
  this.currentPage = 1;
}
/** Flip the name-sort direction. */
private toggleSort() {
  this.sortDirection = this.sortDirection === 'asc' ? 'desc' : 'asc';
}
// Create modal methods
/** Open the create dialog with an empty name and a single blank row. */
private openCreateModal() {
  this.showCreateModal = true;
  this.createError = null;
  this.newSecretName = '';
  this.newSecretPairs = [{ key: '', value: '', revealed: false }];
}
/** Close the create dialog, clearing any error banner. */
private closeCreateModal() {
  this.showCreateModal = false;
  this.createError = null;
}
/** Append a blank key/value row to the create form. */
private addCreateKeyValuePair() {
  this.newSecretPairs = [...this.newSecretPairs, { key: '', value: '', revealed: false }];
}
/** Remove a row; the form always keeps at least one (blank) row. */
private removeCreateKeyValuePair(index: number) {
  const remaining = this.newSecretPairs.filter((_, i) => i !== index);
  this.newSecretPairs =
    remaining.length > 0 ? remaining : [{ key: '', value: '', revealed: false }];
}
/** Update one field of a row (immutably, so Lit observes the change). */
private updateCreateKeyValuePair(index: number, field: 'key' | 'value', value: string) {
  this.newSecretPairs = this.newSecretPairs.map((pair, i) =>
    i === index ? { ...pair, [field]: value } : pair
  );
}
/** Toggle plain-text display of a row's value. */
private toggleCreateReveal(index: number) {
  this.newSecretPairs = this.newSecretPairs.map((pair, i) =>
    i === index ? { ...pair, revealed: !pair.revealed } : pair
  );
}
/**
 * Validate the create-secret form and submit it to the cluster.
 * Empty rows are dropped, keys are trimmed, and duplicate keys are rejected.
 * Validation errors are shown inline; on success the modal closes and the
 * list is reloaded.
 */
private async createSecret() {
  // Validate the TRIMMED name — it is what gets submitted below. (The
  // previous code tested the untrimmed value, wrongly rejecting names
  // with surrounding whitespace.)
  const name = this.newSecretName.trim();
  if (!name) {
    this.createError = 'Secret name is required';
    return;
  }
  // Kubernetes object names must be DNS subdomain labels.
  const nameRegex = /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/;
  if (!nameRegex.test(name)) {
    this.createError =
      'Name must consist of lowercase alphanumeric characters or "-", and must start and end with an alphanumeric character';
    return;
  }
  // Drop rows with empty keys; require at least one real pair.
  const validPairs = this.newSecretPairs.filter(p => p.key.trim());
  if (validPairs.length === 0) {
    this.createError = 'At least one key-value pair is required';
    return;
  }
  // Trimmed keys must be unique, since they become map keys below.
  const keys = validPairs.map(p => p.key.trim());
  if (new Set(keys).size !== keys.length) {
    this.createError = 'Duplicate keys are not allowed';
    return;
  }
  this.creating = true;
  this.createError = null;
  try {
    const data: Record<string, string> = {};
    for (const pair of validPairs) {
      data[pair.key.trim()] = pair.value;
    }
    await k8sClient.createSecret(this.namespace, { name, data });
    this.closeCreateModal();
    await this.loadSecrets();
  } catch (e) {
    this.createError = e instanceof Error ? e.message : 'Failed to create secret';
  } finally {
    this.creating = false;
  }
}
// View modal methods
/** Open the read-only view dialog with every value hidden. */
private openViewModal(secret: Secret) {
  this.viewingSecret = secret;
  this.showViewModal = true;
  this.revealedKeys = new Set();
}
/** Close the view dialog and re-hide all values. */
private closeViewModal() {
  this.showViewModal = false;
  this.viewingSecret = null;
  this.revealedKeys = new Set();
}
/** Toggle visibility of one key's value (fresh Set so Lit observes the change). */
private toggleReveal(key: string) {
  const next = new Set(this.revealedKeys);
  // delete() returns false when the key was absent — add it in that case.
  if (!next.delete(key)) {
    next.add(key);
  }
  this.revealedKeys = next;
}
// Edit modal methods
private openEditModal(secret: Secret) {
this.closeViewModal();
this.editingSecret = secret;
this.editError = null;
// Convert secret data to key-value pairs
const data = secret.data || {};
this.editSecretPairs = Object.keys(data).map(key => ({
key,
value: this.decodeBase64(data[key]),
revealed: false,
}));
if (this.editSecretPairs.length === 0) {
this.editSecretPairs = [{ key: '', value: '', revealed: false }];
}
this.showEditModal = true;
}
private closeEditModal() {
this.showEditModal = false;
this.editingSecret = null;
this.editSecretPairs = [];
this.editError = null;
}
private addEditKeyValuePair() {
this.editSecretPairs = [...this.editSecretPairs, { key: '', value: '', revealed: false }];
}
private removeEditKeyValuePair(index: number) {
this.editSecretPairs = this.editSecretPairs.filter((_, i) => i !== index);
if (this.editSecretPairs.length === 0) {
this.editSecretPairs = [{ key: '', value: '', revealed: false }];
}
}
private updateEditKeyValuePair(index: number, field: 'key' | 'value', value: string) {
this.editSecretPairs = this.editSecretPairs.map((pair, i) =>
i === index ? { ...pair, [field]: value } : pair
);
}
private toggleEditReveal(index: number) {
this.editSecretPairs = this.editSecretPairs.map((pair, i) =>
i === index ? { ...pair, revealed: !pair.revealed } : pair
);
}
private async saveEditSecret() {
if (!this.editingSecret) return;
// Filter out empty pairs and validate
const validPairs = this.editSecretPairs.filter(p => p.key.trim());
if (validPairs.length === 0) {
this.editError = 'At least one key-value pair is required';
return;
}
// Check for duplicate keys
const keys = validPairs.map(p => p.key.trim());
if (new Set(keys).size !== keys.length) {
this.editError = 'Duplicate keys are not allowed';
return;
}
this.editing = true;
this.editError = null;
try {
const data: Record<string, string> = {};
for (const pair of validPairs) {
data[pair.key.trim()] = pair.value;
}
await k8sClient.updateSecret(
this.editingSecret.metadata.namespace || this.namespace,
this.editingSecret.metadata.name,
data
);
this.closeEditModal();
await this.loadSecrets();
} catch (e) {
this.editError = e instanceof Error ? e.message : 'Failed to update secret';
} finally {
this.editing = false;
}
}
private async deleteSecret(e: Event, secret: Secret) {
e.stopPropagation();
const confirmed = confirm(`Are you sure you want to delete secret "${secret.metadata.name}"?`);
if (!confirmed) return;
try {
await k8sClient.deleteSecret(
secret.metadata.namespace || this.namespace,
secret.metadata.name
);
await this.loadSecrets();
} catch (err) {
alert(`Failed to delete secret: ${err instanceof Error ? err.message : 'Unknown error'}`);
}
}
  /**
   * Top-level render: loading spinner → error panel → header with search and
   * "Create Secret", then either an empty state or the paginated table.
   * Any open modal (create / view / edit) is appended last.
   */
  render() {
    // Loading and error are exclusive full-page states.
    if (this.loading) {
      return html`
        <div class="loading-container">
          <rh-spinner size="lg"></rh-spinner>
          <span>Loading secrets...</span>
        </div>
      `;
    }
    if (this.error) {
      return html`
        <div class="error-container">
          <h4>
            <rh-icon set="ui" icon="error-fill"></rh-icon>
            Error loading secrets
          </h4>
          <p>${this.error}</p>
          <rh-button @click=${this.loadSecrets}>
            <rh-icon set="ui" icon="sync" slot="icon"></rh-icon>
            Retry
          </rh-button>
        </div>
      `;
    }
    // Normal content: header + (empty state | table) + any open modal.
    return html`
      <div class="header">
        <h1>Secrets</h1>
        <div class="controls">
          <input
            type="search"
            class="search-input"
            placeholder="Search secrets..."
            .value=${this.searchQuery}
            @input=${this.handleSearchInput}
            aria-label="Search secrets"
          />
          <rh-button @click=${this.openCreateModal}>
            <rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
            Create Secret
          </rh-button>
        </div>
      </div>
      ${this.filteredSecrets.length === 0
        ? html`
            <div class="empty-state">
              <rh-icon set="ui" icon="lock"></rh-icon>
              <h3>${this.searchQuery ? 'No matching secrets' : 'No secrets found'}</h3>
              <p>
                ${this.searchQuery
                  ? 'Try adjusting your search query.'
                  : 'Create a secret to store sensitive data.'}
              </p>
              ${!this.searchQuery
                ? html`
                    <rh-cta>
                      <a
                        href="#"
                        @click=${(e: Event) => {
                          e.preventDefault();
                          this.openCreateModal();
                        }}
                        >Create Secret</a
                      >
                    </rh-cta>
                  `
                : ''}
            </div>
          `
        : html`
            <div class="secret-table-container">
              <table class="secret-table">
                <thead>
                  <tr>
                    <th class="sortable" @click=${this.toggleSort}>
                      <span class="sort-header">
                        Name
                        <rh-icon
                          set="ui"
                          icon="${this.sortDirection === 'asc' ? 'arrow-up' : 'arrow-down'}"
                        ></rh-icon>
                      </span>
                    </th>
                    <th>Type</th>
                    <th>Keys</th>
                    <th>Created</th>
                    <th class="actions-cell">Actions</th>
                  </tr>
                </thead>
                <tbody>
                  ${this.paginatedSecrets.map(secret => this.renderSecretRow(secret))}
                </tbody>
              </table>
              ${this.totalPages > 1 ? this.renderPagination() : ''}
            </div>
          `}
      ${this.showCreateModal ? this.renderCreateModal() : ''}
      ${this.showViewModal && this.viewingSecret ? this.renderViewModal() : ''}
      ${this.showEditModal && this.editingSecret ? this.renderEditModal() : ''}
    `;
  }
  /**
   * Render the "Showing X-Y of Z" summary plus previous / numbered / next
   * page controls. `paginationRange` may contain 'ellipsis' gap markers.
   */
  private renderPagination() {
    // 1-based indices of the first and last rows visible on this page
    const start = (this.currentPage - 1) * this.pageSize + 1;
    const end = Math.min(this.currentPage * this.pageSize, this.filteredSecrets.length);
    const total = this.filteredSecrets.length;
    return html`
      <div class="pagination">
        <span class="pagination-info"> Showing ${start}-${end} of ${total} secrets </span>
        <div class="pagination-controls">
          <button
            class="pagination-btn"
            @click=${() => this.goToPage(this.currentPage - 1)}
            ?disabled=${this.currentPage === 1}
            aria-label="Previous page"
          >
            <rh-icon set="ui" icon="caret-left"></rh-icon>
          </button>
          ${this.paginationRange.map(item =>
            item === 'ellipsis'
              ? html`<span class="pagination-ellipsis">...</span>`
              : html`
                  <button
                    class="pagination-btn ${item === this.currentPage ? 'active' : ''}"
                    @click=${() => this.goToPage(item)}
                    aria-label="Page ${item}"
                    aria-current=${item === this.currentPage ? 'page' : 'false'}
                  >
                    ${item}
                  </button>
                `
          )}
          <button
            class="pagination-btn"
            @click=${() => this.goToPage(this.currentPage + 1)}
            ?disabled=${this.currentPage === this.totalPages}
            aria-label="Next page"
          >
            <rh-icon set="ui" icon="caret-right"></rh-icon>
          </button>
        </div>
      </div>
    `;
  }
  /**
   * Render one table row for `secret`. Clicking the row opens the view
   * modal; the trash button deletes (propagation stopped in deleteSecret).
   * At most 5 key names are shown, with a "+N more" overflow tag.
   */
  private renderSecretRow(secret: Secret) {
    const keys = secret.data ? Object.keys(secret.data) : [];
    return html`
      <tr @click=${() => this.openViewModal(secret)}>
        <td>
          <div class="secret-name-cell">
            <rh-tag compact color="orange">sec</rh-tag>
            <span class="secret-name">${secret.metadata.name}</span>
          </div>
        </td>
        <td>
          <rh-tag compact color=${this.getTypeColor(secret.type)}>
            ${this.getTypeLabel(secret.type)}
          </rh-tag>
        </td>
        <td>
          <div class="keys-list">
            ${keys.length > 0
              ? keys.slice(0, 5).map(key => html` <rh-tag compact color="gray">${key}</rh-tag> `)
              : html`<span class="empty-secret">No data</span>`}
            ${keys.length > 5
              ? html` <rh-tag compact color="gray">+${keys.length - 5} more</rh-tag> `
              : ''}
          </div>
        </td>
        <td>
          <span class="created-time">
            ${secret.metadata.creationTimestamp
              ? this.formatTime(secret.metadata.creationTimestamp)
              : '-'}
          </span>
        </td>
        <td class="actions-cell">
          <button
            class="delete-btn"
            @click=${(e: Event) => this.deleteSecret(e, secret)}
            title="Delete secret"
            aria-label="Delete ${secret.metadata.name}"
          >
            <rh-icon set="ui" icon="trash"></rh-icon>
          </button>
        </td>
      </tr>
    `;
  }
  /**
   * Render the create-secret modal: name field plus a dynamic list of
   * key/value rows (values masked unless individually revealed). Clicking
   * the overlay backdrop — but not the dialog itself — closes the modal.
   */
  private renderCreateModal() {
    return html`
      <div
        class="modal-overlay"
        @click=${(e: Event) => {
          if (e.target === e.currentTarget) this.closeCreateModal();
        }}
      >
        <div class="modal" role="dialog" aria-modal="true" aria-labelledby="create-modal-title">
          <div class="modal-header">
            <h2 id="create-modal-title">Create Secret</h2>
            <button class="close-btn" @click=${this.closeCreateModal} aria-label="Close">
              <rh-icon set="ui" icon="close"></rh-icon>
            </button>
          </div>
          <div class="modal-body">
            ${this.createError ? html` <div class="form-error">${this.createError}</div> ` : ''}
            <div class="form-group">
              <label for="secret-name">Name *</label>
              <input
                type="text"
                id="secret-name"
                .value=${this.newSecretName}
                @input=${(e: Event) => (this.newSecretName = (e.target as HTMLInputElement).value)}
                placeholder="my-secret"
              />
            </div>
            <div class="form-group">
              <label>Data (Key-Value Pairs) *</label>
              <div class="kv-list">
                ${this.newSecretPairs.map(
                  (pair, index) => html`
                    <div class="kv-row">
                      <input
                        type="text"
                        placeholder="Key"
                        .value=${pair.key}
                        @input=${(e: Event) =>
                          this.updateCreateKeyValuePair(
                            index,
                            'key',
                            (e.target as HTMLInputElement).value
                          )}
                        aria-label="Key"
                      />
                      <input
                        type="${pair.revealed ? 'text' : 'password'}"
                        placeholder="Value"
                        .value=${pair.value}
                        @input=${(e: Event) =>
                          this.updateCreateKeyValuePair(
                            index,
                            'value',
                            (e.target as HTMLInputElement).value
                          )}
                        aria-label="Value"
                      />
                      <button
                        class="icon-btn ${pair.revealed ? 'revealed' : ''}"
                        @click=${() => this.toggleCreateReveal(index)}
                        title="${pair.revealed ? 'Hide value' : 'Show value'}"
                        aria-label="${pair.revealed ? 'Hide value' : 'Show value'}"
                      >
                        <rh-icon set="ui" icon="${pair.revealed ? 'view-off' : 'view'}"></rh-icon>
                      </button>
                      <button
                        class="icon-btn danger"
                        @click=${() => this.removeCreateKeyValuePair(index)}
                        title="Remove"
                        aria-label="Remove pair"
                        ?disabled=${this.newSecretPairs.length === 1}
                      >
                        <rh-icon set="ui" icon="trash"></rh-icon>
                      </button>
                    </div>
                  `
                )}
                <rh-button variant="secondary" @click=${this.addCreateKeyValuePair}>
                  <rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
                  Add Key-Value Pair
                </rh-button>
              </div>
            </div>
          </div>
          <div class="modal-footer">
            <rh-button
              variant="secondary"
              @click=${this.closeCreateModal}
              ?disabled=${this.creating}
            >
              Cancel
            </rh-button>
            <rh-button @click=${this.createSecret} ?disabled=${this.creating}>
              ${this.creating ? 'Creating...' : 'Create Secret'}
            </rh-button>
          </div>
        </div>
      </div>
    `;
  }
  /**
   * Render the read-only modal for the currently viewed secret. Values are
   * base64-decoded and masked until individually revealed; only Opaque
   * secrets get an Edit button.
   */
  private renderViewModal() {
    const secret = this.viewingSecret!; // non-null: render() only calls this when viewingSecret is set
    const data = secret.data || {};
    const keys = Object.keys(data);
    const isOpaque = secret.type === 'Opaque';
    return html`
      <div
        class="modal-overlay"
        @click=${(e: Event) => {
          if (e.target === e.currentTarget) this.closeViewModal();
        }}
      >
        <div class="modal" role="dialog" aria-modal="true" aria-labelledby="view-modal-title">
          <div class="modal-header">
            <h2 id="view-modal-title">${secret.metadata.name}</h2>
            <button class="close-btn" @click=${this.closeViewModal} aria-label="Close">
              <rh-icon set="ui" icon="close"></rh-icon>
            </button>
          </div>
          <div class="modal-body">
            ${keys.length === 0
              ? html` <p class="empty-secret">This secret has no data.</p> `
              : html`
                  <div class="secret-data-list">
                    ${keys.map(key => {
                      const isRevealed = this.revealedKeys.has(key);
                      const decodedValue = this.decodeBase64(data[key]);
                      return html`
                        <div class="secret-data-item">
                          <div class="secret-data-key">${key}</div>
                          <div class="secret-data-value-row">
                            <span class="secret-data-value ${isRevealed ? '' : 'hidden'}">
                              ${isRevealed ? decodedValue : '••••••••'}
                            </span>
                            <button
                              class="reveal-btn ${isRevealed ? 'revealed' : ''}"
                              @click=${() => this.toggleReveal(key)}
                              title="${isRevealed ? 'Hide value' : 'Show value'}"
                              aria-label="${isRevealed ? 'Hide value' : 'Show value'}"
                            >
                              <rh-icon
                                set="ui"
                                icon="${isRevealed ? 'view-off' : 'view'}"
                              ></rh-icon>
                            </button>
                          </div>
                        </div>
                      `;
                    })}
                  </div>
                `}
          </div>
          <div class="view-modal-footer">
            <div class="left-actions">
              ${isOpaque
                ? html`
                    <rh-button variant="secondary" @click=${() => this.openEditModal(secret)}>
                      <rh-icon set="ui" icon="edit" slot="icon"></rh-icon>
                      Edit
                    </rh-button>
                  `
                : ''}
            </div>
            <rh-button variant="secondary" @click=${this.closeViewModal}> Close </rh-button>
          </div>
        </div>
      </div>
    `;
  }
  /**
   * Render the edit modal with editable key/value rows for the secret
   * currently being edited. Mirrors the create modal's row controls.
   */
  private renderEditModal() {
    const secret = this.editingSecret!; // non-null: render() only calls this when editingSecret is set
    return html`
      <div
        class="modal-overlay"
        @click=${(e: Event) => {
          if (e.target === e.currentTarget) this.closeEditModal();
        }}
      >
        <div class="modal" role="dialog" aria-modal="true" aria-labelledby="edit-modal-title">
          <div class="modal-header">
            <h2 id="edit-modal-title">Edit: ${secret.metadata.name}</h2>
            <button class="close-btn" @click=${this.closeEditModal} aria-label="Close">
              <rh-icon set="ui" icon="close"></rh-icon>
            </button>
          </div>
          <div class="modal-body">
            ${this.editError ? html` <div class="form-error">${this.editError}</div> ` : ''}
            <div class="form-group">
              <label>Data (Key-Value Pairs) *</label>
              <div class="kv-list">
                ${this.editSecretPairs.map(
                  (pair, index) => html`
                    <div class="kv-row">
                      <input
                        type="text"
                        placeholder="Key"
                        .value=${pair.key}
                        @input=${(e: Event) =>
                          this.updateEditKeyValuePair(
                            index,
                            'key',
                            (e.target as HTMLInputElement).value
                          )}
                        aria-label="Key"
                      />
                      <input
                        type="${pair.revealed ? 'text' : 'password'}"
                        placeholder="Value"
                        .value=${pair.value}
                        @input=${(e: Event) =>
                          this.updateEditKeyValuePair(
                            index,
                            'value',
                            (e.target as HTMLInputElement).value
                          )}
                        aria-label="Value"
                      />
                      <button
                        class="icon-btn ${pair.revealed ? 'revealed' : ''}"
                        @click=${() => this.toggleEditReveal(index)}
                        title="${pair.revealed ? 'Hide value' : 'Show value'}"
                        aria-label="${pair.revealed ? 'Hide value' : 'Show value'}"
                      >
                        <rh-icon set="ui" icon="${pair.revealed ? 'view-off' : 'view'}"></rh-icon>
                      </button>
                      <button
                        class="icon-btn danger"
                        @click=${() => this.removeEditKeyValuePair(index)}
                        title="Remove"
                        aria-label="Remove pair"
                      >
                        <rh-icon set="ui" icon="trash"></rh-icon>
                      </button>
                    </div>
                  `
                )}
                <rh-button variant="secondary" @click=${this.addEditKeyValuePair}>
                  <rh-icon set="ui" icon="add-circle" slot="icon"></rh-icon>
                  Add Key-Value Pair
                </rh-button>
              </div>
            </div>
          </div>
          <div class="modal-footer">
            <rh-button variant="secondary" @click=${this.closeEditModal} ?disabled=${this.editing}>
              Cancel
            </rh-button>
            <rh-button @click=${this.saveEditSecret} ?disabled=${this.editing}>
              ${this.editing ? 'Saving...' : 'Save Changes'}
            </rh-button>
          </div>
        </div>
      </div>
    `;
  }
}
// Register the custom element's type with the global DOM typings so
// document.createElement('secret-list') and querySelector results are
// typed as SecretList.
declare global {
  interface HTMLElementTagNameMap {
    'secret-list': SecretList;
  }
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/lib/graph-layout.ts | TypeScript | /**
* Graph layout utilities for pipeline visualization
* Uses ELK.js for automatic DAG layout
*/
import ELK, { type ElkNode, type ElkExtendedEdge } from 'elkjs/lib/elk.bundled.js';
import type { Pipeline, PipelineNode, PipelineEdge, PipelineGraph } from '../types/pipeline.js';
// Single shared ELK layout-engine instance used by layoutGraph()
const elk = new ELK();
// Layout options for ELK (passed as the root graph's layoutOptions)
const layoutOptions = {
  'elk.algorithm': 'layered', // layered (Sugiyama-style) DAG layout
  'elk.direction': 'DOWN', // flow from top to bottom
  'elk.spacing.nodeNode': '25', // gap between nodes in the same layer
  'elk.layered.spacing.nodeNodeBetweenLayers': '35', // gap between successive layers
  'elk.layered.nodePlacement.strategy': 'SIMPLE',
  'elk.edgeRouting': 'ORTHOGONAL', // right-angled edge routing
};
/**
 * Convert a Pipeline to a graph representation.
 *
 * One node is created per spec step (paired with its runtime status when
 * reported). Edges come from explicit `runIf` dependencies when present;
 * otherwise each step is chained to the previous one (sequential execution).
 */
export function pipelineToGraph(pipeline: Pipeline): PipelineGraph {
  const steps = pipeline.spec.steps;
  const knownNames = new Set(steps.map(s => s.name));

  // One node per step; position is a placeholder until layoutGraph() runs
  const nodes: PipelineNode[] = steps.map((step, index) => ({
    id: step.name,
    type: 'step',
    data: {
      step,
      status: pipeline.status?.steps?.find(s => s.name === step.name),
    },
    position: { x: 0, y: index * 120 },
  }));

  const edges: PipelineEdge[] = [];
  steps.forEach((step, index) => {
    const runIf = step.runIf;
    if (runIf) {
      // Conditional execution: one edge from every referenced (known) step
      for (const depStep of runIf.steps) {
        if (!knownNames.has(depStep)) continue;
        edges.push({
          id: `${depStep}->${step.name}`,
          source: depStep,
          target: step.name,
          type: runIf.condition === 'fail' ? 'failure' : 'success',
          data: {
            condition: runIf.condition || 'success',
            operator: runIf.operator || 'and',
          },
        });
      }
    } else if (index > 0) {
      // No explicit dependency: chain after the previous step
      const prev = steps[index - 1];
      edges.push({
        id: `${prev.name}->${step.name}`,
        source: prev.name,
        target: step.name,
        type: 'sequential',
      });
    }
  });

  return { nodes, edges };
}
/**
* Apply automatic layout to graph nodes using ELK
*/
export async function layoutGraph(graph: PipelineGraph): Promise<PipelineGraph> {
const elkGraph: ElkNode = {
id: 'root',
layoutOptions,
children: graph.nodes.map(node => ({
id: node.id,
width: 240,
height: 100,
})),
edges: graph.edges.map(edge => ({
id: edge.id,
sources: [edge.source],
targets: [edge.target],
})) as ElkExtendedEdge[],
};
const layoutedGraph = await elk.layout(elkGraph);
// Update node positions from ELK layout
const updatedNodes = graph.nodes.map(node => {
const elkNode = layoutedGraph.children?.find(n => n.id === node.id);
if (elkNode) {
return {
...node,
position: {
x: elkNode.x || 0,
y: elkNode.y || 0,
},
};
}
return node;
});
return {
nodes: updatedNodes,
edges: graph.edges,
};
}
/**
 * Create a new empty pipeline resource (no steps) in the given namespace.
 */
export function createEmptyPipeline(name: string, namespace = 'default'): Pipeline {
  const metadata = { name, namespace };
  return {
    apiVersion: 'pipeline.yaacov.io/v1',
    kind: 'Pipeline',
    metadata,
    spec: { steps: [] },
  };
}
/**
 * Create a new step with default values: a single UBI-minimal container
 * that echoes a message, with restartPolicy Never.
 */
export function createDefaultStep(name: string): Pipeline['spec']['steps'][0] {
  const defaultContainer = {
    name: 'main',
    image: 'registry.access.redhat.com/ubi9/ubi-minimal:latest',
    command: ['sh', '-c'],
    args: ['echo "Hello from step"'],
  };
  return {
    name,
    jobSpec: {
      template: {
        spec: {
          containers: [defaultContainer],
          restartPolicy: 'Never',
        },
      },
    },
  };
}
/**
 * Validate a step name against Kubernetes label-style rules:
 * non-empty, at most 63 characters, lowercase alphanumeric with hyphens,
 * starting and ending with an alphanumeric character.
 */
export function validateStepName(name: string): { valid: boolean; error?: string } {
  if (name.length === 0) {
    return { valid: false, error: 'Name is required' };
  }
  if (name.length > 63) {
    return { valid: false, error: 'Name must be 63 characters or less' };
  }
  const labelPattern = /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/;
  if (!labelPattern.test(name)) {
    return {
      valid: false,
      error:
        'Name must be lowercase alphanumeric with hyphens, starting and ending with alphanumeric',
    };
  }
  return { valid: true };
}
/**
 * Check if adding an edge would create a cycle.
 *
 * Builds a directed adjacency map from the existing edges plus the proposed
 * edge, then runs a depth-first search with a recursion stack to detect any
 * back edge anywhere in the resulting graph.
 */
export function wouldCreateCycle(
  edges: PipelineEdge[],
  newSource: string,
  newTarget: string
): boolean {
  // Adjacency list including the candidate edge
  const adjacency = new Map<string, Set<string>>();
  const link = (from: string, to: string) => {
    let targets = adjacency.get(from);
    if (!targets) {
      targets = new Set();
      adjacency.set(from, targets);
    }
    targets.add(to);
  };
  for (const edge of edges) {
    link(edge.source, edge.target);
  }
  link(newSource, newTarget);

  const visited = new Set<string>();
  const onStack = new Set<string>();

  const hasCycle = (node: string): boolean => {
    visited.add(node);
    onStack.add(node);
    for (const next of adjacency.get(node) ?? []) {
      // A neighbor already on the DFS stack means we found a back edge
      if (onStack.has(next)) return true;
      if (!visited.has(next) && hasCycle(next)) return true;
    }
    onStack.delete(node);
    return false;
  };

  // Run from every source node so disconnected components are covered too
  for (const start of adjacency.keys()) {
    if (!visited.has(start) && hasCycle(start)) return true;
  }
  return false;
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/lib/k8s-client.ts | TypeScript | /**
* Kubernetes API Client for Pipeline CRD
* Communicates with the Kubernetes API server via proxy
*/
import type { Pipeline, PipelineList, WatchEvent, ApiError } from '../types/pipeline.js';
export class K8sClient {
  private baseUrl: string;
  constructor(baseUrl = '') {
    this.baseUrl = baseUrl;
  }
  /**
   * Make an API request with error handling
   */
  private async request<T>(path: string, options?: RequestInit): Promise<T> {
    const response = await fetch(`${this.baseUrl}${path}`, {
      ...options,
      headers: {
        'Content-Type': 'application/json',
        Accept: 'application/json',
        ...options?.headers,
      },
    });
    if (!response.ok) {
      let errorMessage = `HTTP ${response.status}`;
      try {
        const error: ApiError = await response.json();
        errorMessage = error.message || errorMessage;
      } catch {
        errorMessage = (await response.text()) || errorMessage;
      }
      throw new Error(errorMessage);
    }
    return response.json();
  }
  /**
   * Base64-encode a map of string values as UTF-8 bytes.
   *
   * `btoa` alone throws InvalidCharacterError for any character outside
   * Latin-1, so values are first converted to UTF-8 bytes via TextEncoder,
   * then to a byte string that btoa can encode. This matches how Kubernetes
   * expects secret data (base64 of the raw bytes).
   */
  private encodeSecretData(data: Record<string, string>): Record<string, string> {
    const encoder = new TextEncoder();
    const encoded: Record<string, string> = {};
    for (const [key, value] of Object.entries(data)) {
      const bytes = encoder.encode(value);
      let binary = '';
      for (const b of bytes) {
        binary += String.fromCharCode(b);
      }
      encoded[key] = btoa(binary);
    }
    return encoded;
  }
  /**
   * List all pipelines in a namespace (or all namespaces when '_all')
   */
  async listPipelines(namespace = 'default'): Promise<Pipeline[]> {
    const path =
      namespace === '_all'
        ? '/apis/pipeline.yaacov.io/v1/pipelines'
        : `/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines`;
    const result = await this.request<PipelineList>(path);
    return result.items || [];
  }
  /**
   * Get a specific pipeline
   */
  async getPipeline(namespace: string, name: string): Promise<Pipeline> {
    return this.request<Pipeline>(
      `/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines/${name}`
    );
  }
  /**
   * Create a new pipeline
   */
  async createPipeline(namespace: string, pipeline: Pipeline): Promise<Pipeline> {
    return this.request<Pipeline>(`/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines`, {
      method: 'POST',
      body: JSON.stringify(pipeline),
    });
  }
  /**
   * Update an existing pipeline
   */
  async updatePipeline(namespace: string, name: string, pipeline: Pipeline): Promise<Pipeline> {
    return this.request<Pipeline>(
      `/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines/${name}`,
      {
        method: 'PUT',
        body: JSON.stringify(pipeline),
      }
    );
  }
  /**
   * Delete a pipeline
   */
  async deletePipeline(namespace: string, name: string): Promise<void> {
    await this.request(`/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines/${name}`, {
      method: 'DELETE',
    });
  }
  /**
   * Watch pipelines for real-time updates.
   *
   * Returns an unsubscribe function that aborts the underlying stream.
   * On non-abort errors the watch is retried after 5 seconds.
   */
  watchPipelines(
    namespace: string,
    callback: (event: WatchEvent<Pipeline>) => void,
    resourceVersion?: string
  ): () => void {
    const path =
      namespace === '_all'
        ? '/apis/pipeline.yaacov.io/v1/pipelines'
        : `/apis/pipeline.yaacov.io/v1/namespaces/${namespace}/pipelines`;
    const params = new URLSearchParams({ watch: 'true' });
    if (resourceVersion) {
      params.set('resourceVersion', resourceVersion);
    }
    const abortController = new AbortController();
    const startWatch = async () => {
      try {
        const response = await fetch(`${this.baseUrl}${path}?${params}`, {
          signal: abortController.signal,
        });
        const reader = response.body?.getReader();
        if (!reader) return;
        const decoder = new TextDecoder();
        let buffer = '';
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          // Watch responses are newline-delimited JSON; keep any partial
          // trailing line in the buffer for the next chunk.
          const lines = buffer.split('\n');
          buffer = lines.pop() || '';
          for (const line of lines) {
            if (line.trim()) {
              try {
                const event: WatchEvent<Pipeline> = JSON.parse(line);
                callback(event);
              } catch (e) {
                console.error('Failed to parse watch event:', e);
              }
            }
          }
        }
      } catch (e) {
        if ((e as Error).name !== 'AbortError') {
          console.error('Watch error:', e);
          // Retry after a delay
          setTimeout(startWatch, 5000);
        }
      }
    };
    startWatch();
    return () => abortController.abort();
  }
  /**
   * Get pod logs for a step
   */
  async getPodLogs(
    namespace: string,
    podName: string,
    options: {
      container?: string;
      follow?: boolean;
      tailLines?: number;
      sinceSeconds?: number;
    } = {}
  ): Promise<string> {
    const params = new URLSearchParams();
    if (options.container) params.set('container', options.container);
    if (options.follow) params.set('follow', 'true');
    if (options.tailLines) params.set('tailLines', String(options.tailLines));
    if (options.sinceSeconds) params.set('sinceSeconds', String(options.sinceSeconds));
    const response = await fetch(
      `${this.baseUrl}/api/v1/namespaces/${namespace}/pods/${podName}/log?${params}`
    );
    if (!response.ok) {
      throw new Error(`Failed to get logs: ${response.status}`);
    }
    return response.text();
  }
  /**
   * Stream pod logs. Returns an unsubscribe function that stops the stream.
   */
  streamPodLogs(
    namespace: string,
    podName: string,
    callback: (line: string) => void,
    options: { container?: string; tailLines?: number } = {}
  ): () => void {
    const params = new URLSearchParams({ follow: 'true' });
    if (options.container) params.set('container', options.container);
    if (options.tailLines) params.set('tailLines', String(options.tailLines));
    const abortController = new AbortController();
    const startStream = async () => {
      try {
        const response = await fetch(
          `${this.baseUrl}/api/v1/namespaces/${namespace}/pods/${podName}/log?${params}`,
          { signal: abortController.signal }
        );
        const reader = response.body?.getReader();
        if (!reader) return;
        const decoder = new TextDecoder();
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          const text = decoder.decode(value, { stream: true });
          const lines = text.split('\n');
          for (const line of lines) {
            if (line) callback(line);
          }
        }
      } catch (e) {
        if ((e as Error).name !== 'AbortError') {
          console.error('Log stream error:', e);
        }
      }
    };
    startStream();
    return () => abortController.abort();
  }
  /**
   * Get events for a resource
   */
  async getEvents(
    namespace: string,
    fieldSelector: string
  ): Promise<
    Array<{
      type: string;
      reason: string;
      message: string;
      firstTimestamp: string;
      lastTimestamp: string;
      count: number;
    }>
  > {
    const params = new URLSearchParams({ fieldSelector });
    const result = await this.request<{
      items: Array<{
        type: string;
        reason: string;
        message: string;
        firstTimestamp: string;
        lastTimestamp: string;
        count: number;
      }>;
    }>(`/api/v1/namespaces/${namespace}/events?${params}`);
    return result.items || [];
  }
  /**
   * List available namespaces
   */
  async listNamespaces(): Promise<string[]> {
    try {
      const result = await this.request<{ items: Array<{ metadata: { name: string } }> }>(
        '/api/v1/namespaces'
      );
      return result.items.map(ns => ns.metadata.name);
    } catch {
      // If we can't list namespaces, return default
      return ['default'];
    }
  }
  /**
   * Get a Job by name
   */
  async getJob(
    namespace: string,
    name: string
  ): Promise<{
    metadata: { name: string; namespace: string };
    spec: Record<string, unknown>;
    status: Record<string, unknown>;
  }> {
    return this.request(`/apis/batch/v1/namespaces/${namespace}/jobs/${name}`);
  }
  /**
   * Get a Pod by name
   */
  async getPod(
    namespace: string,
    name: string
  ): Promise<{
    metadata: { name: string; namespace: string };
    spec: Record<string, unknown>;
    status: Record<string, unknown>;
  }> {
    return this.request(`/api/v1/namespaces/${namespace}/pods/${name}`);
  }
  /**
   * List pods by label selector
   */
  async listPods(
    namespace: string,
    labelSelector: string
  ): Promise<
    Array<{
      metadata: { name: string; namespace: string };
      status: { phase: string };
    }>
  > {
    const params = new URLSearchParams({ labelSelector });
    const result = await this.request<{
      items: Array<{
        metadata: { name: string; namespace: string };
        status: { phase: string };
      }>;
    }>(`/api/v1/namespaces/${namespace}/pods?${params}`);
    return result.items || [];
  }
  /**
   * List PersistentVolumeClaims in a namespace
   */
  async listPVCs(namespace: string): Promise<
    Array<{
      metadata: { name: string; namespace: string; creationTimestamp?: string };
      spec: {
        accessModes?: string[];
        storageClassName?: string;
        volumeMode?: string;
        resources?: { requests?: { storage?: string } };
      };
      status: { phase: string };
    }>
  > {
    try {
      const result = await this.request<{
        items: Array<{
          metadata: { name: string; namespace: string; creationTimestamp?: string };
          spec: {
            accessModes?: string[];
            storageClassName?: string;
            volumeMode?: string;
            resources?: { requests?: { storage?: string } };
          };
          status: { phase: string };
        }>;
      }>(`/api/v1/namespaces/${namespace}/persistentvolumeclaims`);
      return result.items || [];
    } catch {
      // If we can't list PVCs, return empty array
      return [];
    }
  }
  /**
   * Create a PersistentVolumeClaim
   */
  async createPVC(
    namespace: string,
    pvc: {
      name: string;
      storageClassName?: string;
      accessModes: string[];
      volumeMode: string;
      storage: string;
    }
  ): Promise<void> {
    await this.request(`/api/v1/namespaces/${namespace}/persistentvolumeclaims`, {
      method: 'POST',
      body: JSON.stringify({
        apiVersion: 'v1',
        kind: 'PersistentVolumeClaim',
        metadata: {
          name: pvc.name,
          namespace: namespace,
        },
        spec: {
          accessModes: pvc.accessModes,
          volumeMode: pvc.volumeMode,
          storageClassName: pvc.storageClassName || undefined,
          resources: {
            requests: {
              storage: pvc.storage,
            },
          },
        },
      }),
    });
  }
  /**
   * Delete a PersistentVolumeClaim
   */
  async deletePVC(namespace: string, name: string): Promise<void> {
    await this.request(`/api/v1/namespaces/${namespace}/persistentvolumeclaims/${name}`, {
      method: 'DELETE',
    });
  }
  /**
   * List StorageClasses
   */
  async listStorageClasses(): Promise<
    Array<{
      metadata: { name: string };
      provisioner: string;
      reclaimPolicy?: string;
      volumeBindingMode?: string;
      allowVolumeExpansion?: boolean;
    }>
  > {
    try {
      const result = await this.request<{
        items: Array<{
          metadata: { name: string };
          provisioner: string;
          reclaimPolicy?: string;
          volumeBindingMode?: string;
          allowVolumeExpansion?: boolean;
        }>;
      }>('/apis/storage.k8s.io/v1/storageclasses');
      return result.items || [];
    } catch {
      return [];
    }
  }
  /**
   * List Secrets in a namespace
   */
  async listSecrets(namespace: string): Promise<
    Array<{
      metadata: { name: string; namespace: string; creationTimestamp?: string };
      type: string;
      data?: Record<string, string>;
    }>
  > {
    try {
      const result = await this.request<{
        items: Array<{
          metadata: { name: string; namespace: string; creationTimestamp?: string };
          type: string;
          data?: Record<string, string>;
        }>;
      }>(`/api/v1/namespaces/${namespace}/secrets`);
      return result.items || [];
    } catch {
      return [];
    }
  }
  /**
   * Get a single Secret
   */
  async getSecret(
    namespace: string,
    name: string
  ): Promise<{
    metadata: { name: string; namespace: string; creationTimestamp?: string };
    type: string;
    data?: Record<string, string>;
  }> {
    return this.request(`/api/v1/namespaces/${namespace}/secrets/${name}`);
  }
  /**
   * Create an Opaque Secret.
   *
   * Values are given as plain strings and base64-encoded (UTF-8 safe,
   * see encodeSecretData) before being sent to the API.
   */
  async createSecret(
    namespace: string,
    secret: {
      name: string;
      data: Record<string, string>;
    }
  ): Promise<void> {
    await this.request(`/api/v1/namespaces/${namespace}/secrets`, {
      method: 'POST',
      body: JSON.stringify({
        apiVersion: 'v1',
        kind: 'Secret',
        metadata: {
          name: secret.name,
          namespace: namespace,
        },
        type: 'Opaque',
        data: this.encodeSecretData(secret.data),
      }),
    });
  }
  /**
   * Delete a Secret
   */
  async deleteSecret(namespace: string, name: string): Promise<void> {
    await this.request(`/api/v1/namespaces/${namespace}/secrets/${name}`, { method: 'DELETE' });
  }
  /**
   * Update an Opaque Secret.
   *
   * Replaces the secret's data with `data`; values are base64-encoded
   * (UTF-8 safe, see encodeSecretData) before being sent to the API.
   */
  async updateSecret(namespace: string, name: string, data: Record<string, string>): Promise<void> {
    await this.request(`/api/v1/namespaces/${namespace}/secrets/${name}`, {
      method: 'PUT',
      body: JSON.stringify({
        apiVersion: 'v1',
        kind: 'Secret',
        metadata: {
          name: name,
          namespace: namespace,
        },
        type: 'Opaque',
        data: this.encodeSecretData(data),
      }),
    });
  }
}
// Singleton instance shared by the UI; constructed with an empty baseUrl
// so requests go to the same origin (the API proxy).
export const k8sClient = new K8sClient();
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/lib/router.ts | TypeScript | /**
* Simple router using native URLPattern API
* No external dependencies required
*/
export interface Route {
  pattern: URLPattern; // compiled pathname pattern (supports ':param' groups)
  component: string; // custom-element tag to render, or a '__redirect:<path>' sentinel
  name: string; // route name; defaults to the component tag when not given
}
export interface RouteMatch {
  route: Route; // the route whose pattern matched
  params: Record<string, string>; // named pathname groups extracted from the URL
}
class AppRouter {
  private routes: Route[] = []; // ordered route table; first match wins
  private outlet: HTMLElement | null = null; // container the matched component renders into
  private currentComponent: HTMLElement | null = null; // element currently mounted in the outlet
/**
* Initialize the router with an outlet element
*/
init(outlet: HTMLElement): void {
this.outlet = outlet;
// Listen for navigation events
window.addEventListener('popstate', () => this.navigate(window.location.pathname));
// Handle link clicks
document.addEventListener('click', e => {
const link = (e.target as HTMLElement).closest('a[href]');
if (link && link.getAttribute('href')?.startsWith('/')) {
e.preventDefault();
const href = link.getAttribute('href')!;
this.go(href);
}
});
// Initial navigation
this.navigate(window.location.pathname);
}
/**
* Add a route
*/
addRoute(path: string, component: string, name?: string): void {
// Convert path params like :namespace to named groups
const patternPath = path.replace(/:(\w+)/g, ':$1');
this.routes.push({
pattern: new URLPattern({ pathname: patternPath }),
component,
name: name || component,
});
}
/**
* Set multiple routes at once
*/
setRoutes(routes: Array<{ path: string; component: string; redirect?: string }>): void {
this.routes = [];
for (const route of routes) {
if (route.redirect) {
// Handle redirects by adding a special route
this.addRoute(route.path, `__redirect:${route.redirect}`, 'redirect');
} else {
this.addRoute(route.path, route.component);
}
}
}
/**
* Match a path against routes
*/
match(path: string): RouteMatch | null {
const url = new URL(path, window.location.origin);
for (const route of this.routes) {
const result = route.pattern.exec(url);
if (result) {
const params: Record<string, string> = {};
// Extract pathname groups
for (const [key, value] of Object.entries(result.pathname.groups)) {
if (value !== undefined) {
params[key] = value;
}
}
return { route, params };
}
}
return null;
}
/**
* Navigate to a path
*/
navigate(path: string): void {
const match = this.match(path);
if (!match) {
console.warn(`No route matched for path: ${path}`);
// Try to navigate to root
if (path !== '/') {
this.go('/');
}
return;
}
// Handle redirects
if (match.route.component.startsWith('__redirect:')) {
const redirectPath = match.route.component.replace('__redirect:', '');
this.go(redirectPath);
return;
}
this.renderComponent(match.route.component, match.params);
}
/**
* Programmatic navigation
*/
go(path: string): void {
window.history.pushState({}, '', path);
this.navigate(path);
}
/**
* Render a component in the outlet
*/
private renderComponent(componentName: string, params: Record<string, string>): void {
if (!this.outlet) {
console.error('Router outlet not initialized');
return;
}
// Remove current component
if (this.currentComponent) {
this.currentComponent.remove();
}
// Create new component
const component = document.createElement(componentName);
// Pass route params as a property
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(component as any).location = { params };
// Add to outlet
this.outlet.appendChild(component);
this.currentComponent = component;
// Dispatch navigation event
window.dispatchEvent(
new CustomEvent('router-navigate', {
detail: { path: window.location.pathname, params, component: componentName },
})
);
}
/**
* Get current route params
*/
getCurrentParams(): Record<string, string> {
const match = this.match(window.location.pathname);
return match?.params || {};
}
}
// Singleton router instance used by the whole application.
export const router = new AppRouter();
/**
 * Navigate to a path (convenience wrapper around router.go).
 *
 * @param path - Absolute application path, e.g. '/pipelines'
 */
export function navigate(path: string): void {
  router.go(path);
}
/**
 * Register the application's route table and start the router on the
 * given outlet element. Route order matters: the first match wins.
 */
export function initRouter(outlet: HTMLElement): void {
  const routeTable = [
    { path: '/', component: 'pipeline-list', redirect: '/pipelines' },
    { path: '/pipelines', component: 'pipeline-list' },
    { path: '/pipelines/:namespace/:name', component: 'pipeline-detail' },
    { path: '/builder', component: 'pipeline-canvas' },
    { path: '/builder/:namespace/:name', component: 'pipeline-canvas' },
    { path: '/storage', component: 'pvc-list' },
    { path: '/secrets', component: 'secret-list' },
  ];
  router.setRoutes(routeTable);
  router.init(outlet);
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/main.ts | TypeScript | /**
* JobRunner UI - Main Entry Point
*
* A pipeline builder and monitor for Kubernetes using
* Red Hat Design System components.
*/
// ============================================
// RHDS Elements - Import all components via main entry
// ============================================
import '@rhds/elements';
import { RhIcon } from '@rhds/elements/rh-icon/rh-icon.js';
// ============================================
// Configure Icon Resolver
// Icons are fetched as ES-module source from node_modules and the inline
// SVG is extracted from the module text with a regex (the module shape is
// `t.innerHTML=\`<svg...>\``). Any failure falls back to an empty icon.
// ============================================
RhIcon.resolve = async (set: string, icon: string): Promise<Node> => {
  try {
    const response = await fetch(`/node_modules/@rhds/icons/${set}/${icon}.js`);
    if (!response.ok) {
      console.warn(`Icon not found: ${set}/${icon}`);
      return createFallbackIcon();
    }
    const text = await response.text();
    // Extract the SVG from the module's template.innerHTML.
    // Format: const t = document.createElement('template');t.innerHTML=`<svg...>`;export default t.content.cloneNode(true);
    // NOTE(review): this regex parse is coupled to the @rhds/icons module
    // format — verify after upgrading that package.
    const match = text.match(/innerHTML\s*=\s*`([\s\S]*?)`/);
    if (match) {
      const template = document.createElement('template');
      template.innerHTML = match[1].trim();
      return template.content.cloneNode(true);
    }
    console.warn(`Could not parse icon: ${set}/${icon}`);
    return createFallbackIcon();
  } catch (e) {
    console.warn(`Error loading icon ${set}/${icon}:`, e);
    return createFallbackIcon();
  }
};
/** Build an empty 32x32 SVG placeholder shown when icon loading fails. */
function createFallbackIcon(): Node {
  const tpl = document.createElement('template');
  tpl.innerHTML = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32"></svg>`;
  return tpl.content.cloneNode(true);
}
// ============================================
// Force Eager Icon Loading
// Override the `loading` accessor on the prototype so every rh-icon
// reports 'eager', regardless of attributes or property writes.
// This fixes lazy loading issues in production (including Shadow DOM).
// ============================================
Object.defineProperty(RhIcon.prototype, 'loading', {
  get() {
    return 'eager';
  },
  set() {
    // Ignore any attempts to set loading - always use eager
  },
  // Keep configurable so the override can itself be replaced if needed.
  configurable: true,
});
// ============================================
// Custom Components
// ============================================
import './components/app-shell.js';
import './components/shared/status-badge.js';
import './components/shared/side-drawer.js';
import './components/shared/code-editor.js';
import './components/monitor/pipeline-list.js';
import './components/monitor/pipeline-detail.js';
import './components/monitor/step-detail.js';
import './components/builder/pipeline-canvas.js';
import './components/builder/step-editor.js';
import './components/builder/global-settings.js';
import './components/storage/pvc-list.js';
import './components/storage/secret-list.js';
// ============================================
// Router Setup
// ============================================
import { initRouter } from './lib/router.js';
// Boot the application: mount the shell into #app and start the router.
function initApp() {
  const container = document.getElementById('app');
  if (!container) {
    console.error('App container not found');
    return;
  }

  // Drop the static loading placeholder.
  container.classList.remove('app-loading');
  container.innerHTML = '';

  // Build the shell with a dedicated router outlet inside it.
  const shell = document.createElement('app-shell');
  const outlet = document.createElement('div');
  outlet.id = 'router-outlet';
  shell.appendChild(outlet);
  container.appendChild(shell);

  initRouter(outlet);

  console.log('🏃 JobRunner UI initialized');
}
// Wait for DOM: if the document is still parsing, defer to
// DOMContentLoaded; otherwise (script loaded late) boot immediately.
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', initApp);
} else {
  initApp();
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/server.ts | TypeScript | /**
* JobRunner UI Development Server
* Built with Bun for fast development and production serving
*/
import { existsSync } from 'fs';
import { extname, join, sep } from 'path';
// --production switches minification, cache headers and build-failure policy.
const isProduction = Bun.argv.includes('--production');
// HTTP port for the UI server (default 3000).
const PORT = Number(process.env.PORT) || 3000;
// Kubernetes API endpoint to proxy to; default matches `kubectl proxy`.
const K8S_API_URL = process.env.K8S_API_URL || 'http://127.0.0.1:8001';
// MIME types for static files, keyed by lowercase extension (with dot).
// Unknown extensions fall back to application/octet-stream in serveStatic.
const MIME_TYPES: Record<string, string> = {
  '.html': 'text/html; charset=utf-8',
  '.js': 'application/javascript; charset=utf-8',
  '.mjs': 'application/javascript; charset=utf-8',
  '.css': 'text/css; charset=utf-8',
  '.json': 'application/json; charset=utf-8',
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.svg': 'image/svg+xml',
  '.ico': 'image/x-icon',
  '.woff': 'font/woff',
  '.woff2': 'font/woff2',
  '.ttf': 'font/ttf',
  '.map': 'application/json',
};
/**
 * Bundle the client application into ./dist using Bun's bundler.
 *
 * @returns true when the build succeeds, false when it reports errors
 */
async function buildApp(): Promise<boolean> {
  console.log('📦 Building application...');

  const result = await Bun.build({
    entrypoints: ['./src/main.ts'],
    outdir: './dist',
    minify: isProduction,
    sourcemap: isProduction ? 'none' : 'external',
    target: 'browser',
    splitting: false,
    external: [], // Bundle everything
  });

  if (!result.success) {
    console.error('❌ Build failed:');
    result.logs.forEach(log => console.error(log));
    return false;
  }

  console.log('✅ Build complete');
  return true;
}
/**
 * Serve static files from the public and dist directories.
 *
 * The requested path comes straight from the URL and is therefore
 * untrusted: after join() each candidate is checked to still live inside
 * its base directory, rejecting '..' path-traversal attempts.
 *
 * @param path - URL pathname of the request (untrusted)
 * @returns a file Response, or null when no matching file exists
 */
function serveStatic(path: string): Response | null {
  const baseDirs = [join(import.meta.dir, '../public'), join(import.meta.dir, '../dist')];

  // Try public directory first, then dist
  for (const baseDir of baseDirs) {
    const filePath = join(baseDir, path);

    // Security: join() normalizes '..' segments, so a crafted path could
    // resolve outside baseDir. Only serve files strictly inside it.
    if (!filePath.startsWith(baseDir + sep)) {
      continue;
    }

    if (existsSync(filePath)) {
      const ext = extname(path).toLowerCase();
      const contentType = MIME_TYPES[ext] || 'application/octet-stream';
      const file = Bun.file(filePath);
      return new Response(file, {
        headers: {
          'Content-Type': contentType,
          // Bundles are immutable in production; never cache in dev.
          'Cache-Control': isProduction ? 'public, max-age=31536000' : 'no-cache',
        },
      });
    }
  }
  return null;
}
/**
 * Proxy a request to the Kubernetes API server.
 *
 * Only Content-Type/Accept headers are sent upstream; incoming headers
 * (including Authorization) are deliberately dropped — NOTE(review):
 * auth appears to be delegated to the upstream (e.g. `kubectl proxy`);
 * confirm no header passthrough is required.
 *
 * @param req - incoming browser request
 * @param path - pathname plus query string appended to K8S_API_URL
 * @returns upstream response with permissive CORS, or a 502 JSON error
 */
async function proxyToK8s(req: Request, path: string): Promise<Response> {
  const targetUrl = `${K8S_API_URL}${path}`;
  try {
    // NOTE(review): forwarding req.body as a stream requires
    // `duplex: 'half'` under Node/undici; presumably Bun's fetch accepts
    // this form — verify if this server is ever run outside Bun.
    const proxyReq = new Request(targetUrl, {
      method: req.method,
      headers: {
        'Content-Type': 'application/json',
        Accept: 'application/json',
      },
      body: req.body,
    });

    const response = await fetch(proxyReq);

    // Stream the upstream body through; keep its status and content type.
    return new Response(response.body, {
      status: response.status,
      headers: {
        'Content-Type': response.headers.get('Content-Type') || 'application/json',
        'Access-Control-Allow-Origin': '*',
      },
    });
  } catch (error) {
    console.error(`Proxy error: ${error}`);
    return new Response(JSON.stringify({ error: 'Proxy error', message: String(error) }), {
      status: 502,
      headers: { 'Content-Type': 'application/json' },
    });
  }
}
/**
 * Main server: handles CORS preflight, proxies Kubernetes API calls,
 * serves static assets (including node_modules for RHDS elements) and
 * falls back to index.html for SPA routes.
 */
Bun.serve({
  port: PORT,
  async fetch(req) {
    const url = new URL(req.url);
    const path = url.pathname;

    // Handle CORS preflight
    if (req.method === 'OPTIONS') {
      return new Response(null, {
        headers: {
          'Access-Control-Allow-Origin': '*',
          'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, PATCH, OPTIONS',
          'Access-Control-Allow-Headers': 'Content-Type, Authorization',
        },
      });
    }

    // Proxy Kubernetes API requests
    if (path.startsWith('/api/') || path.startsWith('/apis/')) {
      return proxyToK8s(req, path + url.search);
    }

    // Serve static files
    if (path !== '/' && (path.includes('.') || path.startsWith('/node_modules/'))) {
      // Handle node_modules for RHDS elements
      if (path.startsWith('/node_modules/')) {
        const moduleRoot = join(import.meta.dir, '..', 'node_modules');
        const modulePath = join(import.meta.dir, '..', path);
        // Security: join() collapses '..' segments, so the resolved path
        // must be verified to remain inside node_modules before serving.
        if (modulePath.startsWith(moduleRoot + sep) && existsSync(modulePath)) {
          const ext = extname(path).toLowerCase();
          const contentType = MIME_TYPES[ext] || 'application/javascript';
          const file = Bun.file(modulePath);
          return new Response(file, {
            headers: { 'Content-Type': contentType },
          });
        }
      }

      const staticResponse = serveStatic(path);
      if (staticResponse) {
        return staticResponse;
      }
    }

    // SPA fallback - serve index.html for all other routes
    const indexPath = join(import.meta.dir, '../public/index.html');
    if (existsSync(indexPath)) {
      const indexFile = Bun.file(indexPath);
      return new Response(indexFile, {
        headers: { 'Content-Type': 'text/html; charset=utf-8' },
      });
    }

    return new Response('Not Found', { status: 404 });
  },
  error(error) {
    console.error('Server error:', error);
    return new Response('Internal Server Error', { status: 500 });
  },
});
// Build the client bundle, then print server status. A failed build only
// aborts in production; in development the server still starts so the
// previous bundle (if any) remains available.
async function start() {
  const buildSuccess = await buildApp();
  if (!buildSuccess && isProduction) {
    console.error('Production build failed, exiting');
    process.exit(1);
  }

  console.log(`
🏃 JobRunner UI
  Local: http://localhost:${PORT}
  Mode: ${isProduction ? 'production' : 'development'}
  K8s API: ${K8S_API_URL}
${!isProduction ? ' Hot reload is not automatic. Restart the server after changes.' : ''}
  Press Ctrl+C to stop
`);
}
// Launch; surface startup failures instead of an unhandled promise
// rejection (Bun.build can throw, which would otherwise crash opaquely).
start().catch(err => {
  console.error('Startup failed:', err);
  process.exit(1);
});

// Handle graceful shutdown
process.on('SIGINT', () => {
  console.log('\n👋 Shutting down...');
  process.exit(0);
});
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/styles/global.css | CSS | /**
* Global styles for JobRunner UI
* Uses Red Hat Design System tokens
*/
/* Import RHDS tokens if not loaded via CDN */
/* @import '@rhds/tokens/css/global.css'; */
/* Reset and base styles */
*,
*::before,
*::after {
box-sizing: border-box;
margin: 0;
padding: 0;
}
html {
font-size: 16px;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
body {
font-family: var(--rh-font-family-body-text, 'Red Hat Text', system-ui, sans-serif);
font-size: var(--rh-font-size-body-text-md, 1rem);
line-height: var(--rh-line-height-body-text, 1.5);
color: var(--rh-color-text-primary-on-light, #151515);
background-color: var(--rh-color-surface-lightest, #ffffff);
}
/* Focus styles */
:focus-visible {
outline: var(--rh-border-width-md, 2px) solid var(--rh-color-interactive-blue-darker, #0066cc);
outline-offset: 2px;
}
/* Remove focus outline from RHDS tabs components */
rh-tabs,
rh-tabs:focus,
rh-tabs:focus-visible {
outline: none !important;
--rh-tabs-link-focus-outline: none;
--rh-tabs-focus-outline: none;
}
rh-tab,
rh-tab:focus,
rh-tab:focus-within,
rh-tab:focus-visible {
outline: none !important;
box-shadow: none !important;
--rh-tab-focus-outline: none;
--rh-focus-outline-color: transparent;
--rh-focus-outline-width: 0;
}
rh-tab-panel,
rh-tab-panel:focus,
rh-tab-panel:focus-visible {
outline: none !important;
box-shadow: none !important;
}
/* Skip link (for accessibility) */
.skip-link {
position: absolute;
left: -9999px;
top: auto;
width: 1px;
height: 1px;
overflow: hidden;
}
.skip-link:focus {
position: fixed;
top: 0;
left: 0;
width: auto;
height: auto;
padding: var(--rh-space-md, 16px);
background: var(--rh-color-surface-darkest, #151515);
color: var(--rh-color-text-primary-on-dark, #ffffff);
z-index: 1000;
font-weight: var(--rh-font-weight-body-text-medium, 500);
}
/* Visually hidden (for screen readers) */
.visually-hidden {
position: absolute;
width: 1px;
height: 1px;
padding: 0;
margin: -1px;
overflow: hidden;
clip: rect(0, 0, 0, 0);
white-space: nowrap;
border: 0;
}
/* Link styles */
a {
color: var(--rh-color-interactive-blue-darker, #0066cc);
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
/* Code styles */
code,
pre {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
/* Selection styles */
::selection {
background-color: var(--rh-color-blue-200, #bee1f4);
color: var(--rh-color-text-primary-on-light, #151515);
}
/* Scrollbar styles (Webkit) */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
::-webkit-scrollbar-thumb {
background: var(--rh-color-gray-40, #8a8d90);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--rh-color-gray-50, #6a6e73);
}
/* Loading animation */
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
@keyframes pulse {
0%,
100% {
opacity: 1;
}
50% {
opacity: 0.4;
}
}
@keyframes fadeIn {
from {
opacity: 0;
}
to {
opacity: 1;
}
}
@keyframes slideUp {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* Utility classes */
.text-center {
text-align: center;
}
.text-left {
text-align: left;
}
.text-right {
text-align: right;
}
.font-mono {
font-family: var(--rh-font-family-code, 'Red Hat Mono', monospace);
}
.font-heading {
font-family: var(--rh-font-family-heading, 'Red Hat Display', sans-serif);
}
.text-primary {
color: var(--rh-color-text-primary-on-light, #151515);
}
.text-secondary {
color: var(--rh-color-text-secondary-on-light, #6a6e73);
}
.text-success {
color: var(--rh-color-green-600, #3e8635);
}
.text-danger {
color: var(--rh-color-red-600, #c9190b);
}
.text-warning {
color: var(--rh-color-yellow-600, #f0ab00);
}
.bg-surface {
background: var(--rh-color-surface-lightest, #ffffff);
}
.bg-surface-light {
background: var(--rh-color-surface-lighter, #f5f5f5);
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
ui/src/types/pipeline.ts | TypeScript | /**
* Pipeline CRD TypeScript definitions
* Matches the Go types in api/v1/pipeline_types.go
*/
export interface Pipeline {
apiVersion: 'pipeline.yaacov.io/v1';
kind: 'Pipeline';
metadata: ObjectMeta;
spec: PipelineSpec;
status?: PipelineStatus;
}
export interface PipelineList {
apiVersion: string;
kind: string;
metadata: ListMeta;
items: Pipeline[];
}
export interface ObjectMeta {
name: string;
namespace?: string;
uid?: string;
creationTimestamp?: string;
labels?: Record<string, string>;
annotations?: Record<string, string>;
resourceVersion?: string;
}
export interface ListMeta {
resourceVersion?: string;
continue?: string;
}
// ============================================
// PipelineSpec
// ============================================
export interface PipelineSpec {
/** List of steps/jobs to run */
steps: PipelineStep[];
/** Service account for all jobs (can be overridden per step) */
serviceAccountName?: string;
/** Shared volume mounted to all steps */
sharedVolume?: SharedVolumeSpec;
/** Common pod configuration applied to all steps */
podTemplate?: PodTemplateDefaults;
}
export interface PipelineStep {
/** Unique identifier for this step (1-63 chars, lowercase alphanumeric + hyphens) */
name: string;
/** Conditional execution - if not specified, runs sequentially */
runIf?: RunIfCondition;
/** Kubernetes Job specification */
jobSpec: JobSpec;
}
export interface RunIfCondition {
/** Whether to check for success or failure (default: success) */
condition?: 'success' | 'fail';
/** Whether ALL or ANY steps must meet condition (default: and) */
operator?: 'and' | 'or';
/** List of step names to check */
steps: string[];
}
export interface SharedVolumeSpec {
/** Volume name (default: workspace) */
name?: string;
/** Mount path in each step (default: /workspace) */
mountPath?: string;
/** PersistentVolumeClaim source */
persistentVolumeClaim?: { claimName: string };
/** EmptyDir source */
emptyDir?: { medium?: string; sizeLimit?: string };
/** ConfigMap source */
configMap?: { name: string };
/** Secret source */
secret?: { secretName: string };
}
export interface PodTemplateDefaults {
/** Default container image for steps */
image?: string;
/** Environment variables injected into all containers */
env?: EnvVar[];
/** Environment from ConfigMaps/Secrets */
envFrom?: EnvFromSource[];
/** Node selector for pod scheduling */
nodeSelector?: Record<string, string>;
/** Tolerations for pod scheduling */
tolerations?: Toleration[];
/** Affinity rules */
affinity?: Affinity;
/** Pod security context */
securityContext?: PodSecurityContext;
/** Image pull secrets */
imagePullSecrets?: LocalObjectReference[];
/** Priority class name */
priorityClassName?: string;
/** Runtime class name */
runtimeClassName?: string;
/** Custom scheduler name */
schedulerName?: string;
/** Labels added to all pods */
labels?: Record<string, string>;
/** Annotations added to all pods */
annotations?: Record<string, string>;
/** Default resource requirements */
defaultResources?: ResourceRequirements;
}
// ============================================
// JobSpec (simplified Kubernetes Job)
// ============================================
export interface JobSpec {
template: PodTemplateSpec;
backoffLimit?: number;
activeDeadlineSeconds?: number;
ttlSecondsAfterFinished?: number;
parallelism?: number;
completions?: number;
}
export interface PodTemplateSpec {
metadata?: ObjectMeta;
spec: PodSpec;
}
export interface PodSpec {
containers: Container[];
initContainers?: Container[];
restartPolicy: 'Never' | 'OnFailure' | 'Always';
serviceAccountName?: string;
nodeSelector?: Record<string, string>;
tolerations?: Toleration[];
affinity?: Affinity;
volumes?: Volume[];
securityContext?: PodSecurityContext;
}
export interface Container {
name: string;
image?: string;
command?: string[];
args?: string[];
workingDir?: string;
env?: EnvVar[];
envFrom?: EnvFromSource[];
resources?: ResourceRequirements;
volumeMounts?: VolumeMount[];
securityContext?: SecurityContext;
ports?: ContainerPort[];
}
export interface EnvVar {
name: string;
value?: string;
valueFrom?: EnvVarSource;
}
export interface EnvVarSource {
secretKeyRef?: SecretKeySelector;
configMapKeyRef?: ConfigMapKeySelector;
fieldRef?: ObjectFieldSelector;
}
export interface SecretKeySelector {
name: string;
key: string;
optional?: boolean;
}
export interface ConfigMapKeySelector {
name: string;
key: string;
optional?: boolean;
}
export interface ObjectFieldSelector {
fieldPath: string;
apiVersion?: string;
}
export interface EnvFromSource {
prefix?: string;
secretRef?: SecretEnvSource;
configMapRef?: ConfigMapEnvSource;
}
export interface SecretEnvSource {
name: string;
optional?: boolean;
}
export interface ConfigMapEnvSource {
name: string;
optional?: boolean;
}
export interface ResourceRequirements {
requests?: { cpu?: string; memory?: string; [key: string]: string | undefined };
limits?: { cpu?: string; memory?: string; [key: string]: string | undefined };
}
export interface VolumeMount {
name: string;
mountPath: string;
subPath?: string;
readOnly?: boolean;
}
export interface Volume {
name: string;
persistentVolumeClaim?: { claimName: string };
emptyDir?: { medium?: string; sizeLimit?: string };
configMap?: { name: string; items?: KeyToPath[] };
secret?: { secretName: string; items?: KeyToPath[] };
}
export interface KeyToPath {
key: string;
path: string;
mode?: number;
}
export interface Toleration {
key?: string;
operator?: 'Exists' | 'Equal';
value?: string;
effect?: 'NoSchedule' | 'PreferNoSchedule' | 'NoExecute';
tolerationSeconds?: number;
}
export interface Affinity {
nodeAffinity?: NodeAffinity;
podAffinity?: PodAffinity;
podAntiAffinity?: PodAntiAffinity;
}
export interface NodeAffinity {
requiredDuringSchedulingIgnoredDuringExecution?: NodeSelector;
preferredDuringSchedulingIgnoredDuringExecution?: PreferredSchedulingTerm[];
}
export interface NodeSelector {
nodeSelectorTerms: NodeSelectorTerm[];
}
export interface NodeSelectorTerm {
matchExpressions?: NodeSelectorRequirement[];
matchFields?: NodeSelectorRequirement[];
}
export interface NodeSelectorRequirement {
key: string;
operator: 'In' | 'NotIn' | 'Exists' | 'DoesNotExist' | 'Gt' | 'Lt';
values?: string[];
}
export interface PreferredSchedulingTerm {
weight: number;
preference: NodeSelectorTerm;
}
export interface PodAffinity {
requiredDuringSchedulingIgnoredDuringExecution?: PodAffinityTerm[];
preferredDuringSchedulingIgnoredDuringExecution?: WeightedPodAffinityTerm[];
}
export interface PodAntiAffinity {
requiredDuringSchedulingIgnoredDuringExecution?: PodAffinityTerm[];
preferredDuringSchedulingIgnoredDuringExecution?: WeightedPodAffinityTerm[];
}
export interface PodAffinityTerm {
labelSelector?: LabelSelector;
topologyKey: string;
namespaces?: string[];
}
export interface WeightedPodAffinityTerm {
weight: number;
podAffinityTerm: PodAffinityTerm;
}
export interface LabelSelector {
matchLabels?: Record<string, string>;
matchExpressions?: LabelSelectorRequirement[];
}
export interface LabelSelectorRequirement {
key: string;
operator: 'In' | 'NotIn' | 'Exists' | 'DoesNotExist';
values?: string[];
}
export interface PodSecurityContext {
runAsUser?: number;
runAsGroup?: number;
runAsNonRoot?: boolean;
fsGroup?: number;
supplementalGroups?: number[];
}
export interface SecurityContext {
runAsUser?: number;
runAsGroup?: number;
runAsNonRoot?: boolean;
readOnlyRootFilesystem?: boolean;
allowPrivilegeEscalation?: boolean;
privileged?: boolean;
capabilities?: Capabilities;
}
export interface Capabilities {
add?: string[];
drop?: string[];
}
export interface LocalObjectReference {
name: string;
}
export interface ContainerPort {
name?: string;
containerPort: number;
protocol?: 'TCP' | 'UDP' | 'SCTP';
}
// ============================================
// PipelineStatus
// ============================================
export interface PipelineStatus {
/** Current phase of the pipeline */
phase: PipelinePhase;
/** When the pipeline started */
startTime?: string;
/** When the pipeline completed */
completionTime?: string;
/** Status of each step */
steps: StepStatus[];
/** Kubernetes-style conditions */
conditions?: Condition[];
}
export type PipelinePhase = 'Pending' | 'Running' | 'Suspended' | 'Succeeded' | 'Failed';
export type StepPhase = 'Pending' | 'Running' | 'Suspended' | 'Succeeded' | 'Failed' | 'Skipped';
export interface StepStatus {
/** Step name */
name: string;
/** Current phase of the step */
phase: StepPhase;
/** Name of the Job created for this step */
jobName?: string;
/** Status from the underlying Kubernetes Job */
jobStatus?: JobStatus;
}
export interface JobStatus {
active?: number;
succeeded?: number;
failed?: number;
startTime?: string;
completionTime?: string;
conditions?: JobCondition[];
}
export interface JobCondition {
type: 'Complete' | 'Failed' | 'Suspended';
status: 'True' | 'False' | 'Unknown';
reason?: string;
message?: string;
lastProbeTime?: string;
lastTransitionTime?: string;
}
export interface Condition {
type: string;
status: 'True' | 'False' | 'Unknown';
reason?: string;
message?: string;
lastTransitionTime?: string;
observedGeneration?: number;
}
// ============================================
// Helper Types for UI
// ============================================
/** Graph node for pipeline visualization */
export interface PipelineNode {
id: string;
type: 'step';
data: {
step: PipelineStep;
status?: StepStatus;
};
position: { x: number; y: number };
}
/** Graph edge for pipeline visualization */
export interface PipelineEdge {
id: string;
source: string;
target: string;
type: 'sequential' | 'success' | 'failure';
data?: {
condition?: 'success' | 'fail';
operator?: 'and' | 'or';
};
}
/** Pipeline graph representation */
export interface PipelineGraph {
nodes: PipelineNode[];
edges: PipelineEdge[];
}
// ============================================
// API Response Types
// ============================================
export interface WatchEvent<T> {
type: 'ADDED' | 'MODIFIED' | 'DELETED' | 'ERROR';
object: T;
}
export interface ApiError {
kind: 'Status';
apiVersion: 'v1';
metadata: Record<string, unknown>;
status: 'Failure';
message: string;
reason: string;
code: number;
}
| yaacov/jobrunner | 1 | A job runner runs Kubernetes jobs sequentially. | TypeScript | yaacov | Yaacov Zamir | Red Hat |
cmd/karl/main.go | Go | package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"github.com/yaacov/karl-interpreter/pkg/karl"
"gopkg.in/yaml.v2"
)
// main parses a single KARL rule from the command line, validates it, and
// prints the resulting Kubernetes affinity structure as YAML or JSON.
// All errors are reported on stderr with a non-zero exit status.
func main() {
	var (
		outputFormat = flag.String("format", "yaml", "Output format: yaml or json")
		prettyOutput = flag.Bool("pretty", false, "Pretty print output")
		showHelp     = flag.Bool("help", false, "Show help message")
	)
	flag.Parse()

	if *showHelp {
		printHelp()
		return
	}

	args := flag.Args()
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Error: No KARL rule provided\n")
		printUsage()
		os.Exit(1)
	}
	karlRule := args[0]

	// Create interpreter and parse the rule
	interpreter := karl.NewKARLInterpreter()
	if err := interpreter.Parse(karlRule); err != nil {
		fmt.Fprintf(os.Stderr, "Parse error: %v\n", err)
		os.Exit(1)
	}

	// Validate the rule
	if err := interpreter.Validate(); err != nil {
		fmt.Fprintf(os.Stderr, "Validation error: %v\n", err)
		os.Exit(1)
	}

	// Convert to Kubernetes affinity
	affinity, err := interpreter.ToAffinity()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Conversion error: %v\n", err)
		os.Exit(1)
	}

	// Output in requested format
	switch *outputFormat {
	case "json":
		// Single marshal path for both pretty and compact output
		// (previously two near-identical branches).
		marshal := json.Marshal
		if *prettyOutput {
			marshal = func(v interface{}) ([]byte, error) {
				return json.MarshalIndent(v, "", "  ")
			}
		}
		output, err := marshal(affinity)
		if err != nil {
			fmt.Fprintf(os.Stderr, "JSON marshal error: %v\n", err)
			os.Exit(1)
		}
		fmt.Println(string(output))
	case "yaml":
		output, err := yaml.Marshal(affinity)
		if err != nil {
			fmt.Fprintf(os.Stderr, "YAML marshal error: %v\n", err)
			os.Exit(1)
		}
		fmt.Print(string(output))
	default:
		fmt.Fprintf(os.Stderr, "Error: Unknown format '%s'. Use 'yaml' or 'json'\n", *outputFormat)
		os.Exit(1)
	}
}
// printHelp writes the full CLI reference (rule syntax, options and
// examples) to stdout.
func printHelp() {
	fmt.Printf(`KARL - Kubernetes Affinity Rule Language Interpreter

USAGE:
  karl [OPTIONS] "<KARL_RULE>"

DESCRIPTION:
  Convert a single KARL rule into Kubernetes Affinity/Anti-Affinity YAML or JSON.
  Outputs only the affinity structure, ready to be used in pod specifications.

KARL RULE SYNTAX:
  RULE_TYPE TARGET_SELECTOR on TOPOLOGY [weight=N]

  Rule Types:
    REQUIRE - Hard affinity constraint (must schedule near target pods)
    PREFER - Soft affinity constraint (prefer to schedule near target pods with weight)
    AVOID - Hard anti-affinity constraint (must not schedule near target pods)
    REPEL - Soft anti-affinity constraint (prefer not to schedule near target pods with weight)

  Target Selectors:
    pods(label_selector) - Select pods by expressive label selectors:
      pods(app=web) - Simple equality
      pods(app=web,tier=frontend) - Multiple labels (AND operation)
      pods(app in [web,api]) - Label value in list
      pods(app not in [batch,test]) - Label value not in list
      pods(has monitoring) - Label exists
      pods(not has debug) - Label does not exist

  Topology Keys:
    node - Same Kubernetes node (kubernetes.io/hostname)
    zone - Same availability zone (topology.kubernetes.io/zone)
    region - Same region (topology.kubernetes.io/region)
    rack - Same rack (topology.kubernetes.io/rack)

OPTIONS:
  -format string
    Output format: yaml or json (default "yaml")
  -pretty
    Pretty print output (for JSON)
  -help
    Show this help message

EXAMPLES:
  # Hard affinity: require database pods on same node
  karl "REQUIRE pods(app=database) on node"

  # Soft anti-affinity: spread web/frontend pods across zones
  karl "REPEL pods(app in [web,frontend]) on zone weight=80"

  # Hard anti-affinity: avoid pods with debug labels
  karl "AVOID pods(has debug) on node"

  # Soft affinity: prefer to be near production workloads
  karl "PREFER pods(not has test) on zone weight=90"

  # Complex label selector with multiple conditions
  karl "REQUIRE pods(app=web,tier=frontend,env not in [test,debug]) on node"

OUTPUT:
  The tool outputs only the affinity structure in YAML or JSON format.
  You can use this output directly in your pod specifications under spec.affinity.
`)
}
// printUsage writes a one-line usage hint to stdout (used when the rule
// argument is missing; see printHelp for the full reference).
func printUsage() {
	fmt.Printf(`Usage: karl [OPTIONS] "<KARL_RULE>"
Try 'karl -help' for more information.
`)
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/converter.go | Go | package karl
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Converter handles conversion of KARL rules to Kubernetes affinity
// structures. It holds no state, so one instance can be reused freely.
type Converter struct{}

// NewConverter creates a new converter instance.
func NewConverter() *Converter {
	return &Converter{}
}
// ToAffinity builds a fresh Kubernetes Affinity structure from a single
// KARL rule.
func (c *Converter) ToAffinity(rule KARLRule) (*corev1.Affinity, error) {
	result := &corev1.Affinity{}
	if err := c.addRuleToAffinity(result, rule); err != nil {
		return nil, err
	}
	return result, nil
}
// addRuleToAffinity translates one KARL rule into the matching list on the
// affinity structure. The mapping is:
//
//	REQUIRE -> podAffinity.required...      (hard affinity)
//	PREFER  -> podAffinity.preferred...     (soft affinity, weighted)
//	AVOID   -> podAntiAffinity.required...  (hard anti-affinity)
//	REPEL   -> podAntiAffinity.preferred... (soft anti-affinity, weighted)
func (c *Converter) addRuleToAffinity(affinity *corev1.Affinity, rule KARLRule) error {
	labelSelector, err := c.createLabelSelector(rule.TargetSelector)
	if err != nil {
		return err
	}

	// Base term shared by hard and soft constraints.
	term := corev1.PodAffinityTerm{
		LabelSelector: labelSelector,
		TopologyKey:   c.getTopologyKey(rule),
	}

	antiAffinity := rule.RuleType == RuleTypeAvoid || rule.RuleType == RuleTypeRepel
	hard := rule.RuleType == RuleTypeRequire || rule.RuleType == RuleTypeAvoid

	// Lazily create only the container this rule needs, so the unused
	// section stays nil (and is omitted from serialized output).
	if antiAffinity {
		if affinity.PodAntiAffinity == nil {
			affinity.PodAntiAffinity = &corev1.PodAntiAffinity{}
		}
	} else {
		if affinity.PodAffinity == nil {
			affinity.PodAffinity = &corev1.PodAffinity{}
		}
	}

	switch {
	case hard && antiAffinity:
		affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
			affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, term)
	case hard:
		affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
			affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, term)
	case antiAffinity:
		weighted := corev1.WeightedPodAffinityTerm{Weight: rule.Weight, PodAffinityTerm: term}
		affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
			affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weighted)
	default:
		weighted := corev1.WeightedPodAffinityTerm{Weight: rule.Weight, PodAffinityTerm: term}
		affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
			affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weighted)
	}

	return nil
}
// getTopologyKey maps a KARL topology keyword to the corresponding
// Kubernetes topology label key, falling back to the node hostname label
// for unrecognized values.
func (c *Converter) getTopologyKey(rule KARLRule) string {
	topologyLabels := map[TopologyKey]string{
		TopologyNode:   "kubernetes.io/hostname",
		TopologyZone:   "topology.kubernetes.io/zone",
		TopologyRegion: "topology.kubernetes.io/region",
		TopologyRack:   "topology.kubernetes.io/rack",
	}
	if label, ok := topologyLabels[rule.TopologyKey]; ok {
		return label
	}
	return "kubernetes.io/hostname" // fallback
}
// createLabelSelector creates a Kubernetes label selector from a KARL
// target selector. Equality expressions become MatchLabels entries; all
// other operations become MatchExpressions. Only the "pods" target type
// is supported.
func (c *Converter) createLabelSelector(target TargetSelector) (*metav1.LabelSelector, error) {
	if target.Type != "pods" {
		// Return nil on error for consistency with the unsupported-operation
		// path below (previously this path returned a non-nil empty selector).
		return nil, fmt.Errorf("unsupported target type: %s", target.Type)
	}

	labelSelector := &metav1.LabelSelector{}
	// Separate equality matches from expression-based matches.
	matchLabels := make(map[string]string)
	var matchExpressions []metav1.LabelSelectorRequirement

	for _, selector := range target.LabelSelectors {
		switch selector.Operation {
		case LabelOpEquals:
			if len(selector.Values) > 0 {
				matchLabels[selector.Key] = selector.Values[0]
			}
		case LabelOpIn:
			matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{
				Key:      selector.Key,
				Operator: metav1.LabelSelectorOpIn,
				Values:   selector.Values,
			})
		case LabelOpNotIn:
			matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{
				Key:      selector.Key,
				Operator: metav1.LabelSelectorOpNotIn,
				Values:   selector.Values,
			})
		case LabelOpExists:
			matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{
				Key:      selector.Key,
				Operator: metav1.LabelSelectorOpExists,
			})
		case LabelOpNotExists:
			matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{
				Key:      selector.Key,
				Operator: metav1.LabelSelectorOpDoesNotExist,
			})
		default:
			return nil, fmt.Errorf("unsupported label operation: %s", selector.Operation)
		}
	}

	// Only attach non-empty collections so the emitted selector stays minimal.
	if len(matchLabels) > 0 {
		labelSelector.MatchLabels = matchLabels
	}
	if len(matchExpressions) > 0 {
		labelSelector.MatchExpressions = matchExpressions
	}
	return labelSelector, nil
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/converter_test.go | Go | package karl
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestToAffinity exercises all four KARL rule types and verifies that each
// lands in the expected affinity/anti-affinity list as a hard (required) or
// soft (preferred, weighted) constraint.
func TestToAffinity(t *testing.T) {
	converter := &Converter{}
	tests := []struct {
		name                 string
		rule                 KARLRule
		expectPodAffinity    bool
		expectAntiAffinity   bool
		expectHardConstraint bool
		expectSoftConstraint bool
		expectedWeight       int32 // only checked for soft constraints
	}{
		{
			name: "REQUIRE rule creates hard affinity",
			rule: KARLRule{
				RuleType: RuleTypeRequire,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyNode,
			},
			expectPodAffinity:    true,
			expectAntiAffinity:   false,
			expectHardConstraint: true,
			expectSoftConstraint: false,
		},
		{
			name: "PREFER rule creates soft affinity",
			rule: KARLRule{
				RuleType: RuleTypePrefer,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyZone,
				Weight:      80,
			},
			expectPodAffinity:    true,
			expectAntiAffinity:   false,
			expectHardConstraint: false,
			expectSoftConstraint: true,
			expectedWeight:       80,
		},
		{
			name: "AVOID rule creates hard anti-affinity",
			rule: KARLRule{
				RuleType: RuleTypeAvoid,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"test"}},
					},
				},
				TopologyKey: TopologyNode,
			},
			expectPodAffinity:    false,
			expectAntiAffinity:   true,
			expectHardConstraint: true,
			expectSoftConstraint: false,
		},
		{
			name: "REPEL rule creates soft anti-affinity",
			rule: KARLRule{
				RuleType: RuleTypeRepel,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"batch"}},
					},
				},
				TopologyKey: TopologyZone,
				Weight:      90,
			},
			expectPodAffinity:    false,
			expectAntiAffinity:   true,
			expectHardConstraint: false,
			expectSoftConstraint: true,
			expectedWeight:       90,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			affinity, err := converter.ToAffinity(tt.rule)
			require.NoError(t, err, "ToAffinity() should not return error")
			// Check pod affinity vs anti-affinity
			if tt.expectPodAffinity {
				assert.NotNil(t, affinity.PodAffinity, "Expected PodAffinity to be set")
			}
			if tt.expectAntiAffinity {
				assert.NotNil(t, affinity.PodAntiAffinity, "Expected PodAntiAffinity to be set")
			}
			// Check hard vs soft constraints
			if tt.expectHardConstraint {
				if tt.expectPodAffinity {
					assert.NotEmpty(t, affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, "Expected hard affinity constraint")
				} else if tt.expectAntiAffinity {
					assert.NotEmpty(t, affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, "Expected hard anti-affinity constraint")
				}
			}
			if tt.expectSoftConstraint {
				// require (not assert) here: indexing [0] below would panic on an empty list.
				if tt.expectPodAffinity {
					require.NotEmpty(t, affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, "Expected soft affinity constraint")
					weight := affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight
					assert.Equal(t, tt.expectedWeight, weight, "Weight mismatch")
				} else if tt.expectAntiAffinity {
					require.NotEmpty(t, affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, "Expected soft anti-affinity constraint")
					weight := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight
					assert.Equal(t, tt.expectedWeight, weight, "Weight mismatch")
				}
			}
		})
	}
}
// TestGetTopologyKey verifies the mapping from KARL topology keywords to
// Kubernetes topology label keys.
func TestGetTopologyKey(t *testing.T) {
	converter := &Converter{}
	expectations := map[TopologyKey]string{
		TopologyNode:   "kubernetes.io/hostname",
		TopologyZone:   "topology.kubernetes.io/zone",
		TopologyRegion: "topology.kubernetes.io/region",
		TopologyRack:   "topology.kubernetes.io/rack",
	}
	for topology, want := range expectations {
		t.Run(string(topology), func(t *testing.T) {
			got := converter.getTopologyKey(KARLRule{TopologyKey: topology})
			if got != want {
				t.Errorf("getTopologyKey() = %v, expected %v", got, want)
			}
		})
	}
}
// TestCreateLabelSelector checks that equality selectors populate
// MatchLabels while set-based/existence selectors populate MatchExpressions,
// including a mixed case using both.
func TestCreateLabelSelector(t *testing.T) {
	converter := &Converter{}
	tests := []struct {
		name                string
		target              TargetSelector
		expectMatchLabels   bool
		expectExpressions   bool
		expectedLabelsCount int
		expectedExpCount    int
	}{
		{
			name: "Simple equality selector",
			target: TargetSelector{
				Type: "pods",
				LabelSelectors: []LabelSelector{
					{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
				},
			},
			expectMatchLabels:   true,
			expectExpressions:   false,
			expectedLabelsCount: 1,
			expectedExpCount:    0,
		},
		{
			name: "In operation selector",
			target: TargetSelector{
				Type: "pods",
				LabelSelectors: []LabelSelector{
					{Key: "env", Operation: LabelOpIn, Values: []string{"prod", "staging"}},
				},
			},
			expectMatchLabels:   false,
			expectExpressions:   true,
			expectedLabelsCount: 0,
			expectedExpCount:    1,
		},
		{
			name: "Mixed selectors",
			target: TargetSelector{
				Type: "pods",
				LabelSelectors: []LabelSelector{
					{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					{Key: "env", Operation: LabelOpIn, Values: []string{"prod", "staging"}},
					{Key: "monitoring", Operation: LabelOpExists},
				},
			},
			expectMatchLabels:   true,
			expectExpressions:   true,
			expectedLabelsCount: 1,
			expectedExpCount:    2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			labelSelector, err := converter.createLabelSelector(tt.target)
			if err != nil {
				t.Errorf("createLabelSelector() error = %v", err)
				return
			}
			if tt.expectMatchLabels {
				if labelSelector.MatchLabels == nil {
					t.Error("Expected MatchLabels to be set")
				} else if len(labelSelector.MatchLabels) != tt.expectedLabelsCount {
					t.Errorf("Expected %d match labels, got %d", tt.expectedLabelsCount, len(labelSelector.MatchLabels))
				}
			} else {
				if len(labelSelector.MatchLabels) > 0 {
					t.Error("Expected MatchLabels to be empty")
				}
			}
			if tt.expectExpressions {
				if len(labelSelector.MatchExpressions) != tt.expectedExpCount {
					t.Errorf("Expected %d match expressions, got %d", tt.expectedExpCount, len(labelSelector.MatchExpressions))
				}
			} else {
				if len(labelSelector.MatchExpressions) > 0 {
					t.Error("Expected MatchExpressions to be empty")
				}
			}
		})
	}
}
// TestLabelOperationToKubernetes verifies that each set-based KARL label
// operation converts to the matching metav1.LabelSelectorOperator.
func TestLabelOperationToKubernetes(t *testing.T) {
	tests := []struct {
		operation LabelOperation
		expected  metav1.LabelSelectorOperator
	}{
		{LabelOpIn, metav1.LabelSelectorOpIn},
		{LabelOpNotIn, metav1.LabelSelectorOpNotIn},
		{LabelOpExists, metav1.LabelSelectorOpExists},
		{LabelOpNotExists, metav1.LabelSelectorOpDoesNotExist},
	}
	for _, tt := range tests {
		t.Run(string(tt.operation), func(t *testing.T) {
			converter := &Converter{}
			target := TargetSelector{
				Type: "pods",
				LabelSelectors: []LabelSelector{
					// NOTE(review): exists/not-exists selectors carry a value here;
					// the converter ignores Values for those operations.
					{Key: "test", Operation: tt.operation, Values: []string{"value"}},
				},
			}
			labelSelector, err := converter.createLabelSelector(target)
			if err != nil {
				t.Errorf("createLabelSelector() error = %v", err)
				return
			}
			if len(labelSelector.MatchExpressions) == 0 {
				t.Error("Expected at least one match expression")
				return
			}
			if labelSelector.MatchExpressions[0].Operator != tt.expected {
				t.Errorf("Expected operator %v, got %v", tt.expected, labelSelector.MatchExpressions[0].Operator)
			}
		})
	}
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/interpreter.go | Go | package karl
import (
corev1 "k8s.io/api/core/v1"
)
// NewKARLInterpreter creates a new KARL interpreter instance with its
// parser, validator, and converter components wired up.
func NewKARLInterpreter() *KARLInterpreter {
	interpreter := &KARLInterpreter{}
	interpreter.parser = NewParser()
	interpreter.validator = NewValidator()
	interpreter.converter = NewConverter()
	return interpreter
}
// Parse parses a single KARL rule from a string and stores it on the
// interpreter for subsequent Validate / ToAffinity calls.
func (k *KARLInterpreter) Parse(karlRule string) error {
	parsed, err := k.parser.ParseRule(karlRule)
	if err != nil {
		return err
	}
	k.rule = parsed
	return nil
}
// Validate validates the most recently parsed rule. Call Parse first:
// validating the zero-value rule fails (the validator rejects an empty
// rule type).
func (k *KARLInterpreter) Validate() error {
	return k.validator.ValidateRule(k.rule)
}
// ToAffinity converts the most recently parsed KARL rule to a Kubernetes
// Affinity structure. Call Parse (and ideally Validate) first.
func (k *KARLInterpreter) ToAffinity() (*corev1.Affinity, error) {
	return k.converter.ToAffinity(k.rule)
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/interpreter_test.go | Go | package karl
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestExpressiveLabelSelectors runs the full Parse -> Validate -> ToAffinity
// pipeline over the expressive selector syntax (=, in, not in, has, not has,
// and combinations).
func TestExpressiveLabelSelectors(t *testing.T) {
	// NOTE(review): a single interpreter is reused across subtests; each
	// Parse overwrites the stored rule, so subtests must not run in parallel.
	interpreter := NewKARLInterpreter()
	tests := []struct {
		name        string
		rule        string
		expectError bool
	}{
		{
			name:        "Simple equality",
			rule:        "REQUIRE pods(app=web) on node",
			expectError: false,
		},
		{
			name:        "Multiple labels",
			rule:        "PREFER pods(app=web,tier=frontend) on zone weight=80",
			expectError: false,
		},
		{
			name:        "In operation",
			rule:        "REPEL pods(app in [web,api,frontend]) on zone weight=90",
			expectError: false,
		},
		{
			name:        "Not in operation",
			rule:        "AVOID pods(env not in [test,debug]) on node",
			expectError: false,
		},
		{
			name:        "Has operation",
			rule:        "REQUIRE pods(has monitoring) on zone",
			expectError: false,
		},
		{
			name:        "Not has operation",
			rule:        "REPEL pods(not has debug) on node weight=75",
			expectError: false,
		},
		{
			name:        "Mixed operations",
			rule:        "PREFER pods(app=web,env not in [test,debug],has monitoring) on zone weight=85",
			expectError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := interpreter.Parse(tt.rule)
			if tt.expectError {
				assert.Error(t, err, "Expected parse error for rule: %s", tt.rule)
			} else {
				require.NoError(t, err, "Unexpected parse error for rule: %s", tt.rule)
				// Validate the parsed rule
				err = interpreter.Validate()
				require.NoError(t, err, "Validation failed for rule: %s", tt.rule)
				// Test conversion to affinity
				_, err = interpreter.ToAffinity()
				assert.NoError(t, err, "ToAffinity conversion failed for rule: %s", tt.rule)
			}
		})
	}
}
// TestKARLInterpreterIntegration drives the complete workflow for one REPEL
// rule and asserts the resulting soft anti-affinity term's weight and
// topology key.
func TestKARLInterpreterIntegration(t *testing.T) {
	interpreter := NewKARLInterpreter()
	// Test complete workflow
	rule := "REPEL pods(app in [web,api],has debug) on zone weight=85"
	err := interpreter.Parse(rule)
	require.NoError(t, err, "Failed to parse rule")
	err = interpreter.Validate()
	require.NoError(t, err, "Failed to validate rule")
	affinity, err := interpreter.ToAffinity()
	require.NoError(t, err, "Failed to convert to affinity")
	// Verify the result has pod anti-affinity (REPEL => soft anti-affinity)
	require.NotNil(t, affinity.PodAntiAffinity, "Expected PodAntiAffinity to be set")
	assert.NotEmpty(t, affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
		"Expected preferred anti-affinity terms")
	term := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0]
	assert.Equal(t, int32(85), term.Weight, "Expected weight 85")
	assert.Equal(t, "topology.kubernetes.io/zone", term.PodAffinityTerm.TopologyKey,
		"Expected zone topology key")
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/parser.go | Go | package karl
import (
	"fmt"
	"strconv"
	"strings"
)
// Parser handles parsing of KARL rules and label selectors.
// It is stateless; every ParseRule call is independent.
type Parser struct{}

// NewParser creates a new parser instance.
func NewParser() *Parser {
	return &Parser{}
}
// ParseRule parses a complete KARL rule string of the form:
//
//	<RULE_TYPE> <target(selector)> on <topology> [weight=N]
//
// e.g. "PREFER pods(app=web) on zone weight=80". A trailing ';' is
// tolerated. Token positions are fixed: type, target, "on", topology,
// then optional trailing parameters.
func (p *Parser) ParseRule(karlRule string) (KARLRule, error) {
	rule := KARLRule{}
	// Clean up the rule
	line := strings.TrimSpace(karlRule)
	line = strings.TrimSuffix(line, ";")
	// Tokenize the rule; quoted strings and parenthesized selectors stay
	// as single tokens (see tokenize).
	tokens := p.tokenize(line)
	if len(tokens) < 4 {
		return rule, fmt.Errorf("invalid rule syntax: %s", line)
	}
	// Parse rule type
	ruleType, err := p.parseRuleType(tokens[0])
	if err != nil {
		return rule, err
	}
	rule.RuleType = ruleType
	// Parse target selector
	targetSelector, err := p.parseTargetSelector(tokens[1])
	if err != nil {
		return rule, fmt.Errorf("invalid target selector: %w", err)
	}
	rule.TargetSelector = targetSelector
	// Expect "on" keyword at index 2
	if strings.ToLower(tokens[2]) != "on" {
		return rule, fmt.Errorf("expected 'on' keyword, got: %s", tokens[2])
	}
	// Parse topology key (now at index 3)
	topologyKey, err := p.parseTopologyKey(tokens[3])
	if err != nil {
		return rule, err
	}
	rule.TopologyKey = topologyKey
	// Parse weight for soft constraint rules (PREFER and REPEL)
	if rule.RuleType == RuleTypePrefer || rule.RuleType == RuleTypeRepel {
		rule.Weight = 100 // default weight when no weight= token is given
		// Look for weight parameter (starting from index 4)
		for i := 4; i < len(tokens); i++ {
			if strings.HasPrefix(tokens[i], "weight=") {
				weight, err := p.parseWeight(tokens[i])
				if err != nil {
					return rule, err
				}
				rule.Weight = weight
				break
			}
		}
	}
	return rule, nil
}
// parseRuleType resolves the (case-insensitive) rule-type keyword into a
// RuleType, rejecting anything outside the four known keywords.
func (p *Parser) parseRuleType(token string) (RuleType, error) {
	ruleTypes := map[string]RuleType{
		"REQUIRE": RuleTypeRequire,
		"PREFER":  RuleTypePrefer,
		"AVOID":   RuleTypeAvoid,
		"REPEL":   RuleTypeRepel,
	}
	if ruleType, ok := ruleTypes[strings.ToUpper(token)]; ok {
		return ruleType, nil
	}
	return "", fmt.Errorf("unknown rule type: %s", token)
}
// parseTargetSelector parses a target selector token of the form
// "type(content)", e.g. "pods(app=web,has monitoring)". Only the "pods"
// target type is currently supported.
func (p *Parser) parseTargetSelector(selector string) (TargetSelector, error) {
	target := TargetSelector{}
	// The token must contain "(" and end with ")" to be well-formed.
	if !strings.Contains(selector, "(") || !strings.HasSuffix(selector, ")") {
		return target, fmt.Errorf("invalid selector format: %s", selector)
	}
	// Extract type (before the first paren) and content (inside the parens)
	parenIndex := strings.Index(selector, "(")
	selectorType := selector[:parenIndex]
	content := selector[parenIndex+1 : len(selector)-1]
	target.Type = selectorType
	switch selectorType {
	case "pods":
		// Parse expressive label selectors
		selectors, err := p.parseLabelSelectors(content)
		if err != nil {
			return target, fmt.Errorf("invalid label selector: %w", err)
		}
		target.LabelSelectors = selectors
	default:
		return target, fmt.Errorf("unsupported target type: %s", selectorType)
	}
	return target, nil
}
// parseTopologyKey resolves the (case-insensitive) topology keyword into a
// TopologyKey, rejecting unknown domains.
func (p *Parser) parseTopologyKey(token string) (TopologyKey, error) {
	topologies := map[string]TopologyKey{
		"node":   TopologyNode,
		"zone":   TopologyZone,
		"region": TopologyRegion,
		"rack":   TopologyRack,
	}
	if topology, ok := topologies[strings.ToLower(token)]; ok {
		return topology, nil
	}
	return "", fmt.Errorf("unknown topology key: %s", token)
}
// parseWeight parses the integer weight from a "weight=N" token.
// The value must be a plain decimal integer in the inclusive range 1-100.
func (p *Parser) parseWeight(token string) (int32, error) {
	weightStr := strings.TrimPrefix(token, "weight=")
	// strconv.Atoi rejects trailing garbage: the previous fmt.Sscanf("%d")
	// parse silently accepted e.g. "weight=12abc" as 12.
	weight, err := strconv.Atoi(weightStr)
	if err != nil {
		return 0, fmt.Errorf("invalid weight value: %s", weightStr)
	}
	if weight < 1 || weight > 100 {
		return 0, fmt.Errorf("weight must be between 1 and 100: %d", weight)
	}
	return int32(weight), nil
}
// parseLabelSelectors parses a comma-separated list of label selector
// expressions. Commas inside bracketed value lists ("in [a,b]") do not
// split expressions. An empty content string yields no selectors.
func (p *Parser) parseLabelSelectors(content string) ([]LabelSelector, error) {
	var selectors []LabelSelector
	if content == "" {
		return selectors, nil
	}
	for _, raw := range p.splitLabelExpressions(content) {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		parsed, err := p.parseSingleLabelSelector(trimmed)
		if err != nil {
			return nil, err
		}
		selectors = append(selectors, parsed)
	}
	return selectors, nil
}
// splitLabelExpressions splits the selector content on commas while keeping
// commas inside [...] value lists intact, by tracking bracket nesting depth.
func (p *Parser) splitLabelExpressions(content string) []string {
	var expressions []string
	var current strings.Builder
	bracketDepth := 0
	for _, char := range content {
		switch char {
		case '[':
			bracketDepth++
			current.WriteRune(char)
		case ']':
			bracketDepth--
			current.WriteRune(char)
		case ',':
			// Only a top-level comma terminates the current expression.
			if bracketDepth == 0 {
				expressions = append(expressions, current.String())
				current.Reset()
			} else {
				current.WriteRune(char)
			}
		default:
			current.WriteRune(char)
		}
	}
	// Flush the trailing expression, if any.
	if current.Len() > 0 {
		expressions = append(expressions, current.String())
	}
	return expressions
}
// parseSingleLabelSelector parses one label selector expression. Supported
// forms, tried in order:
//
//	has KEY            -> exists
//	not has KEY        -> not exists
//	KEY not in [v,...] -> not in
//	KEY in [v,...]     -> in
//	KEY=VALUE          -> equality
func (p *Parser) parseSingleLabelSelector(expr string) (LabelSelector, error) {
	expr = strings.TrimSpace(expr)
	// Handle "has key" syntax
	if strings.HasPrefix(expr, "has ") {
		key := strings.TrimSpace(expr[4:])
		return LabelSelector{
			Key:       key,
			Operation: LabelOpExists,
		}, nil
	}
	// Handle "not has key" syntax
	if strings.HasPrefix(expr, "not has ") {
		key := strings.TrimSpace(expr[8:])
		return LabelSelector{
			Key:       key,
			Operation: LabelOpNotExists,
		}, nil
	}
	// Handle "key not in [value1,value2]" syntax. This MUST come before the
	// "in" check: an expression containing " not in [" also contains " in [".
	if strings.Contains(expr, " not in [") && strings.HasSuffix(expr, "]") {
		parts := strings.Split(expr, " not in [")
		if len(parts) != 2 {
			return LabelSelector{}, fmt.Errorf("invalid 'not in' expression: %s", expr)
		}
		key := strings.TrimSpace(parts[0])
		valueList := strings.TrimSuffix(parts[1], "]")
		values := p.parseValueList(valueList)
		return LabelSelector{
			Key:       key,
			Operation: LabelOpNotIn,
			Values:    values,
		}, nil
	}
	// Handle "key in [value1,value2]" syntax
	if strings.Contains(expr, " in [") && strings.HasSuffix(expr, "]") {
		parts := strings.Split(expr, " in [")
		if len(parts) != 2 {
			return LabelSelector{}, fmt.Errorf("invalid 'in' expression: %s", expr)
		}
		key := strings.TrimSpace(parts[0])
		valueList := strings.TrimSuffix(parts[1], "]")
		values := p.parseValueList(valueList)
		return LabelSelector{
			Key:       key,
			Operation: LabelOpIn,
			Values:    values,
		}, nil
	}
	// Handle simple "key=value" syntax (rejects values containing '=')
	if strings.Contains(expr, "=") {
		kv := strings.Split(expr, "=")
		if len(kv) != 2 {
			return LabelSelector{}, fmt.Errorf("invalid equality expression: %s", expr)
		}
		key := strings.TrimSpace(kv[0])
		value := strings.TrimSpace(kv[1])
		return LabelSelector{
			Key:       key,
			Operation: LabelOpEquals,
			Values:    []string{value},
		}, nil
	}
	return LabelSelector{}, fmt.Errorf("unrecognized label selector expression: %s", expr)
}
// parseValueList splits a comma-separated value list, trimming surrounding
// whitespace and discarding empty entries.
func (p *Parser) parseValueList(valueList string) []string {
	var values []string
	for _, raw := range strings.Split(valueList, ",") {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			values = append(values, trimmed)
		}
	}
	return values
}
// tokenize splits a line into whitespace-separated tokens while keeping
// double-quoted strings and parenthesized groups (e.g. "pods(a=b, c=d)")
// together as single tokens.
func (p *Parser) tokenize(line string) []string {
	var tokens []string
	var current strings.Builder
	inQuotes := false
	parenDepth := 0
	for _, char := range line {
		switch char {
		case '"':
			// Toggle quote state; the quote character itself is preserved.
			inQuotes = !inQuotes
			current.WriteRune(char)
		case '(':
			parenDepth++
			current.WriteRune(char)
		case ')':
			parenDepth--
			current.WriteRune(char)
		case ' ', '\t':
			// Whitespace only ends a token at top level (outside quotes/parens).
			if inQuotes || parenDepth > 0 {
				current.WriteRune(char)
			} else if current.Len() > 0 {
				tokens = append(tokens, current.String())
				current.Reset()
			}
		default:
			current.WriteRune(char)
		}
	}
	// Flush the final token, if any.
	if current.Len() > 0 {
		tokens = append(tokens, current.String())
	}
	return tokens
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/parser_test.go | Go | package karl
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParseRule covers parsing of all four rule types plus the main error
// cases: unknown keyword, too few tokens, and an unknown topology domain.
func TestParseRule(t *testing.T) {
	parser := &Parser{}
	tests := []struct {
		name         string
		rule         string
		expectError  bool
		expectedType RuleType // only checked when no error is expected
	}{
		{
			name:         "REQUIRE rule",
			rule:         "REQUIRE pods(app=web) on node",
			expectError:  false,
			expectedType: RuleTypeRequire,
		},
		{
			name:         "PREFER rule with weight",
			rule:         "PREFER pods(app=web) on zone weight=80",
			expectError:  false,
			expectedType: RuleTypePrefer,
		},
		{
			name:         "AVOID rule",
			rule:         "AVOID pods(app=test) on node",
			expectError:  false,
			expectedType: RuleTypeAvoid,
		},
		{
			name:         "REPEL rule with weight",
			rule:         "REPEL pods(app=batch) on zone weight=90",
			expectError:  false,
			expectedType: RuleTypeRepel,
		},
		{
			name:        "Invalid rule type",
			rule:        "INVALID pods(app=web) on node",
			expectError: true,
		},
		{
			name:        "Too few tokens",
			rule:        "REQUIRE pods(app=web)",
			expectError: true,
		},
		{
			name:        "Invalid topology",
			rule:        "REQUIRE pods(app=web) on invalid",
			expectError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rule, err := parser.ParseRule(tt.rule)
			if tt.expectError {
				assert.Error(t, err, "Expected error for rule: %s", tt.rule)
			} else {
				require.NoError(t, err, "Unexpected error for rule: %s", tt.rule)
				assert.Equal(t, tt.expectedType, rule.RuleType, "Rule type mismatch")
			}
		})
	}
}
// TestParseLabelSelectors verifies that selector lists split into the
// expected number of selectors, including bracketed value lists whose
// internal commas must not split the expression.
func TestParseLabelSelectors(t *testing.T) {
	parser := &Parser{}
	tests := []struct {
		name     string
		input    string
		expected int // number of label selectors expected
	}{
		{
			name:     "Simple equality",
			input:    "app=web",
			expected: 1,
		},
		{
			name:     "Multiple labels",
			input:    "app=web,tier=frontend",
			expected: 2,
		},
		{
			name:     "In operation",
			input:    "app in [web,api]",
			expected: 1,
		},
		{
			name:     "Not in operation",
			input:    "env not in [test,debug]",
			expected: 1,
		},
		{
			name:     "Has operation",
			input:    "has monitoring",
			expected: 1,
		},
		{
			name:     "Not has operation",
			input:    "not has debug",
			expected: 1,
		},
		{
			name:     "Mixed operations",
			input:    "app=web,env not in [test],has prod",
			expected: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			selectors, err := parser.parseLabelSelectors(tt.input)
			require.NoError(t, err, "Unexpected error parsing label selectors")
			assert.Len(t, selectors, tt.expected, "Number of selectors mismatch for input: %s", tt.input)
		})
	}
}
// TestParseSingleLabelSelector checks that each selector syntax maps to the
// expected operation, key, and value list (nil for existence checks).
func TestParseSingleLabelSelector(t *testing.T) {
	parser := &Parser{}
	tests := []struct {
		name              string
		input             string
		expectedOperation LabelOperation
		expectedKey       string
		expectedValues    []string
	}{
		{
			name:              "Simple equality",
			input:             "app=web",
			expectedOperation: LabelOpEquals,
			expectedKey:       "app",
			expectedValues:    []string{"web"},
		},
		{
			name:              "In operation",
			input:             "app in [web,api]",
			expectedOperation: LabelOpIn,
			expectedKey:       "app",
			expectedValues:    []string{"web", "api"},
		},
		{
			name:              "Not in operation",
			input:             "env not in [test,debug]",
			expectedOperation: LabelOpNotIn,
			expectedKey:       "env",
			expectedValues:    []string{"test", "debug"},
		},
		{
			name:              "Has operation",
			input:             "has monitoring",
			expectedOperation: LabelOpExists,
			expectedKey:       "monitoring",
			expectedValues:    nil,
		},
		{
			name:              "Not has operation",
			input:             "not has debug",
			expectedOperation: LabelOpNotExists,
			expectedKey:       "debug",
			expectedValues:    nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			selector, err := parser.parseSingleLabelSelector(tt.input)
			require.NoError(t, err, "Unexpected error parsing single label selector")
			assert.Equal(t, tt.expectedOperation, selector.Operation, "Operation mismatch")
			assert.Equal(t, tt.expectedKey, selector.Key, "Key mismatch")
			assert.Equal(t, tt.expectedValues, selector.Values, "Values mismatch")
		})
	}
}
// TestParseTargetSelector covers well-formed pods(...) selectors plus the
// unsupported-target-type and malformed-syntax error paths.
func TestParseTargetSelector(t *testing.T) {
	parser := &Parser{}
	tests := []struct {
		name           string
		input          string
		expectedType   string
		expectedLabels int
		expectError    bool
	}{
		{
			name:           "Simple pods selector",
			input:          "pods(app=web)",
			expectedType:   "pods",
			expectedLabels: 1,
			expectError:    false,
		},
		{
			name:           "Multiple labels",
			input:          "pods(app=web,tier=frontend)",
			expectedType:   "pods",
			expectedLabels: 2,
			expectError:    false,
		},
		{
			name:        "Unsupported target type",
			input:       "services(my-service)",
			expectError: true,
		},
		{
			name:        "Invalid format",
			input:       "invalid",
			expectError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			target, err := parser.parseTargetSelector(tt.input)
			if tt.expectError {
				assert.Error(t, err, "Expected error for input: %s", tt.input)
			} else {
				require.NoError(t, err, "Unexpected error for input: %s", tt.input)
				assert.Equal(t, tt.expectedType, target.Type, "Target type mismatch")
				assert.Len(t, target.LabelSelectors, tt.expectedLabels, "Label selectors count mismatch")
			}
		})
	}
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/types.go | Go | package karl
import (
corev1 "k8s.io/api/core/v1"
)
// RuleType represents the type of affinity rule.
type RuleType string

const (
	RuleTypeRequire RuleType = "REQUIRE" // Hard affinity
	RuleTypePrefer  RuleType = "PREFER"  // Soft affinity
	RuleTypeAvoid   RuleType = "AVOID"   // Hard anti-affinity
	RuleTypeRepel   RuleType = "REPEL"   // Soft anti-affinity
)

// TopologyKey represents the topology domain a rule applies within
// (the parser maps the keyword; the converter maps it to a node label).
type TopologyKey string

const (
	TopologyNode   TopologyKey = "node"
	TopologyZone   TopologyKey = "zone"
	TopologyRegion TopologyKey = "region"
	TopologyRack   TopologyKey = "rack"
)

// LabelOperation represents the type of label operation.
type LabelOperation string

const (
	LabelOpEquals    LabelOperation = "="
	LabelOpIn        LabelOperation = "in"
	LabelOpNotIn     LabelOperation = "not in"
	LabelOpExists    LabelOperation = "exists"
	LabelOpNotExists LabelOperation = "not exists"
)

// LabelSelector represents a single label selection criterion.
type LabelSelector struct {
	Key       string
	Operation LabelOperation
	Values    []string // for equality, in, not in operations; empty for exists/not exists
}

// TargetSelector represents how to select target pods.
type TargetSelector struct {
	Type           string          // currently only "pods" is supported
	LabelSelectors []LabelSelector // expressive label selectors
}

// KARLRule represents a parsed KARL rule.
type KARLRule struct {
	RuleType       RuleType
	TargetSelector TargetSelector
	TopologyKey    TopologyKey
	Weight         int32 // for soft constraint rules (PREFER/REPEL); 1-100
}

// KARLInterpreter handles parsing and conversion of KARL rules.
// It holds the single most recently parsed rule.
type KARLInterpreter struct {
	rule      KARLRule
	parser    *Parser
	validator *Validator
	converter *Converter
}

// AffinityConverter interface for converting KARL rules to Kubernetes affinity.
type AffinityConverter interface {
	ToAffinity(rule KARLRule) (*corev1.Affinity, error)
}

// RuleParser interface for parsing KARL rules.
type RuleParser interface {
	ParseRule(karlRule string) (KARLRule, error)
}

// RuleValidator interface for validating KARL rules.
type RuleValidator interface {
	ValidateRule(rule KARLRule) error
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/validator.go | Go | package karl
import "fmt"
// Validator handles validation of KARL rules after parsing.
// It is stateless, so a single instance may be shared freely.
type Validator struct{}

// NewValidator creates a new validator instance.
func NewValidator() *Validator {
	return &Validator{}
}
// ValidateRule validates a single KARL rule: rule type, target selector,
// individual label selectors, topology key, and (for PREFER/REPEL) the
// 1-100 weight range. Returns the first violation found.
func (v *Validator) ValidateRule(rule KARLRule) error {
	// Validate rule type
	if rule.RuleType == "" {
		return fmt.Errorf("missing rule type")
	}
	// Validate target selector
	if rule.TargetSelector.Type == "" {
		return fmt.Errorf("missing target selector")
	}
	// NOTE(review): this check duplicates validateTargetSelector below but
	// fires first and produces a different error message.
	if rule.TargetSelector.Type == "pods" && len(rule.TargetSelector.LabelSelectors) == 0 {
		return fmt.Errorf("pods selector requires labels")
	}
	// Validate label selectors
	if err := v.validateLabelSelectors(rule.TargetSelector.LabelSelectors); err != nil {
		return fmt.Errorf("invalid label selectors: %w", err)
	}
	// Validate topology key
	if rule.TopologyKey == "" {
		return fmt.Errorf("missing topology key")
	}
	// Validate weight for soft constraint rules
	if (rule.RuleType == RuleTypePrefer || rule.RuleType == RuleTypeRepel) && (rule.Weight < 1 || rule.Weight > 100) {
		return fmt.Errorf("soft constraint rule weight must be between 1 and 100")
	}
	// Validate target selector type
	if err := v.validateTargetSelector(rule.TargetSelector); err != nil {
		return fmt.Errorf("invalid target selector: %w", err)
	}
	return nil
}
// validateLabelSelectors validates each label selector in order, wrapping
// the first failure with the index of the offending selector.
func (v *Validator) validateLabelSelectors(selectors []LabelSelector) error {
	for idx := range selectors {
		if err := v.validateLabelSelector(selectors[idx]); err != nil {
			return fmt.Errorf("selector %d: %w", idx, err)
		}
	}
	return nil
}
// validateLabelSelector validates a single label selector: a non-empty key,
// a known operation, and a value count appropriate for that operation
// (exactly one for '=', at least one for in/not-in, none for exists checks).
func (v *Validator) validateLabelSelector(selector LabelSelector) error {
	// Validate key
	if selector.Key == "" {
		return fmt.Errorf("label key cannot be empty")
	}
	// Validate operation and per-operation value constraints
	switch selector.Operation {
	case LabelOpEquals:
		if len(selector.Values) != 1 {
			return fmt.Errorf("equality operation requires exactly one value")
		}
		if selector.Values[0] == "" {
			return fmt.Errorf("equality operation value cannot be empty")
		}
	case LabelOpIn, LabelOpNotIn:
		if len(selector.Values) == 0 {
			return fmt.Errorf("%s operation requires at least one value", selector.Operation)
		}
		for i, value := range selector.Values {
			if value == "" {
				return fmt.Errorf("%s operation value %d cannot be empty", selector.Operation, i)
			}
		}
	case LabelOpExists, LabelOpNotExists:
		// Existence checks are value-free by definition.
		if len(selector.Values) != 0 {
			return fmt.Errorf("%s operation should not have values", selector.Operation)
		}
	default:
		return fmt.Errorf("unknown label operation: %s", selector.Operation)
	}
	return nil
}
// validateTargetSelector checks that the target type is supported ("pods")
// and that a pods target carries at least one label selector.
func (v *Validator) validateTargetSelector(target TargetSelector) error {
	if target.Type != "pods" {
		return fmt.Errorf("unsupported target type: %s", target.Type)
	}
	if len(target.LabelSelectors) == 0 {
		return fmt.Errorf("pods target requires at least one label selector")
	}
	return nil
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
pkg/karl/validator_test.go | Go | package karl
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestValidateRule drives Validator.ValidateRule through a table of
// valid and invalid KARL rules and asserts the exact error text for
// each failing case.
func TestValidateRule(t *testing.T) {
	validator := &Validator{}
	tests := []struct {
		name        string // test case description
		rule        KARLRule
		expectError bool   // whether ValidateRule is expected to fail
		errorMsg    string // exact error message expected when expectError is true
	}{
		{
			name: "Valid REQUIRE rule",
			rule: KARLRule{
				RuleType: RuleTypeRequire,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyNode,
			},
			expectError: false,
		},
		{
			name: "Valid PREFER rule with weight",
			rule: KARLRule{
				RuleType: RuleTypePrefer,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyZone,
				Weight:      80,
			},
			expectError: false,
		},
		{
			// Expected message presumably comes from a check in ValidateRule
			// that precedes the ones visible in this file.
			name: "Missing rule type",
			rule: KARLRule{
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyNode,
			},
			expectError: true,
			errorMsg:    "missing rule type",
		},
		{
			name: "Missing target selector",
			rule: KARLRule{
				RuleType:    RuleTypeRequire,
				TopologyKey: TopologyNode,
			},
			expectError: true,
			errorMsg:    "missing target selector",
		},
		{
			// NOTE(review): this expected text does not match
			// validateTargetSelector's "pods target requires at least one
			// label selector" (which ValidateRule wraps as "invalid target
			// selector: ..."); presumably an earlier check in ValidateRule
			// emits this shorter message — confirm the two are intentional.
			name: "Pods selector without labels",
			rule: KARLRule{
				RuleType: RuleTypeRequire,
				TargetSelector: TargetSelector{
					Type:           "pods",
					LabelSelectors: []LabelSelector{},
				},
				TopologyKey: TopologyNode,
			},
			expectError: true,
			errorMsg:    "pods selector requires labels",
		},
		{
			name: "Missing topology key",
			rule: KARLRule{
				RuleType: RuleTypeRequire,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
			},
			expectError: true,
			errorMsg:    "missing topology key",
		},
		{
			name: "Invalid weight for soft constraint",
			rule: KARLRule{
				RuleType: RuleTypePrefer,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyZone,
				Weight:      150, // Invalid weight > 100
			},
			expectError: true,
			errorMsg:    "soft constraint rule weight must be between 1 and 100",
		},
		{
			name: "Zero weight for soft constraint",
			rule: KARLRule{
				RuleType: RuleTypeRepel,
				TargetSelector: TargetSelector{
					Type: "pods",
					LabelSelectors: []LabelSelector{
						{Key: "app", Operation: LabelOpEquals, Values: []string{"web"}},
					},
				},
				TopologyKey: TopologyZone,
				Weight:      0, // Invalid weight = 0
			},
			expectError: true,
			errorMsg:    "soft constraint rule weight must be between 1 and 100",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateRule(tt.rule)
			if tt.expectError {
				require.Error(t, err, "Expected error for test case: %s", tt.name)
				assert.Equal(t, tt.errorMsg, err.Error(), "Error message mismatch")
			} else {
				assert.NoError(t, err, "Unexpected error for test case: %s", tt.name)
			}
		})
	}
}
// TestValidateLabelSelector exercises the per-selector validation:
// key presence and the value-arity rules of each label operation
// (equals: exactly one; in: at least one; exists: none).
func TestValidateLabelSelector(t *testing.T) {
	validator := &Validator{}
	tests := []struct {
		name        string // test case description
		selector    LabelSelector
		expectError bool   // whether validateLabelSelector is expected to fail
		errorMsg    string // exact error message expected when expectError is true
	}{
		{
			name: "Valid equality selector",
			selector: LabelSelector{
				Key:       "app",
				Operation: LabelOpEquals,
				Values:    []string{"web"},
			},
			expectError: false,
		},
		{
			name: "Valid in selector",
			selector: LabelSelector{
				Key:       "env",
				Operation: LabelOpIn,
				Values:    []string{"prod", "staging"},
			},
			expectError: false,
		},
		{
			// Exists operations take no values at all.
			name: "Valid exists selector",
			selector: LabelSelector{
				Key:       "monitoring",
				Operation: LabelOpExists,
			},
			expectError: false,
		},
		{
			name: "Missing key",
			selector: LabelSelector{
				Operation: LabelOpEquals,
				Values:    []string{"web"},
			},
			expectError: true,
			errorMsg:    "label key cannot be empty",
		},
		{
			name: "Equality selector without values",
			selector: LabelSelector{
				Key:       "app",
				Operation: LabelOpEquals,
				Values:    []string{},
			},
			expectError: true,
			errorMsg:    "equality operation requires exactly one value",
		},
		{
			name: "In selector without values",
			selector: LabelSelector{
				Key:       "env",
				Operation: LabelOpIn,
				Values:    []string{},
			},
			expectError: true,
			errorMsg:    "in operation requires at least one value",
		},
		{
			name: "Exists selector with values",
			selector: LabelSelector{
				Key:       "monitoring",
				Operation: LabelOpExists,
				Values:    []string{"true"},
			},
			expectError: true,
			errorMsg:    "exists operation should not have values",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.validateLabelSelector(tt.selector)
			if tt.expectError {
				require.Error(t, err, "Expected error for test case: %s", tt.name)
				assert.Equal(t, tt.errorMsg, err.Error(), "Error message mismatch")
			} else {
				assert.NoError(t, err, "Unexpected error for test case: %s", tt.name)
			}
		})
	}
}
| yaacov/karl-interpreter | 0 | KARL (Kubernetes Affinity Rule Language) is a human-readable domain-specific language for expressing Kubernetes pod affinity and anti-affinity rules. | Go | yaacov | Yaacov Zamir | Red Hat |
demo/setup-aws-ec2.sh | Shell | #!/bin/bash
# Minimal AWS EC2 Infrastructure Setup Script with idempotency
#
# Creates (or reuses) a VPC, subnet and security group, resolves the newest
# RHEL9 AMI, and prints the resulting environment variables plus ready-to-run
# launch commands.  All variable expansions passed to the AWS CLI are
# double-quoted so the commands survive word splitting and glob expansion.

# Check if EC2_REGION is set
if [ -z "$EC2_REGION" ]; then
    echo "ERROR: EC2_REGION environment variable is not set"
    echo "Please set it first: export EC2_REGION=us-east-1"
    exit 1
fi

# Set default VM name if not provided
if [ -z "$EC2_VM_NAME" ]; then
    export EC2_VM_NAME="rhel9-instance"
    echo "EC2_VM_NAME not set, using default: $EC2_VM_NAME"
else
    echo "Using EC2_VM_NAME: $EC2_VM_NAME"
fi

echo "Setting up minimal AWS EC2 infrastructure in region: $EC2_REGION"

# 1. Create or use existing VPC (lookup by Name tag for idempotency).
#    describe-vpcs with --output text prints "None" when nothing matched.
echo "Checking for existing VPC..."
export EC2_VPC_ID=$(aws ec2 describe-vpcs \
    --filters "Name=tag:Name,Values=default-vpc" \
    --region "$EC2_REGION" \
    --query 'Vpcs[0].VpcId' \
    --output text)

if [ "$EC2_VPC_ID" == "None" ] || [ -z "$EC2_VPC_ID" ]; then
    echo "Creating new VPC..."
    export EC2_VPC_ID=$(aws ec2 create-vpc \
        --cidr-block 10.0.0.0/16 \
        --tag-specifications 'ResourceType=vpc,Tags=[{Key=Name,Value=default-vpc}]' \
        --region "$EC2_REGION" \
        --query 'Vpc.VpcId' \
        --output text)
    echo "VPC Created: $EC2_VPC_ID"
else
    echo "Using existing VPC: $EC2_VPC_ID"
fi

# 2. Create or use existing Subnet (scoped to the VPC chosen above)
echo "Checking for existing Subnet..."
export EC2_SUBNET_ID=$(aws ec2 describe-subnets \
    --filters "Name=tag:Name,Values=default-subnet" "Name=vpc-id,Values=$EC2_VPC_ID" \
    --region "$EC2_REGION" \
    --query 'Subnets[0].SubnetId' \
    --output text)

if [ "$EC2_SUBNET_ID" == "None" ] || [ -z "$EC2_SUBNET_ID" ]; then
    echo "Creating new Subnet..."
    export EC2_SUBNET_ID=$(aws ec2 create-subnet \
        --vpc-id "$EC2_VPC_ID" \
        --cidr-block 10.0.1.0/24 \
        --tag-specifications 'ResourceType=subnet,Tags=[{Key=Name,Value=default-subnet}]' \
        --region "$EC2_REGION" \
        --query 'Subnet.SubnetId' \
        --output text)
    echo "Subnet Created: $EC2_SUBNET_ID"
else
    echo "Using existing Subnet: $EC2_SUBNET_ID"
fi

# 3. Create or use existing Security Group (looked up by name + VPC)
echo "Checking for existing Security Group..."
export EC2_SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \
    --filters "Name=group-name,Values=default-sg" "Name=vpc-id,Values=$EC2_VPC_ID" \
    --region "$EC2_REGION" \
    --query 'SecurityGroups[0].GroupId' \
    --output text)

if [ "$EC2_SECURITY_GROUP_ID" == "None" ] || [ -z "$EC2_SECURITY_GROUP_ID" ]; then
    echo "Creating new Security Group..."
    export EC2_SECURITY_GROUP_ID=$(aws ec2 create-security-group \
        --group-name default-sg \
        --description "Default security group" \
        --vpc-id "$EC2_VPC_ID" \
        --region "$EC2_REGION" \
        --query 'GroupId' \
        --output text)
    echo "Security Group Created: $EC2_SECURITY_GROUP_ID"
else
    echo "Using existing Security Group: $EC2_SECURITY_GROUP_ID"
fi

# 4. Get RHEL9 AMI ID
#    Owner 309956199498 is Red Hat's official AMI account; sort_by picks the
#    most recently created image.
echo "Finding RHEL9 AMI..."
export EC2_AMI_ID=$(aws ec2 describe-images \
    --owners 309956199498 \
    --filters "Name=name,Values=RHEL-9*" "Name=architecture,Values=x86_64" \
    --query 'sort_by(Images, &CreationDate)[-1].ImageId' \
    --region "$EC2_REGION" \
    --output text)
echo "RHEL9 AMI ID: $EC2_AMI_ID"

# 5. Print all environment variables
echo ""
echo "==================================="
echo "Environment Variables Set:"
echo "==================================="
echo "export EC2_REGION=$EC2_REGION"
echo "export EC2_VPC_ID=$EC2_VPC_ID"
echo "export EC2_SUBNET_ID=$EC2_SUBNET_ID"
echo "export EC2_SECURITY_GROUP_ID=$EC2_SECURITY_GROUP_ID"
echo "export EC2_AMI_ID=$EC2_AMI_ID"
echo "export EC2_VM_NAME=$EC2_VM_NAME"
echo ""
echo "==================================="
echo "Launch Instance Command:"
echo "==================================="
echo "aws ec2 run-instances \\"
echo " --image-id \$EC2_AMI_ID \\"
echo " --instance-type t2.micro \\"
echo " --security-group-ids \$EC2_SECURITY_GROUP_ID \\"
echo " --subnet-id \$EC2_SUBNET_ID \\"
echo " --region \$EC2_REGION \\"
echo " --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value='$EC2_VM_NAME'}]'"
echo ""
echo "==================================="
echo "List Instances:"
echo "==================================="
echo "aws ec2 describe-instances \\"
echo " --region $EC2_REGION \\"
echo " --output json | \\"
echo " jq '.Reservations[].Instances[] | {InstanceId, Tags}'"
echo ""
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
forklift-cleanup.sh | Shell | #!/bin/bash
# forklift-cleanup.sh - Remove Forklift operator from a Kubernetes/OpenShift cluster
#
# This script removes the Forklift installation in the correct order:
#   1. Delete ForkliftController instance (and wait, bounded, for pod cleanup)
#   2. Delete the Forklift operator subscription
#   3. Delete the ClusterServiceVersion (CSV)
#   4. Delete the CatalogSource
#   5. Delete the OperatorGroup
#   6. Delete the Forklift CRDs
#   7. Delete the namespace
#
# Usage: ./forklift-cleanup.sh [--force]
set -e
FORCE=0
NAMESPACE="konveyor-forklift"
# Maximum seconds to wait for controller pods to terminate before continuing
# with the cleanup anyway (a stuck finalizer must not hang the script forever).
POD_WAIT_TIMEOUT=120
for arg in "$@"; do
    case "$arg" in
        --force|-f)
            FORCE=1
            ;;
        --help|-h)
            echo "Usage: $0 [--force]"
            echo ""
            echo "Removes Forklift operator from a Kubernetes or OpenShift cluster."
            echo ""
            echo "Flags:"
            echo " --force, -f Skip confirmation prompt"
            echo " --help, -h Show this help message"
            exit 0
            ;;
        *)
            echo "Unknown option: $arg"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
# Check for required tools
if ! command -v kubectl >/dev/null 2>&1; then
    echo "Error: kubectl not found"
    exit 1
fi
echo "=========================================="
echo "Forklift Cleanup"
echo "=========================================="
echo ""
echo "This will remove Forklift from namespace: $NAMESPACE"
echo ""
# Confirmation prompt (skipped with --force)
if [ "$FORCE" = "0" ]; then
    read -p "Are you sure you want to continue? (y/N) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo "Aborted."
        exit 0
    fi
    echo ""
fi
# Step 1: Delete ForkliftController instance
echo "Step 1: Deleting ForkliftController instance..."
if kubectl get forkliftcontroller -n "$NAMESPACE" forklift-controller >/dev/null 2>&1; then
    kubectl delete forkliftcontroller -n "$NAMESPACE" forklift-controller --timeout=120s || true
    echo " ForkliftController deleted."
else
    echo " No ForkliftController found, skipping."
fi
echo ""
# Wait for controller pods to terminate.
# Bounded by POD_WAIT_TIMEOUT so a pod that never terminates does not
# block the remaining cleanup steps indefinitely.
echo "Waiting for controller pods to terminate..."
WAITED=0
TIMED_OUT=0
while kubectl get pods -n "$NAMESPACE" -l app=forklift 2>/dev/null | grep -q forklift; do
    if [ "$WAITED" -ge "$POD_WAIT_TIMEOUT" ]; then
        TIMED_OUT=1
        echo " Timed out after ${POD_WAIT_TIMEOUT}s waiting for forklift pods; continuing with cleanup."
        break
    fi
    echo " Waiting for forklift pods to terminate..."
    sleep 5
    WAITED=$((WAITED + 5))
done
if [ "$TIMED_OUT" = "0" ]; then
    echo " Controller pods terminated."
fi
echo ""
# Step 2: Delete the Forklift operator subscription
echo "Step 2: Deleting Forklift operator subscription..."
if kubectl get subscription -n "$NAMESPACE" forklift-operator >/dev/null 2>&1; then
    kubectl delete subscription -n "$NAMESPACE" forklift-operator --timeout=60s || true
    echo " Subscription deleted."
else
    echo " No subscription found, skipping."
fi
echo ""
# Step 3: Delete the ClusterServiceVersion (CSV)
echo "Step 3: Deleting Forklift ClusterServiceVersion..."
CSV_NAME=$(kubectl get csv -n "$NAMESPACE" -o name 2>/dev/null | grep forklift || true)
if [ -n "$CSV_NAME" ]; then
    kubectl delete "$CSV_NAME" -n "$NAMESPACE" --timeout=60s || true
    echo " CSV deleted."
else
    echo " No CSV found, skipping."
fi
echo ""
# Step 4: Delete the CatalogSource
echo "Step 4: Deleting Forklift CatalogSource..."
if kubectl get catalogsource -n "$NAMESPACE" forklift >/dev/null 2>&1; then
    kubectl delete catalogsource -n "$NAMESPACE" forklift --timeout=60s || true
    echo " CatalogSource deleted."
else
    echo " No CatalogSource found, skipping."
fi
echo ""
# Step 5: Delete the OperatorGroup
echo "Step 5: Deleting OperatorGroup..."
if kubectl get operatorgroup -n "$NAMESPACE" forklift >/dev/null 2>&1; then
    kubectl delete operatorgroup -n "$NAMESPACE" forklift --timeout=60s || true
    echo " OperatorGroup deleted."
else
    echo " No OperatorGroup found, skipping."
fi
echo ""
# Step 6: Delete Forklift CRDs
echo "Step 6: Deleting Forklift CRDs..."
FORKLIFT_CRDS=$(kubectl get crd -o name 2>/dev/null | grep -E "forklift|konveyor" || true)
if [ -n "$FORKLIFT_CRDS" ]; then
    for crd in $FORKLIFT_CRDS; do
        echo " Deleting $crd..."
        kubectl delete "$crd" --timeout=60s || true
    done
    echo " CRDs deleted."
else
    echo " No Forklift CRDs found, skipping."
fi
echo ""
# Step 7: Delete the namespace
echo "Step 7: Deleting namespace $NAMESPACE..."
if kubectl get namespace "$NAMESPACE" >/dev/null 2>&1; then
    kubectl delete namespace "$NAMESPACE" --timeout=120s || true
    echo " Namespace deleted."
else
    echo " Namespace not found, skipping."
fi
echo ""
echo "=========================================="
echo "Forklift cleanup complete!"
echo "=========================================="
echo ""
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
forklift-images.sh | Shell | #!/bin/bash
# forklift-images.sh - Manage ForkliftController FQIN images
#
# This script lists, clears, or sets FQIN (Fully Qualified Image Name) images
# in the ForkliftController spec.
#
# Usage:
# ./forklift-images.sh # List current FQIN images
# ./forklift-images.sh --list # List current FQIN images
# ./forklift-images.sh --clear # Clear all FQIN images
# ./forklift-images.sh --set <image> # Set an image based on its name
set -e
CONTROLLER_NAME="${CONTROLLER_NAME:-forklift-controller}"
# Auto-detect namespace if not explicitly set
# Check both konveyor-forklift and openshift-mtv namespaces
detect_namespace() {
    # An explicit NAMESPACE override always wins.
    if [ -n "${NAMESPACE:-}" ]; then
        echo "$NAMESPACE"
        return
    fi
    # Probe the known install namespaces in priority order.
    local candidate
    for candidate in konveyor-forklift openshift-mtv; do
        if kubectl get forkliftcontroller "$CONTROLLER_NAME" -n "$candidate" >/dev/null 2>&1; then
            echo "$candidate"
            return
        fi
    done
    # Nothing found: fall back to the upstream default namespace.
    echo "konveyor-forklift"
}
# Resolve the namespace once at startup (explicit env var or auto-detect).
NAMESPACE="${NAMESPACE:-$(detect_namespace)}"
# All known FQIN fields - format: "image_name:fqin_field"
# This newline-separated table is the single source of truth for the
# image-name <-> spec-field mapping; get_fqin_field / get_image_name
# grep it, and usage() renders it as the supported-image list.
FQIN_MAPPINGS="
forklift-controller:controller_image_fqin
forklift-api:api_image_fqin
forklift-validation:validation_image_fqin
forklift-console-plugin:ui_plugin_image_fqin
forklift-must-gather:must_gather_image_fqin
forklift-virt-v2v:virt_v2v_image_fqin
forklift-cli-download:cli_download_image_fqin
populator-controller:populator_controller_image_fqin
ovirt-populator:populator_ovirt_image_fqin
openstack-populator:populator_openstack_image_fqin
vsphere-xcopy-volume-populator:populator_vsphere_xcopy_volume_image_fqin
forklift-ova-provider-server:ova_provider_server_fqin
forklift-ova-proxy:ova_proxy_fqin
forklift-hyperv-provider-server:hyperv_provider_server_fqin
"
# All FQIN field names for iteration
# Keep in sync with the right-hand column of FQIN_MAPPINGS above.
ALL_FQIN_FIELDS="
controller_image_fqin
api_image_fqin
validation_image_fqin
ui_plugin_image_fqin
must_gather_image_fqin
virt_v2v_image_fqin
cli_download_image_fqin
populator_controller_image_fqin
populator_ovirt_image_fqin
populator_openstack_image_fqin
populator_vsphere_xcopy_volume_image_fqin
ova_provider_server_fqin
ova_proxy_fqin
hyperv_provider_server_fqin
"
# Look up the FQIN spec field that corresponds to a short image name.
# Prints nothing when the image name is not in FQIN_MAPPINGS.
get_fqin_field() {
    printf '%s\n' "$FQIN_MAPPINGS" | grep "^$1:" | cut -d: -f2
}
# Reverse lookup: map an FQIN spec field back to its short image name.
# Prints nothing when the field is not in FQIN_MAPPINGS.
get_image_name() {
    printf '%s\n' "$FQIN_MAPPINGS" | grep ":$1$" | cut -d: -f1
}
# Display usage information
# The heredoc is emitted verbatim; the supported-image table appended
# below it is generated on the fly from FQIN_MAPPINGS and sorted.
usage() {
    cat << EOF
Usage: $0 [OPTIONS]
Manage ForkliftController FQIN (Fully Qualified Image Name) images.
Options:
  --list           List current FQIN images (default action)
  --clear          Clear all FQIN images from the controller spec
  --set <image>    Set an image based on its name
                   Example: --set quay.io/kubev2v/forklift-controller:latest
  --help, -h       Show this help message
Environment Variables:
  NAMESPACE        Namespace containing ForkliftController (auto-detected from
                   konveyor-forklift or openshift-mtv if not set)
  CONTROLLER_NAME  Name of the ForkliftController (default: forklift-controller)
Supported Image Names:
EOF
    # Render each "name:field" mapping as an aligned row.
    echo "$FQIN_MAPPINGS" | grep -v '^$' | while read -r line; do
        name=$(echo "$line" | cut -d: -f1)
        field=$(echo "$line" | cut -d: -f2)
        printf " %-40s -> %s\n" "$name" "$field"
    done | sort
    echo ""
}
# Verify the required CLI tools are on PATH; abort with a message otherwise.
check_prerequisites() {
    command -v kubectl >/dev/null 2>&1 || {
        echo "Error: kubectl not found"
        exit 1
    }
    command -v jq >/dev/null 2>&1 || {
        echo "Error: jq not found (required for JSON parsing)"
        exit 1
    }
}
# Get the ForkliftController JSON
# Prints the resource as JSON on stdout; prints nothing (stderr is
# suppressed) when it does not exist.  Callers detect absence via empty
# output or the propagated non-zero kubectl exit status.
get_controller() {
    kubectl get forkliftcontroller "$CONTROLLER_NAME" -n "$NAMESPACE" -o json 2>/dev/null
}
# List current FQIN images
# Prints every spec.*_fqin field that is currently set on the controller,
# or a placeholder line when none are set.
list_images() {
    echo "ForkliftController FQIN Images"
    echo "==============================="
    echo "Namespace: $NAMESPACE"
    echo "Controller: $CONTROLLER_NAME"
    echo ""
    local controller_json
    controller_json=$(get_controller)
    if [ -z "$controller_json" ]; then
        echo "Error: ForkliftController '$CONTROLLER_NAME' not found in namespace '$NAMESPACE'"
        exit 1
    fi
    echo "Currently set FQIN images:"
    echo ""
    local found=0
    for field in $ALL_FQIN_FIELDS; do
        # Word splitting of ALL_FQIN_FIELDS can yield empty entries; skip them.
        [ -z "$field" ] && continue
        # "// empty" makes jq print nothing (instead of "null") for unset fields.
        value=$(printf '%s' "$controller_json" | jq -r ".spec.$field // empty")
        if [ -n "$value" ]; then
            printf " %-45s = %s\n" "$field" "$value"
            found=1
        fi
    done
    if [ "$found" = "0" ]; then
        echo " (no FQIN images are currently set)"
    fi
    echo ""
}
# Clear all FQIN images
# Builds one JSON-Patch document with a "remove" op per FQIN field that is
# actually present in the spec, then applies it in a single kubectl patch.
# The presence check matters: a JSON-Patch "remove" on an absent path fails.
clear_images() {
    echo "Clearing all FQIN images from ForkliftController..."
    echo "Namespace: $NAMESPACE"
    echo "Controller: $CONTROLLER_NAME"
    echo ""
    local controller_json
    controller_json=$(get_controller)
    if [ -z "$controller_json" ]; then
        echo "Error: ForkliftController '$CONTROLLER_NAME' not found in namespace '$NAMESPACE'"
        exit 1
    fi
    # Build patch to remove all FQIN fields
    local patch_ops=""
    local first=1
    for field in $ALL_FQIN_FIELDS; do
        [ -z "$field" ] && continue
        value=$(printf '%s' "$controller_json" | jq -r ".spec.$field // empty")
        if [ -n "$value" ]; then
            # Comma-separate every op after the first.
            if [ "$first" = "1" ]; then
                first=0
            else
                patch_ops="${patch_ops},"
            fi
            patch_ops="${patch_ops}{\"op\":\"remove\",\"path\":\"/spec/$field\"}"
            echo " Removing: $field"
        fi
    done
    if [ -z "$patch_ops" ]; then
        echo " No FQIN images to clear."
        return
    fi
    patch_ops="[$patch_ops]"
    echo ""
    kubectl patch forkliftcontroller "$CONTROLLER_NAME" -n "$NAMESPACE" \
        --type=json \
        -p "$patch_ops"
    echo ""
    echo "All FQIN images cleared successfully."
}
# Set an image based on its name
# Derives the short image name from the full reference, maps it to the
# matching spec field via FQIN_MAPPINGS, and merge-patches the controller.
set_image() {
    local image="$1"
    if [ -z "$image" ]; then
        echo "Error: No image specified"
        echo "Usage: $0 --set <image>"
        exit 1
    fi
    echo "Setting FQIN image..."
    echo "Namespace: $NAMESPACE"
    echo "Controller: $CONTROLLER_NAME"
    echo "Image: $image"
    echo ""
    # Extract image name from the full image path
    # e.g., quay.io/kubev2v/forklift-controller:latest -> forklift-controller
    # Uses shell parameter expansion instead of spawning two sed processes.
    local image_name
    image_name="${image##*/}"       # drop registry/repository path up to the last '/'
    image_name="${image_name%%:*}"  # drop ':tag' (also truncates '@sha256:...' at the colon)
    # Find the corresponding FQIN field
    local fqin_field
    fqin_field=$(get_fqin_field "$image_name")
    if [ -z "$fqin_field" ]; then
        echo "Error: Unknown image name '$image_name'"
        echo ""
        echo "Supported image names:"
        echo "$FQIN_MAPPINGS" | grep -v '^$' | cut -d: -f1 | sort | sed 's/^/ /'
        exit 1
    fi
    echo "Detected image name: $image_name"
    echo "Setting field: $fqin_field"
    echo ""
    # Check if controller exists
    if ! get_controller >/dev/null; then
        echo "Error: ForkliftController '$CONTROLLER_NAME' not found in namespace '$NAMESPACE'"
        exit 1
    fi
    # Patch the controller
    kubectl patch forkliftcontroller "$CONTROLLER_NAME" -n "$NAMESPACE" \
        --type=merge \
        -p "{\"spec\":{\"$fqin_field\":\"$image\"}}"
    echo ""
    echo "Image set successfully: $fqin_field = $image"
}
# Main entry point
# Parses CLI options and dispatches to the list/clear/set handlers.
# When multiple action flags are given, the last one wins; the default
# action with no arguments is "list".
main() {
    check_prerequisites
    # Default action is list
    local action="list"
    local image=""
    while [ $# -gt 0 ]; do
        case "$1" in
            --list)
                action="list"
                shift
                ;;
            --clear)
                action="clear"
                shift
                ;;
            --set)
                action="set"
                shift
                # --set consumes the following argument as the image reference.
                if [ $# -gt 0 ]; then
                    image="$1"
                    shift
                fi
                ;;
            --help|-h)
                usage
                exit 0
                ;;
            *)
                echo "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
        esac
    done
    case "$action" in
        list)
            list_images
            ;;
        clear)
            clear_images
            ;;
        set)
            set_image "$image"
            ;;
    esac
}
main "$@"
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
forklift-install.sh | Shell | #!/bin/bash
# forklift-install.sh - Install Forklift operator on a Kubernetes/OpenShift cluster
#
# This script installs the Forklift operator from the official kubev2v/forklift repository.
# On vanilla Kubernetes, it also installs OLM (Operator Lifecycle Manager).
# On OpenShift, OLM is already present, so it's skipped automatically.
#
# Architecture support:
# - amd64: Uses official kubev2v index (default on x86_64)
# - arm64: Uses yaacov/forklift-operator-index:devel-arm64 (default on ARM)
#
# Usage: ./forklift-install.sh [--k8s] [--ocp] [--arm64] [--amd64] [--no-controller]
set -e
# Parse flags
# Each FORCE_* switch overrides the corresponding auto-detection further below.
FORCE_K8S=0
FORCE_OCP=0
FORCE_ARM64=0
FORCE_AMD64=0
NO_CONTROLLER=0
for arg in "$@"; do
    case "$arg" in
        --k8s)
            FORCE_K8S=1
            ;;
        --ocp)
            FORCE_OCP=1
            ;;
        --arm64)
            FORCE_ARM64=1
            ;;
        --amd64)
            FORCE_AMD64=1
            ;;
        --no-controller)
            NO_CONTROLLER=1
            ;;
        --help|-h)
            echo "Usage: $0 [--k8s] [--ocp] [--arm64] [--amd64] [--no-controller]"
            echo ""
            echo "Installs Forklift operator on a Kubernetes or OpenShift cluster."
            echo "The script auto-detects OpenShift and architecture, skipping OLM on OpenShift."
            echo ""
            echo "Flags:"
            echo " --k8s Force Kubernetes mode (install OLM)"
            echo " --ocp Force OpenShift mode (skip OLM installation)"
            echo " --arm64 Force ARM64 architecture (use ARM operator index)"
            echo " --amd64 Force AMD64 architecture (use official index)"
            echo " --no-controller Skip creating the ForkliftController instance"
            echo " --help, -h Show this help message"
            exit 0
            ;;
        *)
            echo "Unknown option: $arg"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
# Check for required tools
if ! command -v kubectl >/dev/null 2>&1; then
    echo "Error: kubectl not found"
    exit 1
fi
# Detect cluster type (OpenShift vs vanilla Kubernetes) by probing for the
# OpenShift-only config.openshift.io API group.
# Returns 0 on OpenShift, 1 otherwise.
detect_openshift() {
    if ! kubectl api-resources --api-group=config.openshift.io >/dev/null 2>&1; then
        return 1 # API group absent: vanilla Kubernetes
    fi
    return 0 # API group present: OpenShift
}
IS_OPENSHIFT=0
# Precedence: explicit --ocp wins, then explicit --k8s, then auto-detection.
if [ "$FORCE_OCP" = "1" ]; then
    IS_OPENSHIFT=1
elif [ "$FORCE_K8S" = "1" ]; then
    IS_OPENSHIFT=0
elif detect_openshift; then
    IS_OPENSHIFT=1
fi
# Detect the target CPU architecture, preferring what the cluster itself
# reports over the local machine. Prints "amd64" or "arm64".
detect_architecture() {
    # Ask the first cluster node for its architecture, if reachable.
    local arch
    arch=$(kubectl get nodes -o jsonpath='{.items[0].status.nodeInfo.architecture}' 2>/dev/null || true)
    if [ -n "$arch" ]; then
        echo "$arch"
        return
    fi
    # Cluster unavailable: map the local machine's architecture instead.
    case "$(uname -m)" in
        aarch64|arm64)
            echo "arm64"
            ;;
        *)
            # x86_64 and anything unrecognized default to amd64.
            echo "amd64"
            ;;
    esac
}
IS_ARM64=0
# Precedence: explicit --arm64 wins, then explicit --amd64, then the
# architecture reported by the cluster (or the local machine as fallback).
if [ "$FORCE_ARM64" = "1" ]; then
    IS_ARM64=1
elif [ "$FORCE_AMD64" = "1" ]; then
    IS_ARM64=0
else
    DETECTED_ARCH=$(detect_architecture)
    if [ "$DETECTED_ARCH" = "arm64" ]; then
        IS_ARM64=1
    fi
fi
echo "=========================================="
echo "Forklift Operator Installation"
echo "=========================================="
echo ""
if [ "$IS_OPENSHIFT" = "1" ]; then
    echo "Cluster type: OpenShift (OLM pre-installed)"
else
    echo "Cluster type: Kubernetes"
fi
if [ "$IS_ARM64" = "1" ]; then
    echo "Architecture: ARM64 (using yaacov ARM index)"
else
    echo "Architecture: AMD64 (using official kubev2v index)"
fi
echo ""
# Install OLM only on vanilla Kubernetes
if [ "$IS_OPENSHIFT" = "0" ]; then
    echo "Installing Operator Lifecycle Manager (OLM)..."
    kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
    kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
    echo "Waiting for OLM operator deployment..."
    # Poll until the deployment object exists, then wait until it is Available.
    while ! kubectl get deployment -n olm olm-operator 2>/dev/null; do
        echo " Waiting for olm-operator deployment to be created..."
        sleep 10
    done
    kubectl wait deployment -n olm olm-operator --for condition=Available=True --timeout=180s
    echo "OLM installed successfully."
    echo ""
else
    echo "Skipping OLM installation (OpenShift has OLM pre-installed)"
    echo ""
fi
# Set operator index image based on architecture
FORKLIFT_INDEX_AMD64="quay.io/kubev2v/forklift-operator-index:latest"
FORKLIFT_INDEX_ARM64="quay.io/yaacov/forklift-operator-index:devel-arm64"
if [ "$IS_ARM64" = "1" ]; then
    FORKLIFT_INDEX_IMAGE="$FORKLIFT_INDEX_ARM64"
else
    FORKLIFT_INDEX_IMAGE="$FORKLIFT_INDEX_AMD64"
fi
# Install Forklift operator
echo "Installing Forklift operator..."
echo " Index image: $FORKLIFT_INDEX_IMAGE"
# Create namespace, CatalogSource, OperatorGroup, and Subscription
# (the index image reference is interpolated into the CatalogSource below).
cat << EOF | kubectl apply -f -
---
apiVersion: v1
kind: Namespace
metadata:
  name: konveyor-forklift
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: konveyor-forklift
  namespace: konveyor-forklift
spec:
  displayName: Forklift Operator
  publisher: Konveyor
  sourceType: grpc
  image: ${FORKLIFT_INDEX_IMAGE}
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: konveyor-forklift
  namespace: konveyor-forklift
spec:
  targetNamespaces:
    - konveyor-forklift
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: forklift-operator
  namespace: konveyor-forklift
spec:
  channel: development
  installPlanApproval: Automatic
  name: forklift-operator
  source: konveyor-forklift
  sourceNamespace: konveyor-forklift
EOF
echo "Waiting for Forklift operator deployment..."
while ! kubectl get deployment -n konveyor-forklift forklift-operator 2>/dev/null; do
    echo " Waiting for forklift-operator deployment to be created..."
    sleep 10
done
kubectl wait deployment -n konveyor-forklift forklift-operator --for condition=Available=True --timeout=180s
echo "Forklift operator installed successfully."
echo ""
# Create ForkliftController instance if not skipped
if [ "$NO_CONTROLLER" = "0" ]; then
    echo "Creating ForkliftController instance..."
    cat << EOF | kubectl -n konveyor-forklift apply -f -
apiVersion: forklift.konveyor.io/v1beta1
kind: ForkliftController
metadata:
  name: forklift-controller
  namespace: konveyor-forklift
spec:
  # Feature flags
  feature_ui_plugin: "true"
  feature_validation: "true"
  feature_volume_populator: "true"
  feature_auth_required: "true"
  # VMware VDDK image (required for VMware migrations)
  # vddk_image: quay.io/kubev2v/vddk:8.0.0
  # Logging
  controller_log_level: 3
  # Container images (from quay.io/kubev2v/forklift-operator-index:latest)
  #controller_image_fqin: quay.io/kubev2v/forklift-controller:latest
  #api_image_fqin: quay.io/kubev2v/forklift-api:latest
  # validation_image_fqin: quay.io/kubev2v/forklift-validation:latest
  # ui_plugin_image_fqin: quay.io/kubev2v/forklift-console-plugin:latest
  # must_gather_image_fqin: quay.io/kubev2v/forklift-must-gather:latest
  virt_v2v_image_fqin: quay.io/kubev2v/forklift-virt-v2v:latest
  # cli_download_image_fqin: quay.io/kubev2v/forklift-cli-download:latest
  # populator_controller_image_fqin: quay.io/kubev2v/populator-controller:latest
  # populator_ovirt_image_fqin: quay.io/kubev2v/ovirt-populator:latest
  # populator_openstack_image_fqin: quay.io/kubev2v/openstack-populator:latest
  # populator_vsphere_xcopy_volume_image_fqin: quay.io/kubev2v/vsphere-xcopy-volume-populator:latest
  # ova_provider_server_fqin: quay.io/kubev2v/forklift-ova-provider-server:latest
  # ova_proxy_fqin: quay.io/kubev2v/forklift-ova-proxy:latest
  # hyperv_provider_server_fqin: quay.io/kubev2v/forklift-hyperv-provider-server:latest
EOF
    echo "ForkliftController created successfully."
    echo ""
else
    echo "Skipping ForkliftController creation (--no-controller flag set)"
    echo ""
fi
echo "=========================================="
echo "Forklift installation complete!"
echo "=========================================="
echo ""
echo "Namespace: konveyor-forklift"
echo ""
echo "To check the status:"
echo " kubectl get pods -n konveyor-forklift"
echo " kubectl get forkliftcontroller -n konveyor-forklift"
echo ""
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
kube-setup.sh | Shell | #!/bin/sh
# kube-setup.sh - Setup NFS mount or CI zip file and export OpenShift cluster credentials
# This script must be SOURCED, not executed: source ./kube-setup.sh [--login] [--cleanup]
#
# Cross-platform: Linux/macOS, bash/zsh
# Supports two modes:
# 1. NFS mount mode: Set NFS_SERVER to mount credentials from NFS share
# 2. CI zip file mode: Set CI_ZIP_FILE to extract credentials from a zip archive
# Determine script directory (works when sourced from any location)
# $BASH_SOURCE is set by bash even when sourced; "${(%):-%x}" is the zsh
# equivalent; $0 is the POSIX fallback (only correct when executed directly).
if [ -n "$BASH_SOURCE" ]; then
    _script_dir="$(cd "$(dirname "$BASH_SOURCE")" && pwd)"
elif [ -n "$ZSH_VERSION" ]; then
    _script_dir="$(cd "$(dirname "${(%):-%x}")" && pwd)"
else
    _script_dir="$(cd "$(dirname "$0")" && pwd)"
fi
# Parse flags
# State lives in _setup_cluster_* temporaries (unset before every return)
# because this script is sourced into the caller's interactive shell.
_setup_cluster_login=0
_setup_cluster_cleanup=0
_setup_cluster_kubevirt=0
_setup_cluster_forklift=0
_setup_cluster_forklift_cleanup=0
_setup_cluster_forklift_images=""
_setup_cluster_forklift_images_arg=""
_setup_cluster_login_default=0
_skip_next=0
_arg_index=0
for _arg in "$@"; do
    _arg_index=$((_arg_index + 1))
    # _skip_next=1 means the previous flag already consumed this argument
    if [ "$_skip_next" = "1" ]; then
        _skip_next=0
        continue
    fi
    case "$_arg" in
        --login)
            _setup_cluster_login=1
            ;;
        --login-default)
            _setup_cluster_login=1
            _setup_cluster_login_default=1
            ;;
        --cleanup)
            _setup_cluster_cleanup=1
            ;;
        --kubevirt)
            _setup_cluster_kubevirt=1
            ;;
        --forklift)
            _setup_cluster_forklift=1
            ;;
        --forklift-cleanup)
            _setup_cluster_forklift_cleanup=1
            ;;
        --forklift-images)
            _setup_cluster_forklift_images="list"
            ;;
        --forklift-images-clear)
            _setup_cluster_forklift_images="clear"
            ;;
        --forklift-images-set)
            _setup_cluster_forklift_images="set"
            # Get next argument as the image
            # POSIX sh has no arrays, so the next positional parameter is
            # fetched via eval; values starting with "-" are treated as the
            # next flag rather than the image argument.
            _next_arg=$(eval "echo \${$((_arg_index + 1))}")
            if [ -n "$_next_arg" ] && [ "${_next_arg#-}" = "$_next_arg" ]; then
                _setup_cluster_forklift_images_arg="$_next_arg"
                _skip_next=1
            fi
            ;;
        --help|-h)
            echo "Usage: source $0 [--login] [--cleanup] [--kubevirt] [--forklift] [--forklift-cleanup]"
            echo ""
            echo "Environment variables (required):"
            echo "  CLUSTER     - Cluster name"
            echo ""
            echo "Environment variables (optional - for mounting/extracting new sources):"
            echo "  NFS_SERVER  - NFS server address (e.g., server:/path)"
            echo "  CI_ZIP_FILE - Path to CI zip file containing cluster credentials"
            echo "                (zip structure: home/jenkins/cnv-qe.rhood.us/<CLUSTER>/auth/)"
            echo "                If empty, auto-detects <CLUSTER>*.zip in DOWNLOADS_DIR"
            echo ""
            echo "  Note: Cluster lookup order:"
            echo "    1. NFS mount directory"
            echo "    2. CI zip extracted directory (auto-extracts from Downloads if CI_ZIP_FILE empty)"
            echo ""
            echo "Environment variables (optional):"
            echo "  MOUNT_DIR      - NFS mount point directory (default: ~/cluster-credentials)"
            echo "  CI_EXTRACT_DIR - CI zip extraction directory (default: ~/ci-credentials)"
            echo "  DOWNLOADS_DIR  - Directory to search for cluster zip files (default: ~/Downloads)"
            echo ""
            echo "Flags:"
            echo "  --login                   Also export KUBECONFIG to login to the cluster"
            echo "  --login-default           Copy kubeconfig to ~/.kube/config for global login"
            echo "  --cleanup                 Unset all variables and unmount NFS / remove extracted files"
            echo "  --kubevirt                Install KubeVirt and CDI"
            echo "  --forklift                Install Forklift operator"
            echo "  --forklift-cleanup        Remove Forklift operator"
            echo "  --forklift-images         List ForkliftController FQIN images"
            echo "  --forklift-images-set IMG Set a specific FQIN image"
            echo "  --forklift-images-clear   Clear all FQIN images"
            echo "  --help, -h                Show this help message"
            return 0 2>/dev/null || exit 0
            ;;
    esac
    # NOTE(review): unrecognized flags are silently ignored (no *) case) -
    # confirm this is intentional.
done
unset _skip_next _arg_index _next_arg
# KubeVirt-only flow (assumes already logged in, no env vars set/unset)
# Each of the four flows below delegates to a sibling script next to this
# one, cleans up its temporaries, then returns (sourced) or exits (executed).
if [ "$_setup_cluster_kubevirt" = "1" ]; then
    _kubevirt_script="$_script_dir/kubevirt-install.sh"
    if [ -f "$_kubevirt_script" ]; then
        echo "Running KubeVirt installation..."
        "$_kubevirt_script"
        _kubevirt_exit=$?
    else
        echo "Error: kubevirt-install.sh not found at $_kubevirt_script"
        _kubevirt_exit=1
    fi
    # Cleanup only the temporary variables we created
    # NOTE(review): _kubevirt_exit is captured but never propagated; the
    # flow always returns 0 - confirm that is intentional.
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg
    unset _setup_cluster_forklift_images _setup_cluster_forklift_images_arg
    unset _script_dir _kubevirt_script _kubevirt_exit
    return 0 2>/dev/null || exit 0
fi
# Forklift-only flow (assumes already logged in, no env vars set/unset)
if [ "$_setup_cluster_forklift" = "1" ]; then
    _forklift_script="$_script_dir/forklift-install.sh"
    if [ -f "$_forklift_script" ]; then
        echo "Running Forklift installation..."
        "$_forklift_script"
        _forklift_exit=$?
    else
        echo "Error: forklift-install.sh not found at $_forklift_script"
        _forklift_exit=1
    fi
    # Cleanup only the temporary variables we created
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg
    unset _setup_cluster_forklift_images _setup_cluster_forklift_images_arg
    unset _script_dir _forklift_script _forklift_exit
    return 0 2>/dev/null || exit 0
fi
# Forklift cleanup flow (assumes already logged in, no env vars set/unset)
if [ "$_setup_cluster_forklift_cleanup" = "1" ]; then
    _forklift_cleanup_script="$_script_dir/forklift-cleanup.sh"
    if [ -f "$_forklift_cleanup_script" ]; then
        echo "Running Forklift cleanup..."
        # --force skips the interactive confirmation in the cleanup script
        "$_forklift_cleanup_script" --force
        _forklift_cleanup_exit=$?
    else
        echo "Error: forklift-cleanup.sh not found at $_forklift_cleanup_script"
        _forklift_cleanup_exit=1
    fi
    # Cleanup only the temporary variables we created
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg
    unset _setup_cluster_forklift_images _setup_cluster_forklift_images_arg
    unset _script_dir _forklift_cleanup_script _forklift_cleanup_exit
    return 0 2>/dev/null || exit 0
fi
# Forklift images flow (assumes already logged in, no env vars set/unset)
# _setup_cluster_forklift_images holds the sub-action: "list", "clear", "set"
if [ -n "$_setup_cluster_forklift_images" ]; then
    _forklift_images_script="$_script_dir/forklift-images.sh"
    if [ -f "$_forklift_images_script" ]; then
        case "$_setup_cluster_forklift_images" in
            list)
                "$_forklift_images_script" --list
                ;;
            clear)
                "$_forklift_images_script" --clear
                ;;
            set)
                if [ -z "$_setup_cluster_forklift_images_arg" ]; then
                    echo "Error: --forklift-images-set requires an image argument"
                    echo "Usage: kube-setup --forklift-images-set <image>"
                else
                    "$_forklift_images_script" --set "$_setup_cluster_forklift_images_arg"
                fi
                ;;
        esac
        _forklift_images_exit=$?
    else
        echo "Error: forklift-images.sh not found at $_forklift_images_script"
        _forklift_images_exit=1
    fi
    # Cleanup only the temporary variables we created
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg
    unset _setup_cluster_forklift_images _setup_cluster_forklift_images_arg
    unset _script_dir _forklift_images_script _forklift_images_exit
    return 0 2>/dev/null || exit 0
fi
# Cleanup flow
# Reverses the setup: drops the exported KUBE_* variables, unmounts the NFS
# share, and removes any files previously extracted from a CI zip.
if [ "$_setup_cluster_cleanup" = "1" ]; then
    echo "Cleaning up cluster environment..."
    # Unset all exported variables
    unset KUBE_USER
    unset KUBE_PASSWORD
    unset KUBE_API_URL
    unset KUBE_UI_URL
    unset KUBE_TOKEN
    unset KUBECONFIG
    # Cleanup NFS mount (always try)
    if [ -z "$MOUNT_DIR" ]; then
        MOUNT_DIR="$HOME/cluster-credentials"
    fi
    # The surrounding spaces in the grep pattern avoid prefix matches
    # (e.g. /mnt vs /mnt2) in the mount table output.
    if mount | grep -q " $MOUNT_DIR "; then
        echo "Unmounting $MOUNT_DIR..."
        sudo umount "$MOUNT_DIR"
        if [ $? -eq 0 ]; then
            echo "Successfully unmounted $MOUNT_DIR"
        else
            echo "Error: Failed to unmount $MOUNT_DIR"
        fi
    fi
    # Cleanup CI zip extracted files (always try if directory exists)
    if [ -z "$CI_EXTRACT_DIR" ]; then
        CI_EXTRACT_DIR="$HOME/ci-credentials"
    fi
    if [ -d "$CI_EXTRACT_DIR" ]; then
        echo "Removing extracted CI files from $CI_EXTRACT_DIR..."
        rm -rf "$CI_EXTRACT_DIR"
        if [ $? -eq 0 ]; then
            echo "Successfully removed extracted files"
        else
            echo "Error: Failed to remove extracted files"
        fi
    fi
    # Cleanup temporary variables
    # NOTE(review): _setup_cluster_login_default is not unset here, unlike
    # the other flows - confirm whether it should be added.
    unset _setup_cluster_login
    unset _setup_cluster_cleanup
    unset _setup_cluster_kubevirt
    unset _setup_cluster_forklift
    unset _setup_cluster_forklift_cleanup
    unset _setup_cluster_forklift_images
    unset _setup_cluster_forklift_images_arg
    unset _arg
    unset _script_dir
    echo "Cleanup complete."
    return 0 2>/dev/null || exit 0
fi
# Normal setup flow - validate required variables
if [ -z "$CLUSTER" ]; then
    echo "Error: CLUSTER environment variable is not set"
    echo "Usage: export CLUSTER=<cluster-name> && source $0"
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir
    return 1 2>/dev/null || exit 1
fi
# Set default MOUNT_DIR if not set
if [ -z "$MOUNT_DIR" ]; then
    MOUNT_DIR="$HOME/cluster-credentials"
fi
# Mount NFS if NFS_SERVER is set
if [ -n "$NFS_SERVER" ]; then
    echo "Setting up NFS mount..."
    # Ensure mount directory exists
    if [ ! -d "$MOUNT_DIR" ]; then
        echo "Creating mount directory: $MOUNT_DIR"
        mkdir -p "$MOUNT_DIR"
        if [ $? -ne 0 ]; then
            echo "Error: Failed to create mount directory $MOUNT_DIR"
            unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir
            return 1 2>/dev/null || exit 1
        fi
    fi
    # Check if NFS is mounted, mount if not
    if ! mount | grep -q " $MOUNT_DIR "; then
        echo "Mounting NFS share $NFS_SERVER to $MOUNT_DIR..."
        sudo mount -t nfs "$NFS_SERVER" "$MOUNT_DIR"
        if [ $? -ne 0 ]; then
            echo "Error: Failed to mount NFS share"
            unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir
            return 1 2>/dev/null || exit 1
        fi
        echo "NFS mounted successfully."
    else
        echo "NFS already mounted at $MOUNT_DIR"
    fi
fi
# Set NFS cluster dir path (may exist from previous mount)
_nfs_cluster_dir="$MOUNT_DIR/$CLUSTER"
# Set default CI_EXTRACT_DIR if not set
if [ -z "$CI_EXTRACT_DIR" ]; then
    CI_EXTRACT_DIR="$HOME/ci-credentials"
fi
# Set default DOWNLOADS_DIR if not set
if [ -z "$DOWNLOADS_DIR" ]; then
    DOWNLOADS_DIR="$HOME/Downloads"
fi
# Auto-detect CI_ZIP_FILE from Downloads if not set
if [ -z "$CI_ZIP_FILE" ] && [ -d "$DOWNLOADS_DIR" ]; then
    # Look for zip file matching <cluster-name>*.zip pattern (case-insensitive)
    # head -1 picks the first match when several zips exist.
    _auto_zip=$(find "$DOWNLOADS_DIR" -maxdepth 1 -type f -iname "$CLUSTER*.zip" 2>/dev/null | head -1)
    if [ -n "$_auto_zip" ]; then
        echo "Auto-detected zip file from Downloads: $_auto_zip"
        CI_ZIP_FILE="$_auto_zip"
    fi
    unset _auto_zip
fi
# Extract CI zip file if CI_ZIP_FILE is set
if [ -n "$CI_ZIP_FILE" ]; then
    echo "Setting up CI zip file..."
    # Verify zip file exists (warn and continue if not found - Downloads fallback may work)
    if [ ! -f "$CI_ZIP_FILE" ]; then
        echo "Warning: CI zip file not found: $CI_ZIP_FILE (will try Downloads fallback)"
    else
        # Verify unzip is available
        if ! command -v unzip >/dev/null 2>&1; then
            echo "Error: unzip command not found"
            unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir
            return 1 2>/dev/null || exit 1
        fi
        # Remove old extracted data and create fresh extraction directory
        if [ -d "$CI_EXTRACT_DIR" ]; then
            echo "Removing old extracted data from $CI_EXTRACT_DIR..."
            rm -rf "$CI_EXTRACT_DIR"
        fi
        echo "Creating extraction directory: $CI_EXTRACT_DIR"
        mkdir -p "$CI_EXTRACT_DIR"
        if [ $? -ne 0 ]; then
            echo "Error: Failed to create extraction directory $CI_EXTRACT_DIR"
            unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir
            return 1 2>/dev/null || exit 1
        fi
        # Extract the zip file
        echo "Extracting CI zip file to $CI_EXTRACT_DIR..."
        unzip -o -q "$CI_ZIP_FILE" -d "$CI_EXTRACT_DIR"
        if [ $? -ne 0 ]; then
            echo "Error: Failed to extract CI zip file"
            unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir
            return 1 2>/dev/null || exit 1
        fi
        echo "CI zip file extracted successfully."
    fi
fi
# Set zip cluster dir path (may exist from previous extraction)
# Expected structure: home/jenkins/cnv-qe.rhood.us/<CLUSTER>/auth/
_zip_cluster_dir="$CI_EXTRACT_DIR/home/jenkins/cnv-qe.rhood.us/$CLUSTER"
# Find cluster directory: try NFS first, then ZIP, then Downloads
_cluster_dir=""
if [ -n "$_nfs_cluster_dir" ] && [ -d "$_nfs_cluster_dir" ]; then
    echo "Found cluster in NFS: $_nfs_cluster_dir"
    _cluster_dir="$_nfs_cluster_dir"
elif [ -n "$_zip_cluster_dir" ] && [ -d "$_zip_cluster_dir" ]; then
    echo "Found cluster in CI zip: $_zip_cluster_dir"
    _cluster_dir="$_zip_cluster_dir"
else
    # Try to find and extract zip file from Downloads directory
    if [ -d "$DOWNLOADS_DIR" ]; then
        # Look for zip file matching <cluster-name>*.zip pattern (case-insensitive)
        _found_zip=$(find "$DOWNLOADS_DIR" -maxdepth 1 -type f -iname "$CLUSTER*.zip" 2>/dev/null | head -1)
        if [ -n "$_found_zip" ]; then
            echo "Found cluster zip in Downloads: $_found_zip"
            # Verify unzip is available
            if ! command -v unzip >/dev/null 2>&1; then
                echo "Error: unzip command not found"
                unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir _zip_cluster_dir _found_zip
                return 1 2>/dev/null || exit 1
            fi
            # Remove old extracted data and create fresh extraction directory
            if [ -d "$CI_EXTRACT_DIR" ]; then
                echo "Removing old extracted data from $CI_EXTRACT_DIR..."
                rm -rf "$CI_EXTRACT_DIR"
            fi
            echo "Creating extraction directory: $CI_EXTRACT_DIR"
            mkdir -p "$CI_EXTRACT_DIR"
            if [ $? -ne 0 ]; then
                echo "Error: Failed to create extraction directory $CI_EXTRACT_DIR"
                unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir _zip_cluster_dir _found_zip
                return 1 2>/dev/null || exit 1
            fi
            # Extract the zip file
            echo "Extracting zip file to $CI_EXTRACT_DIR..."
            unzip -o -q "$_found_zip" -d "$CI_EXTRACT_DIR"
            if [ $? -ne 0 ]; then
                echo "Error: Failed to extract zip file"
                unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _setup_cluster_forklift_images _setup_cluster_forklift_images_arg _arg _script_dir _nfs_cluster_dir _zip_cluster_dir _found_zip
                return 1 2>/dev/null || exit 1
            fi
            echo "Zip file extracted successfully."
            # Re-check the zip cluster directory
            if [ -d "$_zip_cluster_dir" ]; then
                echo "Found cluster in extracted zip: $_zip_cluster_dir"
                _cluster_dir="$_zip_cluster_dir"
            fi
        fi
        unset _found_zip
    fi
fi
# Verify cluster directory was found
if [ -z "$_cluster_dir" ]; then
    echo "Error: Cluster '$CLUSTER' not found in any configured source"
    # Print what was checked plus what is actually available, to help the
    # user pick a valid CLUSTER value.
    if [ -d "$MOUNT_DIR" ] && [ -n "$(ls -A "$MOUNT_DIR" 2>/dev/null)" ]; then
        echo "  Checked NFS: $_nfs_cluster_dir"
        echo "  Available clusters in NFS:"
        ls "$MOUNT_DIR" 2>/dev/null | head -20 | sed 's/^/    /'
    fi
    if [ -d "$CI_EXTRACT_DIR/home/jenkins/cnv-qe.rhood.us" ]; then
        echo "  Checked CI zip: $_zip_cluster_dir"
        echo "  Available clusters in CI zip:"
        ls "$CI_EXTRACT_DIR/home/jenkins/cnv-qe.rhood.us" 2>/dev/null | head -20 | sed 's/^/    /'
    fi
    if [ -d "$DOWNLOADS_DIR" ]; then
        echo "  Checked Downloads: $DOWNLOADS_DIR/$CLUSTER*.zip (not found)"
    fi
    # NOTE(review): this unset list omits _setup_cluster_forklift_images and
    # _setup_cluster_forklift_images_arg, unlike the other error paths - confirm.
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _nfs_cluster_dir _zip_cluster_dir _script_dir
    return 1 2>/dev/null || exit 1
fi
unset _nfs_cluster_dir _zip_cluster_dir
# Verify auth directory exists
_auth_dir="$_cluster_dir/auth"
if [ ! -d "$_auth_dir" ]; then
    echo "Error: Auth directory not found: $_auth_dir"
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _auth_dir _nfs_cluster_dir _zip_cluster_dir _script_dir
    return 1 2>/dev/null || exit 1
fi
# Verify required files exist
_kubeconfig_file="$_auth_dir/kubeconfig"
_password_file="$_auth_dir/kubeadmin-password"
if [ ! -f "$_kubeconfig_file" ]; then
    echo "Error: kubeconfig file not found: $_kubeconfig_file"
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _auth_dir _kubeconfig_file _password_file _script_dir
    return 1 2>/dev/null || exit 1
fi
if [ ! -f "$_password_file" ]; then
    echo "Error: kubeadmin-password file not found: $_password_file"
    unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _auth_dir _kubeconfig_file _password_file _script_dir
    return 1 2>/dev/null || exit 1
fi
# Export credentials for the caller's shell.
# Command substitutions in the export statements are quoted: `export NAME=...`
# is a regular command in POSIX sh (not an assignment context), so an
# unquoted $(...) would undergo field splitting and pathname expansion.
export KUBE_USER="kubeadmin"
export KUBE_PASSWORD="$(cat "$_password_file")"
export KUBE_API_URL="$(grep "server:" "$_kubeconfig_file" | head -1 | awk '{print $2}')"
# Extract token if available (some clusters use certificate auth instead)
# head -1 keeps a single value even if the kubeconfig lists several users
# with tokens (matches the server: extraction above).
_token_value=$(grep "token:" "$_kubeconfig_file" | head -1 | awk '{print $2}')
if [ -z "$_token_value" ]; then
    # No token in kubeconfig - get one using kubectl create token
    if ! command -v kubectl >/dev/null 2>&1; then
        echo "Error: kubectl not found (requires kubectl 1.24+)"
        unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _auth_dir _kubeconfig_file _password_file _script_dir
        return 1 2>/dev/null || exit 1
    fi
    # Use default SA in openshift-cluster-version namespace (has cluster-admin)
    echo "Getting token for SA default in openshift-cluster-version..."
    # stderr is captured too (2>&1) so the kubectl error can be echoed below
    _token_value=$(KUBECONFIG="$_kubeconfig_file" kubectl create token default -n openshift-cluster-version 2>&1)
    if [ $? -ne 0 ] || [ -z "$_token_value" ]; then
        echo "Error: Could not obtain SA token"
        echo "  $_token_value"
        echo "  Note: Requires kubectl 1.24+"
        unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg _cluster_dir _auth_dir _kubeconfig_file _password_file _token_value _script_dir
        return 1 2>/dev/null || exit 1
    fi
    echo "Successfully obtained cluster-admin token"
fi
if [ -n "$_token_value" ]; then
    export KUBE_TOKEN="$_token_value"
else
    export KUBE_TOKEN=""
fi
# Derive UI URL from API URL
# Transform: https://api.cluster.domain:6443 -> https://console-openshift-console.apps.cluster.domain
_api_url_no_port=$(echo "$KUBE_API_URL" | sed 's/:6443$//')
export KUBE_UI_URL="$(echo "$_api_url_no_port" | sed 's|https://api\.|https://console-openshift-console.apps.|')"
# Export KUBECONFIG if --login flag is set
if [ "$_setup_cluster_login" = "1" ]; then
    if [ "$_setup_cluster_login_default" = "1" ]; then
        # Copy kubeconfig to ~/.kube/config for global access
        mkdir -p "$HOME/.kube"
        cp "$_kubeconfig_file" "$HOME/.kube/config"
        if [ $? -eq 0 ]; then
            export KUBECONFIG="$HOME/.kube/config"
            echo "Kubeconfig copied to ~/.kube/config - kubectl will use cluster: $CLUSTER"
        else
            # Fall back to pointing KUBECONFIG at the source file
            echo "Warning: Failed to copy kubeconfig to ~/.kube/config, using original path"
            export KUBECONFIG="$_kubeconfig_file"
        fi
    else
        export KUBECONFIG="$_kubeconfig_file"
        echo "KUBECONFIG exported - kubectl will use cluster: $CLUSTER"
    fi
fi
# Cleanup temporary variables
unset _setup_cluster_login _setup_cluster_login_default _setup_cluster_cleanup _setup_cluster_kubevirt _setup_cluster_forklift _setup_cluster_forklift_cleanup _arg
unset _setup_cluster_forklift_images _setup_cluster_forklift_images_arg
unset _cluster_dir _auth_dir _kubeconfig_file _password_file _api_url_no_port _token_value
unset _script_dir _forklift_script
# Print summary (password and token are redacted/truncated)
echo ""
echo "Cluster credentials loaded for: $CLUSTER"
echo "  KUBE_USER:     $KUBE_USER"
echo "  KUBE_PASSWORD: ********"
echo "  KUBE_API_URL:  $KUBE_API_URL"
echo "  KUBE_UI_URL:   $KUBE_UI_URL"
if [ -n "$KUBE_TOKEN" ]; then
    # Show first 20 chars of token (POSIX-compatible way)
    _token_preview=$(echo "$KUBE_TOKEN" | cut -c1-20)
    echo "  KUBE_TOKEN:    ${_token_preview}..."
    unset _token_preview
else
    echo "  KUBE_TOKEN:    (not available - cluster uses certificate auth)"
fi
if [ -n "$KUBECONFIG" ]; then
    echo "  KUBECONFIG:    $KUBECONFIG"
else
    echo "  KUBECONFIG:    (not set - use --login flag to set)"
fi
echo ""
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
kubevirt-install.sh | Shell | #!/bin/bash
# kubevirt-install.sh - Install KubeVirt and CDI on a Kubernetes cluster
#
# This script installs KubeVirt and CDI (Containerized Data Importer) from
# the official kubevirt repositories.
#
# Usage: ./kubevirt-install.sh [--no-cdi] [--version VERSION] [--cdi-version VERSION]
# Abort on the first failing command (kubectl/curl errors stop the install).
set -e
# Parse flags
NO_CDI=0
KUBEVIRT_VERSION=""
CDI_VERSION=""
while [ $# -gt 0 ]; do
    case "$1" in
        --no-cdi)
            NO_CDI=1
            shift
            ;;
        --version)
            KUBEVIRT_VERSION="$2"
            shift 2
            ;;
        --cdi-version)
            CDI_VERSION="$2"
            shift 2
            ;;
        --help|-h)
            echo "Usage: $0 [--no-cdi] [--version VERSION] [--cdi-version VERSION]"
            echo ""
            echo "Installs KubeVirt and CDI on a Kubernetes cluster."
            echo ""
            echo "Flags:"
            echo "  --no-cdi               Skip CDI installation"
            echo "  --version VERSION      Install specific KubeVirt version (default: latest)"
            echo "  --cdi-version VERSION  Install specific CDI version (default: latest)"
            echo "  --help, -h             Show this help message"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
# Check for required tools
# kubectl applies manifests, curl queries the GitHub releases API, and jq
# extracts the tag name from the API response.
if ! command -v kubectl >/dev/null 2>&1; then
    echo "Error: kubectl not found"
    exit 1
fi
if ! command -v curl >/dev/null 2>&1; then
    echo "Error: curl not found"
    exit 1
fi
if ! command -v jq >/dev/null 2>&1; then
    echo "Error: jq not found"
    exit 1
fi
echo "=========================================="
echo "KubeVirt Installation"
echo "=========================================="
echo ""
# Get KubeVirt version
# When no --version was given, resolve "latest" via the GitHub releases API.
if [ -z "$KUBEVIRT_VERSION" ]; then
    echo "Fetching latest KubeVirt version..."
    KUBEVIRT_VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases/latest | jq -r .tag_name)
    if [ -z "$KUBEVIRT_VERSION" ] || [ "$KUBEVIRT_VERSION" = "null" ]; then
        echo "Error: Failed to fetch latest KubeVirt version"
        exit 1
    fi
fi
echo "KubeVirt version: $KUBEVIRT_VERSION"
echo ""
# Install KubeVirt operator
echo "Installing KubeVirt operator..."
KUBEVIRT_OPERATOR_URL="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml"
echo "  Source: $KUBEVIRT_OPERATOR_URL"
kubectl apply -f "$KUBEVIRT_OPERATOR_URL"
echo "Waiting for KubeVirt operator deployment..."
# Poll until the deployment object exists, then wait for its rollout.
while ! kubectl get deployment -n kubevirt virt-operator 2>/dev/null; do
    echo "  Waiting for virt-operator deployment to be created..."
    sleep 10
done
kubectl wait deployment -n kubevirt virt-operator --for condition=Available=True --timeout=300s
echo "KubeVirt operator installed successfully."
echo ""
# Install KubeVirt CR
echo "Creating KubeVirt CR..."
KUBEVIRT_CR_URL="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml"
echo "  Source: $KUBEVIRT_CR_URL"
kubectl apply -f "$KUBEVIRT_CR_URL"
echo "Waiting for KubeVirt to be ready..."
kubectl wait kubevirt -n kubevirt kubevirt --for condition=Available=True --timeout=600s
echo "KubeVirt installed successfully."
echo ""
# Check if cluster nodes have KVM support and enable emulation if not
echo "Checking cluster KVM support..."
# Wait for virt-handler to register device plugin
sleep 10
# NOTE(review): only the first node (.items[0]) is inspected; on a mixed
# cluster other nodes may still have KVM - confirm this is acceptable.
KVM_AVAILABLE=$(kubectl get nodes -o jsonpath='{.items[0].status.allocatable.devices\.kubevirt\.io/kvm}' 2>/dev/null)
if [ -z "$KVM_AVAILABLE" ] || [ "$KVM_AVAILABLE" = "0" ]; then
    echo "No KVM support detected on cluster nodes - enabling software emulation..."
    kubectl patch kubevirt kubevirt -n kubevirt --type=merge \
        -p '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
    echo "Restarting virt-handler pods..."
    # Deleting the pods forces them to pick up the new emulation setting.
    kubectl delete pods -n kubevirt -l kubevirt.io=virt-handler
    echo "Waiting for virt-handler to restart..."
    sleep 30
    echo "Software emulation enabled."
else
    echo "KVM support detected on cluster nodes (kvm devices: $KVM_AVAILABLE)."
fi
echo ""
# Install CDI if not skipped
if [ "$NO_CDI" = "0" ]; then
    echo "=========================================="
    echo "CDI (Containerized Data Importer) Installation"
    echo "=========================================="
    echo ""
    # Get CDI version
    # Mirrors the KubeVirt flow: resolve "latest" unless --cdi-version given.
    if [ -z "$CDI_VERSION" ]; then
        echo "Fetching latest CDI version..."
        CDI_VERSION=$(curl -s https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | jq -r .tag_name)
        if [ -z "$CDI_VERSION" ] || [ "$CDI_VERSION" = "null" ]; then
            echo "Error: Failed to fetch latest CDI version"
            exit 1
        fi
    fi
    echo "CDI version: $CDI_VERSION"
    echo ""
    # Install CDI operator
    echo "Installing CDI operator..."
    CDI_OPERATOR_URL="https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml"
    echo "  Source: $CDI_OPERATOR_URL"
    kubectl apply -f "$CDI_OPERATOR_URL"
    echo "Waiting for CDI operator deployment..."
    while ! kubectl get deployment -n cdi cdi-operator 2>/dev/null; do
        echo "  Waiting for cdi-operator deployment to be created..."
        sleep 10
    done
    kubectl wait deployment -n cdi cdi-operator --for condition=Available=True --timeout=300s
    echo "CDI operator installed successfully."
    echo ""
    # Install CDI CR
    echo "Creating CDI CR..."
    CDI_CR_URL="https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-cr.yaml"
    echo "  Source: $CDI_CR_URL"
    kubectl apply -f "$CDI_CR_URL"
    echo "Waiting for CDI to be ready..."
    kubectl wait cdi -n cdi cdi --for condition=Available=True --timeout=300s
    echo "CDI installed successfully."
    echo ""
else
    echo "Skipping CDI installation (--no-cdi flag set)"
    echo ""
fi
# --- Summary ---------------------------------------------------------------
echo "=========================================="
echo "Installation complete!"
echo "=========================================="
echo ""
echo "KubeVirt namespace: kubevirt"
if [ "$NO_CDI" = "0" ]; then
    echo "CDI namespace: cdi"
fi
echo ""
echo "To check the status:"
echo "  kubectl get pods -n kubevirt"
echo "  kubectl get kubevirt -n kubevirt"
if [ "$NO_CDI" = "0" ]; then
    echo "  kubectl get pods -n cdi"
    echo "  kubectl get cdi -n cdi"
fi
echo ""
# KVM_AVAILABLE is still set from the probe earlier in the script.
if [ -z "$KVM_AVAILABLE" ] || [ "$KVM_AVAILABLE" = "0" ]; then
    echo "NOTE: Software emulation was automatically enabled (no KVM on cluster nodes)."
    echo "      VMs will run slower but work without hardware virtualization support."
else
    echo "NOTE: KVM hardware virtualization is available on this cluster."
fi
echo ""
| yaacov/kube-setup | 0 | A cross-platform shell script to mount NFS-shared OpenShift cluster credentials and export them as environment variables. | Shell | yaacov | Yaacov Zamir | Red Hat |
_includes/custom-head.html | HTML | <!-- Anchor links for section headers -->
<style>
  /* Hide the header navigation links (chapter list) */
  /* Several selectors cover theme variations of the Minima site nav. */
  .site-nav,
  .site-header .site-nav,
  header .site-nav,
  .trigger,
  nav.site-nav {
    display: none !important;
  }
  /* Header anchor link styles */
  /* Anchors are invisible until the heading is hovered or they get focus. */
  .header-anchor {
    opacity: 0;
    margin-left: 0.5em;
    font-size: 0.8em;
    text-decoration: none;
    color: #828282;
    transition: opacity 0.2s ease-in-out;
    vertical-align: middle;
  }
  .header-anchor:hover {
    color: #2a7ae2;
  }
  h1:hover .header-anchor,
  h2:hover .header-anchor,
  h3:hover .header-anchor,
  h4:hover .header-anchor,
  h5:hover .header-anchor,
  h6:hover .header-anchor,
  .header-anchor:focus {
    opacity: 1;
  }
  /* Toast notification for copy feedback */
  .copy-toast {
    position: fixed;
    bottom: 20px;
    left: 50%;
    transform: translateX(-50%);
    background-color: #333;
    color: #fff;
    padding: 12px 24px;
    border-radius: 4px;
    font-size: 14px;
    z-index: 1000;
    opacity: 0;
    transition: opacity 0.3s ease-in-out;
    pointer-events: none;
  }
  .copy-toast.show {
    opacity: 1;
  }
  /* Make headers position relative for anchor positioning */
  h1[id], h2[id], h3[id], h4[id], h5[id], h6[id] {
    position: relative;
  }
  /* Ensure anchor links are keyboard accessible */
  .header-anchor:focus {
    outline: 2px solid #2a7ae2;
    outline-offset: 2px;
  }
</style>
<script>
document.addEventListener('DOMContentLoaded', function () {
  // Toast element announcing that a section link was copied.
  var notice = document.createElement('div');
  notice.className = 'copy-toast';
  notice.setAttribute('role', 'status');
  notice.setAttribute('aria-live', 'polite');
  notice.textContent = 'Link copied to clipboard!';
  document.body.appendChild(notice);

  var hideTimer;

  // Fade the toast in, then hide it again after two seconds.
  function displayNotice() {
    clearTimeout(hideTimer);
    notice.classList.add('show');
    hideTimer = setTimeout(function () {
      notice.classList.remove('show');
    }, 2000);
  }

  // Legacy copy path for browsers without the async Clipboard API:
  // select the text inside an off-screen textarea and execCommand it.
  function copyViaTextarea(text, fragment) {
    var scratch = document.createElement('textarea');
    scratch.value = text;
    scratch.style.position = 'fixed';
    scratch.style.left = '-9999px';
    document.body.appendChild(scratch);
    scratch.select();
    try {
      document.execCommand('copy');
      displayNotice();
      history.pushState(null, null, '#' + fragment);
    } catch (err) {
      window.location.hash = fragment;
    }
    document.body.removeChild(scratch);
  }

  // Decorate every heading that carries an id with a copy-link anchor.
  var headings = document.querySelectorAll('h1[id], h2[id], h3[id], h4[id], h5[id], h6[id]');
  headings.forEach(function (heading) {
    var fragment = heading.getAttribute('id');
    if (!fragment) return;

    var link = document.createElement('a');
    link.className = 'header-anchor';
    link.href = '#' + fragment;
    link.setAttribute('aria-label', 'Copy link to this section: ' + heading.textContent.trim());
    link.setAttribute('title', 'Copy link to section');
    // SVG link icon
    link.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" aria-hidden="true"><path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"></path><path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"></path></svg>';

    // On click: copy the absolute URL (origin + path + fragment) and
    // update the address bar without scrolling.
    link.addEventListener('click', function (e) {
      e.preventDefault();
      var fullUrl = window.location.origin + window.location.pathname + '#' + fragment;
      if (navigator.clipboard && navigator.clipboard.writeText) {
        navigator.clipboard.writeText(fullUrl).then(function () {
          displayNotice();
          // Update URL without scrolling
          history.pushState(null, null, '#' + fragment);
        }).catch(function () {
          // Fallback: just navigate to the anchor
          window.location.hash = fragment;
        });
      } else {
        copyViaTextarea(fullUrl, fragment);
      }
    });

    heading.appendChild(link);
  });
});
</script>
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/archive/archive.go | Go | package archive
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewArchiveCmd builds the top-level "archive" command and wires in
// every archive subcommand.
func NewArchiveCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	archiveCmd := &cobra.Command{
		Use:          "archive",
		Short:        "Archive resources",
		Long:         `Archive various MTV resources`,
		SilenceUsage: true,
	}

	// The plan subcommand also answers to the plural form "plans".
	planSubCmd := NewPlanCmd(kubeConfigFlags)
	planSubCmd.Aliases = []string{"plans"}
	archiveCmd.AddCommand(planSubCmd)

	return archiveCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/archive/plan.go | Go | package archive
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/archive/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan archiving command.
//
// Plans are selected either explicitly via --name (comma-separated,
// repeatable) or in bulk via --all; the two selectors are mutually
// exclusive. Each selected plan is archived in turn and the first
// failure aborts the loop.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var all bool
	var planNames []string

	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Archive one or more migration plans",
		Long: `Archive one or more migration plans.
Archiving a plan marks it as completed and stops any ongoing operations.
Archived plans are retained for historical reference but cannot be started.
Use 'unarchive' to restore a plan if needed.`,
		Example: ` # Archive a completed plan
 kubectl-mtv archive plan --name my-migration
 # Archive multiple plans
 kubectl-mtv archive plans --name plan1,plan2,plan3
 # Archive all plans in the namespace
 kubectl-mtv archive plans --all`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate mutual exclusivity of --name and --all.
			if all && len(planNames) > 0 {
				return errors.New("cannot use --name with --all")
			}
			if !all && len(planNames) == 0 {
				return errors.New("must specify --name or --all")
			}

			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(kubeConfigFlags)

			if all {
				// Expand --all into the concrete plan list for the namespace.
				var err error
				planNames, err = client.GetAllPlanNames(cmd.Context(), kubeConfigFlags, namespace)
				if err != nil {
					// %w (was %v) preserves the error chain for errors.Is/As.
					return fmt.Errorf("failed to get all plan names: %w", err)
				}
				if len(planNames) == 0 {
					fmt.Printf("No plans found in namespace %s\n", namespace)
					return nil
				}
			}

			// Archive each plan; stop on the first failure.
			for _, name := range planNames {
				if err := plan.Archive(cmd.Context(), kubeConfigFlags, name, namespace, true); err != nil {
					return err
				}
			}
			return nil
		},
	}

	cmd.Flags().StringSliceVarP(&planNames, "name", "M", nil, "Plan name(s) to archive (comma-separated, e.g. \"plan1,plan2\")")
	cmd.Flags().StringSliceVar(&planNames, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	cmd.Flags().BoolVar(&all, "all", false, "Archive all migration plans in the namespace")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/cancel/cancel.go | Go | package cancel
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewCancelCmd builds the top-level "cancel" command and registers its
// subcommands.
func NewCancelCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	cancelCmd := &cobra.Command{
		Use:          "cancel",
		Short:        "Cancel resources",
		Long:         `Cancel various MTV resources`,
		SilenceUsage: true,
	}

	// "plan" also answers to the plural form "plans".
	planSubCmd := NewPlanCmd(kubeConfigFlags)
	planSubCmd.Aliases = []string{"plans"}
	cancelCmd.AddCommand(planSubCmd)

	return cancelCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/cancel/plan.go | Go | package cancel
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/cancel/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan cancellation command.
//
// VM names are supplied either as a comma-separated list or, with the
// "@file" convention, as a JSON/YAML array read from disk. Empty
// entries (e.g. from "vm1,,vm2" or a trailing comma) are discarded so
// an empty VM name is never passed to the cancel API.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var vmNamesOrFile string
	var name string

	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Cancel specific VMs in a running migration plan",
		Long: `Cancel specific VMs in a running migration plan.
This command allows you to stop the migration of selected VMs while allowing
other VMs in the plan to continue. VMs to cancel can be specified as a
comma-separated list or read from a file.`,
		Example: ` # Cancel specific VMs in a plan
 kubectl-mtv cancel plan --name my-migration --vms "vm1,vm2"
 # Cancel VMs from a file
 kubectl-mtv cancel plan --name my-migration --vms @failed-vms.yaml`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(kubeConfigFlags)

			var vmNames []string
			if strings.HasPrefix(vmNamesOrFile, "@") {
				// "@file" form: read a JSON or YAML array of VM names.
				filePath := vmNamesOrFile[1:]
				content, err := os.ReadFile(filePath)
				if err != nil {
					// %w (was %v) preserves the error chain for errors.Is/As.
					return fmt.Errorf("failed to read file %s: %w", filePath, err)
				}
				// Try JSON first, then fall back to YAML.
				var namesArray []string
				if err := json.Unmarshal(content, &namesArray); err != nil {
					if err := yaml.Unmarshal(content, &namesArray); err != nil {
						return fmt.Errorf("failed to parse VM names from file: %w", err)
					}
				}
				vmNames = namesArray
			} else {
				// Comma-separated list: trim whitespace and drop empty
				// entries so inputs like "vm1,,vm2," cannot produce
				// empty VM names.
				for _, vmName := range strings.Split(vmNamesOrFile, ",") {
					vmName = strings.TrimSpace(vmName)
					if vmName != "" {
						vmNames = append(vmNames, vmName)
					}
				}
			}
			if len(vmNames) == 0 {
				return fmt.Errorf("no VM names specified to cancel")
			}

			return plan.Cancel(kubeConfigFlags, name, namespace, vmNames)
		},
	}

	cmd.Flags().StringVarP(&name, "name", "M", "", "Plan name")
	cmd.Flags().StringVar(&vmNamesOrFile, "vms", "", "List of VM names to cancel (comma-separated) or path to file containing VM names (prefix with @)")
	if err := cmd.MarkFlagRequired("name"); err != nil {
		fmt.Printf("Warning: error marking 'name' flag as required: %v\n", err)
	}
	if err := cmd.MarkFlagRequired("vms"); err != nil {
		fmt.Printf("Warning: error marking 'vms' flag as required: %v\n", err)
	}
	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/cmd_test.go | Go | package cmd
import (
"testing"
"github.com/spf13/cobra"
)
// TestAllCommandsHelp verifies that every command in the tree can render
// its help output without panicking. This catches flag shorthand
// conflicts that only surface when cobra merges flag sets — e.g. a local
// "-i" shorthand colliding with a global "-i" makes cobra panic with
// "unable to redefine shorthand".
func TestAllCommandsHelp(t *testing.T) {
	// Walk the whole command tree rooted at rootCmd (whose construction
	// registers all subcommands).
	var walk func(c *cobra.Command)
	walk = func(c *cobra.Command) {
		t.Run(c.CommandPath(), func(t *testing.T) {
			// Convert a panic from flag merging into a test failure.
			defer func() {
				if r := recover(); r != nil {
					t.Errorf("command %q panicked: %v", c.CommandPath(), r)
				}
			}()
			// Touching the flag sets forces cobra to merge them.
			c.Flags()
			c.InheritedFlags()
			// Rendering usage also triggers flag registration.
			_ = c.UsageString()
		})
		for _, child := range c.Commands() {
			walk(child)
		}
	}
	walk(rootCmd)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/create.go | Go | package create
import (
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/pkg/util/config"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// GlobalConfigGetter is a type alias for the shared config interface.
// It keeps backward compatibility for callers importing the interface
// from this package while the definition lives in pkg/util/config.
type GlobalConfigGetter = config.InventoryConfigWithVerbosity

// NewCreateCmd creates the create command with all its subcommands
// (providers, plans, mappings, hosts, hooks, and the VDDK image builder).
func NewCreateCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	createCmd := &cobra.Command{
		Use:          "create",
		Short:        "Create resources",
		Long:         `Create various MTV resources like providers, plans, mappings, and VDDK images`,
		SilenceUsage: true,
	}

	// Register a subcommand together with its plural alias.
	withPlural := func(c *cobra.Command, plural string) {
		c.Aliases = []string{plural}
		createCmd.AddCommand(c)
	}

	withPlural(NewProviderCmd(kubeConfigFlags), "providers")
	withPlural(NewPlanCmd(kubeConfigFlags, globalConfig), "plans")
	withPlural(NewMappingCmd(kubeConfigFlags, globalConfig), "mappings")
	withPlural(NewHostCmd(kubeConfigFlags, globalConfig), "hosts")
	withPlural(NewHookCmd(kubeConfigFlags), "hooks")

	// The VDDK command has no plural alias.
	createCmd.AddCommand(NewVddkCmd(globalConfig, kubeConfigFlags))

	return createCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/hook.go | Go | package create
import (
"fmt"
"os"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/hook"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// NewHookCmd creates the hook creation command.
//
// A hook runs a container image (optionally with an Ansible playbook)
// at defined points of a migration. The playbook flag supports the
// "@file" convention; --deadline of 0 means "unset" and is not written
// into the HookSpec.
func NewHookCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var name, image string
	var serviceAccount string
	var playbook string
	var deadline int64

	// HookSpec fields populated from the flags below.
	var hookSpec forkliftv1beta1.HookSpec

	cmd := &cobra.Command{
		Use:   "hook",
		Short: "Create a migration hook",
		Long: `Create a migration hook resource that can be used to run custom automation during migrations.
Hooks allow you to execute custom logic at various points during the migration process by running
container images with Ansible playbooks. Hooks can be used for pre-migration validation,
post-migration cleanup, or any custom automation needs.
The playbook parameter supports the @ convention to read Ansible playbook content from a file.
Examples:
 # Create a hook with default image and inline playbook content
 kubectl-mtv create hook --name my-hook --playbook "$(cat playbook.yaml)"
 # Create a hook with custom image reading playbook from file
 kubectl-mtv create hook --name my-hook --image my-registry/hook-image:latest --playbook @playbook.yaml
 # Create a hook with service account and deadline (uses default image)
 kubectl-mtv create hook --name my-hook --service-account my-sa --deadline 300`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Reject negative deadlines; 0 is allowed and means "unset".
			// (Message fixed: the old text claimed "positive" although
			// zero passes this check.)
			if deadline < 0 {
				return fmt.Errorf("deadline must be a non-negative number")
			}

			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(kubeConfigFlags)

			// Handle playbook file loading if the @ convention is used.
			if strings.HasPrefix(playbook, "@") {
				filePath := playbook[1:]
				fileContent, err := os.ReadFile(filePath)
				if err != nil {
					// %w (was %v) preserves the error chain for errors.Is/As.
					return fmt.Errorf("failed to read playbook file %s: %w", filePath, err)
				}
				playbook = string(fileContent)
			}

			// Populate the HookSpec; optional fields are only set when given.
			hookSpec.Image = image
			if serviceAccount != "" {
				hookSpec.ServiceAccount = serviceAccount
			}
			if playbook != "" {
				hookSpec.Playbook = playbook
			}
			if deadline > 0 {
				hookSpec.Deadline = deadline
			}

			opts := hook.CreateHookOptions{
				Name:        name,
				Namespace:   namespace,
				ConfigFlags: kubeConfigFlags,
				HookSpec:    hookSpec,
			}
			return hook.Create(opts)
		},
	}

	cmd.Flags().StringVarP(&name, "name", "M", "", "Hook name")
	cmd.Flags().StringVar(&image, "image", "quay.io/kubev2v/hook-runner", "Container image URL to run (default: quay.io/kubev2v/hook-runner)")
	cmd.Flags().StringVar(&serviceAccount, "service-account", "", "Service account to use for the hook (optional)")
	cmd.Flags().StringVar(&playbook, "playbook", "", "Ansible playbook content, or use @filename to read from file (optional)")
	cmd.Flags().Int64Var(&deadline, "deadline", 0, "Hook deadline in seconds (optional)")

	if err := cmd.MarkFlagRequired("name"); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/host.go | Go | package create
import (
"fmt"
"os"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/host"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewHostCmd creates the host creation command.
//
// Hosts are vSphere-only resources that let Forklift transfer disk data
// directly from ESXi interfaces instead of going through vCenter. The
// command validates several mutually-exclusive flag combinations before
// delegating to host.Create.
func NewHostCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// Flag-bound state; all host IDs share the same credential and
	// IP-resolution settings.
	var hostIDs []string
	var provider string
	var username, password string
	var existingSecret string
	var ipAddress string
	var networkAdapterName string
	var hostInsecureSkipTLS bool
	var cacert string
	// HookSpec fields
	var hostSpec forkliftv1beta1.HostSpec
	cmd := &cobra.Command{
		Use:   "host",
		Short: "Create migration hosts",
		Long: `Create migration hosts for vSphere providers. Hosts enable direct data transfer from ESXi hosts, bypassing vCenter for improved performance.
By creating host resources, Forklift can utilize ESXi host interfaces directly for network transfer to OpenShift, provided the OpenShift worker nodes and ESXi host interfaces have network connectivity. This is particularly beneficial when users want to control which specific ESXi interface is used for migration, even without direct access to ESXi host credentials.
Only vSphere providers support host creation. The --host-id flag requires inventory host IDs
(e.g. "host-8"), NOT display names or IP addresses. Use 'kubectl-mtv get inventory host
--provider <name>' to list available host IDs.
Examples:
 # First, discover available host IDs from the provider inventory
 kubectl-mtv get inventory host --provider my-vsphere-provider
 # ESXi endpoint provider with direct IP (no credentials needed - uses provider secret automatically)
 kubectl-mtv create host --host-id host-8 --provider my-esxi-provider --ip-address 192.168.1.10
 # ESXi endpoint provider with network adapter lookup
 kubectl-mtv create host --host-id host-8 --provider my-esxi-provider --network-adapter "Management Network"
 # Create a host using existing secret and direct IP
 kubectl-mtv create host --host-id host-8 --provider my-vsphere-provider --existing-secret my-secret --ip-address 192.168.1.10
 # Create a host with new credentials and direct IP
 kubectl-mtv create host --host-id host-8 --provider my-vsphere-provider --username user --password pass --ip-address 192.168.1.10
 # Create a host using IP from inventory network adapter
 kubectl-mtv create host --host-id host-8 --provider my-vsphere-provider --username user --password pass --network-adapter "Management Network"
 # Create multiple hosts (all use same IP resolution method)
 kubectl-mtv create host --host-id host-8,host-12,host-15 --provider my-vsphere-provider --existing-secret my-secret --network-adapter "Management Network"`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate input parameters
			if provider == "" {
				return fmt.Errorf("provider is required")
			}
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			// ESXi-endpoint providers reuse the provider secret, so explicit
			// credentials are only mandatory for non-ESXi endpoints.
			providerHasESXIEndpoint, _, err := host.CheckProviderESXIEndpoint(cmd.Context(), kubeConfigFlags, provider, namespace)
			if err != nil {
				return fmt.Errorf("failed to check provider endpoint type: %v", err)
			}
			if !providerHasESXIEndpoint {
				if existingSecret == "" && (username == "" || password == "") {
					return fmt.Errorf("either --existing-secret OR both --username and --password must be provided")
				}
			}
			// Credentials come from exactly one source.
			if existingSecret != "" && (username != "" || password != "") {
				return fmt.Errorf("cannot use both --existing-secret and --username/--password")
			}
			// Exactly one IP-resolution method must be chosen.
			if ipAddress == "" && networkAdapterName == "" {
				return fmt.Errorf("either --ip-address OR --network-adapter must be provided")
			}
			if ipAddress != "" && networkAdapterName != "" {
				return fmt.Errorf("cannot use both --ip-address and --network-adapter")
			}
			// "@file" convention: load the CA certificate from disk.
			if strings.HasPrefix(cacert, "@") {
				filePath := cacert[1:]
				fileContent, err := os.ReadFile(filePath)
				if err != nil {
					return fmt.Errorf("failed to read CA certificate file %s: %v", filePath, err)
				}
				cacert = string(fileContent)
			}
			opts := host.CreateHostOptions{
				HostIDs:                  hostIDs,
				Namespace:                namespace,
				Provider:                 provider,
				ConfigFlags:              kubeConfigFlags,
				InventoryURL:             inventoryURL,
				InventoryInsecureSkipTLS: inventoryInsecureSkipTLS,
				Username:                 username,
				Password:                 password,
				ExistingSecret:           existingSecret,
				IPAddress:                ipAddress,
				NetworkAdapterName:       networkAdapterName,
				HostInsecureSkipTLS:      hostInsecureSkipTLS,
				CACert:                   cacert,
				HostSpec:                 hostSpec,
			}
			return host.Create(cmd.Context(), opts)
		},
	}
	cmd.Flags().StringSliceVar(&hostIDs, "host-id", nil, "Inventory host ID(s) to create (comma-separated, e.g. \"host-8,host-12\"); use 'get inventory host' to list IDs")
	// Hidden plural alias bound to the same slice.
	cmd.Flags().StringSliceVar(&hostIDs, "host-ids", nil, "Alias for --host-id")
	_ = cmd.Flags().MarkHidden("host-ids")
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name (must be a vSphere provider)")
	cmd.Flags().StringVarP(&username, "username", "u", "", "Username for host authentication (required if --existing-secret not provided)")
	cmd.Flags().StringVar(&password, "password", "", "Password for host authentication (required if --existing-secret not provided)")
	cmd.Flags().StringVar(&existingSecret, "existing-secret", "", "Name of existing secret to use for host authentication")
	cmd.Flags().StringVar(&ipAddress, "ip-address", "", "IP address for disk transfer (required - mutually exclusive with --network-adapter)")
	cmd.Flags().StringVar(&networkAdapterName, "network-adapter", "", "Network adapter name to get IP address from inventory (required - mutually exclusive with --ip-address)")
	cmd.Flags().BoolVar(&hostInsecureSkipTLS, "host-insecure-skip-tls", false, "Skip TLS verification when connecting to the host (only used when creating new secret)")
	cmd.Flags().StringVar(&cacert, "cacert", "", "CA certificate for host authentication - provide certificate content directly or use @filename to load from file (only used when creating new secret)")
	if err := cmd.MarkFlagRequired("host-id"); err != nil {
		panic(err)
	}
	if err := cmd.MarkFlagRequired("provider"); err != nil {
		panic(err)
	}
	// Shell completions; the host-id/ip-address/adapter completers read the
	// current --provider and --host-id flag values captured by closure.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletionByType(kubeConfigFlags, "vsphere")); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("host-id", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return completion.HostIDCompletion(kubeConfigFlags, provider, toComplete)
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("ip-address", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return completion.HostIPAddressCompletion(kubeConfigFlags, provider, hostIDs, toComplete)
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("network-adapter", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return completion.HostNetworkAdapterCompletion(kubeConfigFlags, provider, hostIDs, toComplete)
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/mapping.go | Go | package create
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/mapping"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
)
// NewMappingCmd creates the mapping creation command, grouping the two
// supported mapping kinds (network and storage) as subcommands.
func NewMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	mappingCmd := &cobra.Command{
		Use:   "mapping",
		Short: "Create a new mapping",
		Long: `Create a new network or storage mapping.
Mappings define how source provider resources (networks, storage) are translated
to target OpenShift resources. Use 'mapping network' or 'mapping storage' to
create specific mapping types.`,
		SilenceUsage: true,
	}

	// Register the concrete mapping kinds.
	mappingCmd.AddCommand(newNetworkMappingCmd(kubeConfigFlags, globalConfig))
	mappingCmd.AddCommand(newStorageMappingCmd(kubeConfigFlags, globalConfig))

	return mappingCmd
}
// newNetworkMappingCmd creates the network mapping subcommand.
func newNetworkMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var mapName, srcProvider, dstProvider string
	var pairSpec string

	cmd := &cobra.Command{
		Use:   "network",
		Short: "Create a new network mapping",
		Long: `Create a new network mapping between source and target providers.
Network mappings translate source VM network connections to target network
attachment definitions (NADs) or pod networking ('default').
Pair formats:
- source:target-namespace/target-network - Map to specific NAD
- source:target-network - Map to NAD in same namespace
- source:default - Map to pod networking
- source:ignored - Skip this network`,
		Example: ` # Create a network mapping to pod networking
 kubectl-mtv create mapping network --name my-net-map \
 --source vsphere-prod \
 --target host \
 --network-pairs "VM Network:default"
 # Create a network mapping to a specific NAD
 kubectl-mtv create mapping network --name my-net-map \
 --source vsphere-prod \
 --target host \
 --network-pairs "VM Network:openshift-cnv/br-external,Management:default"`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Namespace comes from the kube context/flags; inventory
			// settings from the global config (auto-discovered if unset).
			namespace := client.ResolveNamespace(kubeConfigFlags)
			inventoryURL := globalConfig.GetInventoryURL()
			skipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return mapping.CreateNetworkWithInsecure(kubeConfigFlags, mapName, namespace, srcProvider, dstProvider, pairSpec, inventoryURL, skipTLS)
		},
	}

	cmd.Flags().StringVarP(&mapName, "name", "M", "", "Network mapping name")
	cmd.Flags().StringVarP(&srcProvider, "source", "S", "", "Source provider name")
	cmd.Flags().StringVarP(&dstProvider, "target", "T", "", "Target provider name")
	cmd.Flags().StringVar(&pairSpec, "network-pairs", "", "Network mapping pairs in format 'source:target-namespace/target-network', 'source:target-network', 'source:default', or 'source:ignored' (comma-separated)")

	if err := cmd.MarkFlagRequired("name"); err != nil {
		panic(err)
	}
	return cmd
}
// newStorageMappingCmd creates the storage mapping subcommand.
//
// Besides the basic source:storage-class pairs, the command carries
// per-mapping defaults (volume mode, access mode, offload plugin) and a
// set of flags for creating a new offload secret inline when no
// existing secret name is given.
func newStorageMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var name, sourceProvider, targetProvider string
	var storagePairs string
	var defaultVolumeMode string
	var defaultAccessMode string
	var defaultOffloadPlugin string
	var defaultOffloadSecret string
	var defaultOffloadVendor string
	// Offload secret creation flags
	var offloadVSphereUsername, offloadVSpherePassword, offloadVSphereURL string
	var offloadStorageUsername, offloadStoragePassword, offloadStorageEndpoint string
	var offloadCACert string
	var offloadInsecureSkipTLS bool
	cmd := &cobra.Command{
		Use:   "storage",
		Short: "Create a new storage mapping",
		Long: `Create a new storage mapping between source and target providers.
Storage mappings translate source datastores/storage domains to target Kubernetes
storage classes. Advanced options include volume mode, access mode, and offload
plugin configuration for optimized data transfer.`,
		Example: ` # Create a simple storage mapping
 kubectl-mtv create mapping storage --name my-storage-map \
 --source vsphere-prod \
 --target host \
 --storage-pairs "datastore1:standard,datastore2:fast"
 # Create a storage mapping with volume mode
 kubectl-mtv create mapping storage --name my-storage-map \
 --source vsphere-prod \
 --target host \
 --storage-pairs "datastore1:ocs-storagecluster-ceph-rbd" \
 --default-volume-mode Block
 # Create a storage mapping with offload plugin
 kubectl-mtv create mapping storage --name my-storage-map \
 --source vsphere-prod \
 --target host \
 --storage-pairs "datastore1:ocs-storagecluster-ceph-rbd;offloadPlugin=vsphere;offloadVendor=ontap"`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			// All parsing/validation of the pair string is delegated to
			// the mapping package.
			return mapping.CreateStorageWithOptions(mapping.StorageCreateOptions{
				ConfigFlags:              kubeConfigFlags,
				Name:                     name,
				Namespace:                namespace,
				SourceProvider:           sourceProvider,
				TargetProvider:           targetProvider,
				StoragePairs:             storagePairs,
				InventoryURL:             inventoryURL,
				InventoryInsecureSkipTLS: inventoryInsecureSkipTLS,
				DefaultVolumeMode:        defaultVolumeMode,
				DefaultAccessMode:        defaultAccessMode,
				DefaultOffloadPlugin:     defaultOffloadPlugin,
				DefaultOffloadSecret:     defaultOffloadSecret,
				DefaultOffloadVendor:     defaultOffloadVendor,
				// Offload secret creation options
				OffloadVSphereUsername: offloadVSphereUsername,
				OffloadVSpherePassword: offloadVSpherePassword,
				OffloadVSphereURL:      offloadVSphereURL,
				OffloadStorageUsername: offloadStorageUsername,
				OffloadStoragePassword: offloadStoragePassword,
				OffloadStorageEndpoint: offloadStorageEndpoint,
				OffloadCACert:          offloadCACert,
				OffloadInsecureSkipTLS: offloadInsecureSkipTLS,
			})
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Storage mapping name")
	cmd.Flags().StringVarP(&sourceProvider, "source", "S", "", "Source provider name")
	cmd.Flags().StringVarP(&targetProvider, "target", "T", "", "Target provider name")
	cmd.Flags().StringVar(&storagePairs, "storage-pairs", "", "Storage mapping pairs in format 'source:storage-class[;volumeMode=Block|Filesystem][;accessMode=ReadWriteOnce|ReadWriteMany|ReadOnlyMany][;offloadPlugin=vsphere][;offloadSecret=secret-name][;offloadVendor=vantara|ontap|...]' (comma-separated pairs, semicolon-separated parameters)")
	cmd.Flags().StringVar(&defaultVolumeMode, "default-volume-mode", "", "Default volume mode for all storage pairs (Filesystem|Block)")
	cmd.Flags().StringVar(&defaultAccessMode, "default-access-mode", "", "Default access mode for all storage pairs (ReadWriteOnce|ReadWriteMany|ReadOnlyMany)")
	cmd.Flags().StringVar(&defaultOffloadPlugin, "default-offload-plugin", "", "Default offload plugin type for all storage pairs (vsphere)")
	cmd.Flags().StringVar(&defaultOffloadSecret, "default-offload-secret", "", "Existing offload secret name to use (creates new secret if not provided and offload credentials given)")
	cmd.Flags().StringVar(&defaultOffloadVendor, "default-offload-vendor", "", "Default offload plugin vendor for all storage pairs (flashsystem|vantara|ontap|primera3par|pureFlashArray|powerflex|powermax|powerstore|infinibox)")
	// Offload secret creation flags
	cmd.Flags().StringVar(&offloadVSphereUsername, "offload-vsphere-username", "", "vSphere username for offload secret (creates new secret if no --default-offload-secret provided)")
	cmd.Flags().StringVar(&offloadVSpherePassword, "offload-vsphere-password", "", "vSphere password for offload secret")
	cmd.Flags().StringVar(&offloadVSphereURL, "offload-vsphere-url", "", "vSphere vCenter URL for offload secret")
	cmd.Flags().StringVar(&offloadStorageUsername, "offload-storage-username", "", "Storage array username for offload secret")
	cmd.Flags().StringVar(&offloadStoragePassword, "offload-storage-password", "", "Storage array password for offload secret")
	cmd.Flags().StringVar(&offloadStorageEndpoint, "offload-storage-endpoint", "", "Storage array management endpoint URL for offload secret")
	cmd.Flags().StringVar(&offloadCACert, "offload-cacert", "", "CA certificate for offload secret (use @filename to load from file)")
	cmd.Flags().BoolVar(&offloadInsecureSkipTLS, "offload-insecure-skip-tls", false, "Skip TLS verification for offload connections")
	// Static shell completions for the enumerated-value flags.
	// Add completion for volume mode flag
	if err := cmd.RegisterFlagCompletionFunc("default-volume-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"Filesystem", "Block"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for access mode flag
	if err := cmd.RegisterFlagCompletionFunc("default-access-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for offload plugin flag
	if err := cmd.RegisterFlagCompletionFunc("default-offload-plugin", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"vsphere"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for offload vendor flag
	if err := cmd.RegisterFlagCompletionFunc("default-offload-vendor", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"flashsystem", "vantara", "ontap", "primera3par", "pureFlashArray", "powerflex", "powermax", "powerstore", "infinibox"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	if err := cmd.MarkFlagRequired("name"); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/plan.go | Go | package create
import (
"encoding/json"
"fmt"
"os"
"strings"
forkliftv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1"
planv1beta1 "github.com/kubev2v/forklift/pkg/apis/forklift/v1beta1/plan"
"github.com/spf13/cobra"
"github.com/yaacov/karl-interpreter/pkg/karl"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/plan"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// parseKeyValuePairs converts flag values containing comma-separated
// "key=value" entries into a single map. Each element of pairs may hold
// several entries ("a=1,b=2"); keys and values are whitespace-trimmed and
// blank entries are skipped. An entry without '=' yields an error that
// names fieldName, and later duplicates of a key overwrite earlier ones.
func parseKeyValuePairs(pairs []string, fieldName string) (map[string]string, error) {
	parsed := make(map[string]string)
	for _, group := range pairs {
		// One flag value may carry multiple comma-separated entries.
		for _, entry := range strings.Split(group, ",") {
			entry = strings.TrimSpace(entry)
			if entry == "" {
				continue
			}
			key, value, found := strings.Cut(entry, "=")
			if !found {
				return nil, fmt.Errorf("invalid %s: %s", fieldName, entry)
			}
			parsed[strings.TrimSpace(key)] = strings.TrimSpace(value)
		}
	}
	return parsed, nil
}
// NewPlanCmd creates the plan creation command.
//
// The returned command builds a Forklift migration Plan from CLI flags:
// the VM list is resolved from comma-separated names, an @file (YAML or
// JSON), or a "where ..." TSL inventory query; optional pre/post hooks are
// attached to every selected VM; PlanSpec fields (transfer network,
// migration type, labels, node selectors, KARL affinity, power state, ...)
// are populated; and actual resource creation is delegated to plan.Create.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var name, sourceProvider, targetProvider string
	var networkMapping, storageMapping string
	var vmNamesQuaryOrFile string
	var defaultTargetNetwork, defaultTargetStorageClass string
	var networkPairs, storagePairs string
	var preHook, postHook string
	// Storage mapping enhancement options
	var defaultVolumeMode, defaultAccessMode string
	var defaultOffloadPlugin, defaultOffloadSecret, defaultOffloadVendor string
	// Offload secret creation flags
	var offloadVSphereUsername, offloadVSpherePassword, offloadVSphereURL string
	var offloadStorageUsername, offloadStoragePassword, offloadStorageEndpoint string
	var offloadCACert string
	var offloadInsecureSkipTLS bool
	// PlanSpec fields (some flags bind directly into planSpec below)
	var planSpec forkliftv1beta1.PlanSpec
	var transferNetwork string
	var installLegacyDrivers string // "true", "false", or "" for nil (auto-detection)
	migrationTypeFlag := flags.NewMigrationTypeFlag()
	var targetLabels []string
	var targetNodeSelector []string
	var useCompatibilityMode bool
	var targetAffinity string
	var targetPowerState string
	// Convertor-related flags (virt-v2v conversion pods)
	var convertorLabels []string
	var convertorNodeSelector []string
	var convertorAffinity string
	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Create a migration plan",
		Long: `Create a migration plan to move VMs from a source provider to OpenShift.
A plan defines which VMs to migrate, the source and target providers, and
network/storage mappings. VMs can be specified as:
- Comma-separated names: --vms "vm1,vm2,vm3"
- TSL query: --vms "where name ~= 'prod-.*' and cpuCount <= 8"
- YAML/JSON file: --vms @vms.yaml
Providers:
--source is the name of the source provider resource (e.g. "vsphere-prod").
--target is the name of the target provider resource (e.g. "host", "ocp-target").
If --target is omitted, the first OpenShift provider in the namespace is used.
Network and storage mappings can be created inline using --network-pairs and
--storage-pairs, or reference existing mapping resources with --network-mapping
and --storage-mapping.
Mapping Pair Formats:
--network-pairs format: "source_network:target" (comma-separated for multiple)
target can be a network name, namespace/name, "default" (pod networking), or "ignored" (skip).
Example: "VM Network:default" or "prod-net:myns/br-ext,mgmt-net:default"
--storage-pairs format: "source_datastore:storage_class" (comma-separated for multiple)
Maps source datastores to OpenShift storage classes.
Example: "datastore1:standard" or "ds1:fast-ssd,ds2:economy"
Query Language (TSL):
The --vms flag accepts TSL queries to select VMs dynamically:
--vms "where name ~= 'prod-.*' and cpuCount <= 8"
--vms "where powerState = 'poweredOn' and memoryMB > 4096"
--vms "where len(disks) > 1"
Run 'kubectl-mtv help tsl' for the full syntax reference and field list.
Affinity Syntax (KARL):
The --target-affinity and --convertor-affinity flags use KARL syntax:
--target-affinity "REQUIRE pods(app=database) on node"
--convertor-affinity "PREFER pods(app=cache) on zone weight=80"
Rule types: REQUIRE, PREFER, AVOID, REPEL. Topology: node, zone, region, rack.
Run 'kubectl-mtv help karl' for the full syntax reference.`,
		Example: ` # Create a plan with specific VMs and inline mappings
 # --target is the name of the target provider resource (omit to auto-detect)
 # --network-pairs maps source networks to target: "source:target"
 # use "default" as target for pod networking
 # --storage-pairs maps source datastores to storage classes: "source:storageclass"
 kubectl-mtv create plan --name my-migration \
 --source vsphere-prod \
 --target host \
 --vms "web-server,db-server" \
 --network-pairs "VM Network:default" \
 --storage-pairs "datastore1:standard"
 # Multiple network and storage mappings (comma-separated pairs)
 kubectl-mtv create plan --name multi-map \
 --source vsphere-prod \
 --target host \
 --vms "app-vm,cache-vm" \
 --network-pairs "VM Network:default,Production:myns/br-ext" \
 --storage-pairs "datastore1:fast-ssd,datastore2:economy"
 # Create a plan using VM query to select VMs dynamically
 kubectl-mtv create plan --name batch-migration \
 --source vsphere-prod \
 --target host \
 --vms "where name ~= 'legacy-.*'" \
 --default-target-network default \
 --default-target-storage-class standard
 # Create a warm migration plan
 kubectl-mtv create plan --name warm-migration \
 --source vsphere-prod \
 --target host \
 --vms "critical-vm" \
 --migration-type warm
 # Create a plan from VM file
 kubectl-mtv get inventory vm --provider vsphere-prod --output planvms > vms.yaml
 kubectl-mtv create plan --name file-migration \
 --source vsphere-prod \
 --target host \
 --vms @vms.yaml \
 --default-target-network default \
 --default-target-storage-class standard`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			// Validate that existing mapping flags and mapping pair flags are not used together
			if networkMapping != "" && networkPairs != "" {
				return fmt.Errorf("cannot use both --network-mapping and --network-pairs flags")
			}
			if storageMapping != "" && storagePairs != "" {
				return fmt.Errorf("cannot use both --storage-mapping and --storage-pairs flags")
			}
			// Validate that conversion-only migrations don't use storage mappings
			if migrationTypeFlag.GetValue() == "conversion" {
				if storageMapping != "" {
					return fmt.Errorf("cannot use --storage-mapping with migration type 'conversion'. Conversion-only migrations require empty storage mapping")
				}
				if storagePairs != "" {
					return fmt.Errorf("cannot use --storage-pairs with migration type 'conversion'. Conversion-only migrations require empty storage mapping")
				}
			}
			// Resolve the VM list from one of the three --vms input forms:
			// a TSL query ("where ..."), a file ("@path"), or name list.
			var vmList []planv1beta1.VM
			if strings.HasPrefix(vmNamesQuaryOrFile, "where ") {
				// It's a query string - fetch VMs from inventory
				query := vmNamesQuaryOrFile // The full string including "where "
				// Parse source provider to extract name and namespace
				sourceProviderName := sourceProvider
				sourceProviderNamespace := namespace
				if strings.Contains(sourceProvider, "/") {
					parts := strings.SplitN(sourceProvider, "/", 2)
					sourceProviderNamespace = strings.TrimSpace(parts[0])
					sourceProviderName = strings.TrimSpace(parts[1])
				}
				fmt.Printf("Fetching VMs from provider '%s' using query: %s\n", sourceProviderName, query)
				var err error
				vmList, err = inventory.FetchVMsByQueryWithInsecure(cmd.Context(), kubeConfigFlags, sourceProviderName, sourceProviderNamespace, inventoryURL, query, inventoryInsecureSkipTLS)
				if err != nil {
					return fmt.Errorf("failed to fetch VMs using query: %v", err)
				}
				if len(vmList) == 0 {
					return fmt.Errorf("no VMs found matching the query")
				}
				fmt.Printf("Found %d VM(s) matching the query\n", len(vmList))
			} else if strings.HasPrefix(vmNamesQuaryOrFile, "@") {
				// It's a file
				filePath := vmNamesQuaryOrFile[1:]
				content, err := os.ReadFile(filePath)
				if err != nil {
					return fmt.Errorf("failed to read file %s: %v", filePath, err)
				}
				// Attempt to unmarshal as YAML first, then try JSON
				err = yaml.Unmarshal(content, &vmList)
				if err != nil {
					err = json.Unmarshal(content, &vmList)
					if err != nil {
						return fmt.Errorf("failed to unmarshal file %s as YAML or JSON: %v", filePath, err)
					}
				}
			} else {
				// It's a comma-separated list
				vmNameSlice := strings.Split(vmNamesQuaryOrFile, ",")
				for _, vmName := range vmNameSlice {
					newVM := planv1beta1.VM{}
					newVM.Name = strings.TrimSpace(vmName)
					vmList = append(vmList, newVM)
				}
			}
			// Add hooks to all VMs if specified
			if preHook != "" || postHook != "" {
				for i := range vmList {
					var hooks []planv1beta1.HookRef
					// Add pre-hook if specified
					if preHook != "" {
						preHookRef := planv1beta1.HookRef{
							Step: "PreHook",
							Hook: corev1.ObjectReference{
								Kind:       "Hook",
								APIVersion: "forklift.konveyor.io/v1beta1",
								Name:       strings.TrimSpace(preHook),
								Namespace:  namespace,
							},
						}
						hooks = append(hooks, preHookRef)
					}
					// Add post-hook if specified
					if postHook != "" {
						postHookRef := planv1beta1.HookRef{
							Step: "PostHook",
							Hook: corev1.ObjectReference{
								Kind:       "Hook",
								APIVersion: "forklift.konveyor.io/v1beta1",
								Name:       strings.TrimSpace(postHook),
								Namespace:  namespace,
							},
						}
						hooks = append(hooks, postHookRef)
					}
					// Add hooks to the VM (append to existing hooks if any)
					vmList[i].Hooks = append(vmList[i].Hooks, hooks...)
				}
			}
			// Create transfer network reference if provided
			if transferNetwork != "" {
				transferNetworkName := strings.TrimSpace(transferNetwork)
				transferNetworkNamespace := namespace
				// If transferNetwork has "/", the first part is the namespace
				if strings.Contains(transferNetwork, "/") {
					parts := strings.SplitN(transferNetwork, "/", 2)
					transferNetworkName = strings.TrimSpace(parts[1])
					transferNetworkNamespace = strings.TrimSpace(parts[0])
				}
				planSpec.TransferNetwork = &corev1.ObjectReference{
					Kind:       "NetworkAttachmentDefinition",
					APIVersion: "k8s.cni.cncf.io/v1",
					Name:       transferNetworkName,
					Namespace:  transferNetworkNamespace,
				}
			}
			// Handle InstallLegacyDrivers flag: only "true"/"false" set the
			// pointer; any other value (including "") leaves it nil (auto).
			if installLegacyDrivers != "" {
				switch installLegacyDrivers {
				case "true":
					val := true
					planSpec.InstallLegacyDrivers = &val
				case "false":
					val := false
					planSpec.InstallLegacyDrivers = &val
				}
			}
			// Handle migration type flag (mutually exclusive with --warm)
			if migrationTypeFlag.GetValue() != "" {
				if planSpec.Warm {
					return fmt.Errorf("setting --warm flag is not supported when migration type is specified")
				}
				planSpec.Type = migrationTypeFlag.GetValue()
				// Also set the warm field for backward compatibility when migration type is warm
				if migrationTypeFlag.GetValue() == "warm" {
					planSpec.Warm = true
				}
			}
			// Handle target labels (convert from key=value slice to map)
			if len(targetLabels) > 0 {
				labels, err := parseKeyValuePairs(targetLabels, "target label")
				if err != nil {
					return err
				}
				planSpec.TargetLabels = labels
			}
			// Handle target node selector (convert from key=value slice to map)
			if len(targetNodeSelector) > 0 {
				nodeSelector, err := parseKeyValuePairs(targetNodeSelector, "target node selector")
				if err != nil {
					return err
				}
				planSpec.TargetNodeSelector = nodeSelector
			}
			// Handle target affinity (parse KARL rule into k8s affinity)
			if targetAffinity != "" {
				interpreter := karl.NewKARLInterpreter()
				err := interpreter.Parse(targetAffinity)
				if err != nil {
					return fmt.Errorf("failed to parse target affinity KARL rule: %v", err)
				}
				affinity, err := interpreter.ToAffinity()
				if err != nil {
					return fmt.Errorf("failed to convert KARL rule to affinity: %v", err)
				}
				planSpec.TargetAffinity = affinity
			}
			// Handle target power state
			if targetPowerState != "" {
				planSpec.TargetPowerState = planv1beta1.TargetPowerState(targetPowerState)
			}
			// Handle convertor labels (convert from key=value slice to map)
			if len(convertorLabels) > 0 {
				labels, err := parseKeyValuePairs(convertorLabels, "convertor label")
				if err != nil {
					return err
				}
				planSpec.ConvertorLabels = labels
			}
			// Handle convertor node selector (convert from key=value slice to map)
			if len(convertorNodeSelector) > 0 {
				nodeSelector, err := parseKeyValuePairs(convertorNodeSelector, "convertor node selector")
				if err != nil {
					return err
				}
				planSpec.ConvertorNodeSelector = nodeSelector
			}
			// Handle convertor affinity (parse KARL rule)
			if convertorAffinity != "" {
				interpreter := karl.NewKARLInterpreter()
				err := interpreter.Parse(convertorAffinity)
				if err != nil {
					return fmt.Errorf("failed to parse convertor affinity KARL rule: %v", err)
				}
				affinity, err := interpreter.ToAffinity()
				if err != nil {
					return fmt.Errorf("failed to convert KARL rule to affinity: %v", err)
				}
				planSpec.ConvertorAffinity = affinity
			}
			// Handle use compatibility mode
			planSpec.UseCompatibilityMode = useCompatibilityMode
			// Set VMs in the PlanSpec
			planSpec.VMs = vmList
			opts := plan.CreatePlanOptions{
				Name:                      name,
				Namespace:                 namespace,
				SourceProvider:            sourceProvider,
				TargetProvider:            targetProvider,
				NetworkMapping:            networkMapping,
				StorageMapping:            storageMapping,
				ConfigFlags:               kubeConfigFlags,
				InventoryURL:              inventoryURL,
				InventoryInsecureSkipTLS:  inventoryInsecureSkipTLS,
				DefaultTargetNetwork:      defaultTargetNetwork,
				DefaultTargetStorageClass: defaultTargetStorageClass,
				PlanSpec:                  planSpec,
				NetworkPairs:              networkPairs,
				StoragePairs:              storagePairs,
				DefaultVolumeMode:         defaultVolumeMode,
				DefaultAccessMode:         defaultAccessMode,
				DefaultOffloadPlugin:      defaultOffloadPlugin,
				DefaultOffloadSecret:      defaultOffloadSecret,
				DefaultOffloadVendor:      defaultOffloadVendor,
				// Offload secret creation options
				OffloadVSphereUsername: offloadVSphereUsername,
				OffloadVSpherePassword: offloadVSpherePassword,
				OffloadVSphereURL:      offloadVSphereURL,
				OffloadStorageUsername: offloadStorageUsername,
				OffloadStoragePassword: offloadStoragePassword,
				OffloadStorageEndpoint: offloadStorageEndpoint,
				OffloadCACert:          offloadCACert,
				OffloadInsecureSkipTLS: offloadInsecureSkipTLS,
			}
			err := plan.Create(cmd.Context(), opts)
			return err
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Plan name")
	cmd.Flags().StringVarP(&sourceProvider, "source", "S", "", "Source provider name (supports namespace/name pattern, defaults to plan namespace)")
	cmd.Flags().StringVarP(&targetProvider, "target", "t", "", "Target provider name (supports namespace/name pattern, defaults to plan namespace)")
	cmd.Flags().StringVar(&networkMapping, "network-mapping", "", "Network mapping name")
	cmd.Flags().StringVar(&storageMapping, "storage-mapping", "", "Storage mapping name")
	cmd.Flags().StringVar(&networkPairs, "network-pairs", "", "Network mapping pairs in format 'source:target-namespace/target-network', 'source:target-network', 'source:default', or 'source:ignored' (comma-separated)")
	cmd.Flags().StringVar(&storagePairs, "storage-pairs", "", "Storage mapping pairs in format 'source:storage-class[;volumeMode=Block|Filesystem][;accessMode=ReadWriteOnce|ReadWriteMany|ReadOnlyMany][;offloadPlugin=vsphere][;offloadSecret=secret-name][;offloadVendor=vantara|ontap|...]' (comma-separated pairs, semicolon-separated parameters)")
	// Storage enhancement flags
	cmd.Flags().StringVar(&defaultVolumeMode, "default-volume-mode", "", "Default volume mode for storage pairs (Filesystem|Block)")
	cmd.Flags().StringVar(&defaultAccessMode, "default-access-mode", "", "Default access mode for storage pairs (ReadWriteOnce|ReadWriteMany|ReadOnlyMany)")
	cmd.Flags().StringVar(&defaultOffloadPlugin, "default-offload-plugin", "", "Default offload plugin type for storage pairs (vsphere)")
	cmd.Flags().StringVar(&defaultOffloadSecret, "default-offload-secret", "", "Existing offload secret name to use for storage offload")
	cmd.Flags().StringVar(&defaultOffloadVendor, "default-offload-vendor", "", "Default offload plugin vendor for storage pairs (flashsystem|vantara|ontap|primera3par|pureFlashArray|powerflex|powermax|powerstore|infinibox)")
	// Offload secret creation flags (storage offload/XCOPY is vSphere-only)
	cmd.Flags().StringVar(&offloadVSphereUsername, "offload-vsphere-username", "", "vSphere username for offload secret (creates new secret if no --default-offload-secret provided)")
	cmd.Flags().StringVar(&offloadVSpherePassword, "offload-vsphere-password", "", "vSphere password for offload secret")
	cmd.Flags().StringVar(&offloadVSphereURL, "offload-vsphere-url", "", "vSphere vCenter URL for offload secret")
	cmd.Flags().StringVar(&offloadStorageUsername, "offload-storage-username", "", "Storage array username for offload secret")
	cmd.Flags().StringVar(&offloadStoragePassword, "offload-storage-password", "", "Storage array password for offload secret")
	cmd.Flags().StringVar(&offloadStorageEndpoint, "offload-storage-endpoint", "", "Storage array management endpoint URL for offload secret")
	cmd.Flags().StringVar(&offloadCACert, "offload-cacert", "", "CA certificate for offload secret (use @filename to load from file)")
	cmd.Flags().BoolVar(&offloadInsecureSkipTLS, "offload-insecure-skip-tls", false, "Skip TLS verification for offload connections")
	// Error deliberately ignored: "name" is registered above, so marking it
	// required cannot fail here.
	_ = cmd.MarkFlagRequired("name")
	cmd.Flags().StringVar(&vmNamesQuaryOrFile, "vms", "", "List of VM names (comma-separated), path to YAML/JSON file (prefix with @), or query string (prefix with 'where ')")
	cmd.Flags().StringVar(&preHook, "pre-hook", "", "Pre-migration hook to add to all VMs in the plan")
	cmd.Flags().StringVar(&postHook, "post-hook", "", "Post-migration hook to add to all VMs in the plan")
	// PlanSpec flags (bound directly into planSpec fields)
	cmd.Flags().StringVar(&planSpec.Description, "description", "", "Plan description")
	cmd.Flags().StringVar(&planSpec.TargetNamespace, "target-namespace", "", "Target namespace")
	cmd.Flags().StringVar(&transferNetwork, "transfer-network", "", "The network attachment definition for disk transfer. Supports 'namespace/network-name' or just 'network-name' (uses plan namespace)")
	cmd.Flags().BoolVar(&planSpec.PreserveClusterCPUModel, "preserve-cluster-cpu-model", false, "Preserve the CPU model and flags the VM runs with in its cluster")
	cmd.Flags().BoolVar(&planSpec.PreserveStaticIPs, "preserve-static-ips", true, "Preserve static IP configurations during migration")
	cmd.Flags().StringVar(&planSpec.PVCNameTemplate, "pvc-name-template", "", "Template for generating PVC names for VM disks. Variables: {{.VmName}}, {{.PlanName}}, {{.DiskIndex}}, {{.WinDriveLetter}}, {{.RootDiskIndex}}, {{.Shared}}, {{.FileName}}")
	cmd.Flags().StringVar(&planSpec.VolumeNameTemplate, "volume-name-template", "", "Template for generating volume interface names in the target VM. Variables: {{.PVCName}}, {{.VolumeIndex}}")
	cmd.Flags().StringVar(&planSpec.NetworkNameTemplate, "network-name-template", "", "Template for generating network interface names in the target VM. Variables: {{.NetworkName}}, {{.NetworkNamespace}}, {{.NetworkType}}, {{.NetworkIndex}}")
	cmd.Flags().BoolVar(&planSpec.MigrateSharedDisks, "migrate-shared-disks", true, "Migrate disks shared between multiple VMs")
	cmd.Flags().BoolVar(&planSpec.Archived, "archived", false, "Whether this plan should be archived")
	cmd.Flags().BoolVar(&planSpec.PVCNameTemplateUseGenerateName, "pvc-name-template-use-generate-name", true, "Use generateName instead of name for PVC name template")
	cmd.Flags().BoolVar(&planSpec.DeleteGuestConversionPod, "delete-guest-conversion-pod", false, "Delete guest conversion pod after successful migration")
	cmd.Flags().BoolVar(&planSpec.DeleteVmOnFailMigration, "delete-vm-on-fail-migration", false, "Delete target VM when migration fails")
	cmd.Flags().BoolVar(&planSpec.SkipGuestConversion, "skip-guest-conversion", false, "Skip the guest conversion process (raw disk copy mode)")
	cmd.Flags().BoolVar(&planSpec.RunPreflightInspection, "run-preflight-inspection", true, "Run preflight inspection on VM base disks before starting disk transfer")
	cmd.Flags().StringVar(&installLegacyDrivers, "install-legacy-drivers", "", "Install legacy Windows drivers (true/false, leave empty for auto-detection)")
	cmd.Flags().VarP(migrationTypeFlag, "migration-type", "m", "Migration type: cold, warm, live, or conversion")
	cmd.Flags().StringVarP(&defaultTargetNetwork, "default-target-network", "N", "", "Default target network for network mapping. Use 'default' for pod networking, 'namespace/network-name', or just 'network-name' (uses plan namespace)")
	cmd.Flags().StringVar(&defaultTargetStorageClass, "default-target-storage-class", "", "Default target storage class for storage mapping")
	cmd.Flags().BoolVar(&useCompatibilityMode, "use-compatibility-mode", true, "Use compatibility devices (SATA bus, E1000E NIC) when skipGuestConversion is true")
	cmd.Flags().StringSliceVarP(&targetLabels, "target-labels", "L", nil, "Target labels to be added to the VM (e.g., key1=value1,key2=value2)")
	cmd.Flags().StringSliceVar(&targetNodeSelector, "target-node-selector", nil, "Target node selector to constrain VM scheduling (e.g., key1=value1,key2=value2)")
	cmd.Flags().BoolVar(&planSpec.Warm, "warm", false, "Enable warm migration (use --migration-type=warm instead)")
	cmd.Flags().StringVar(&targetAffinity, "target-affinity", "", "Target affinity to constrain VM scheduling using KARL syntax (e.g. 'REQUIRE pods(app=database) on node')")
	cmd.Flags().StringVar(&targetPowerState, "target-power-state", "", "Target power state for VMs after migration: 'on', 'off', or 'auto' (default: match source VM power state)")
	// Convertor-related flags (only apply to providers requiring guest conversion)
	cmd.Flags().StringSliceVar(&convertorLabels, "convertor-labels", nil, "Labels to be added to virt-v2v convertor pods (e.g., key1=value1,key2=value2)")
	cmd.Flags().StringSliceVar(&convertorNodeSelector, "convertor-node-selector", nil, "Node selector to constrain convertor pod scheduling (e.g., key1=value1,key2=value2)")
	cmd.Flags().StringVar(&convertorAffinity, "convertor-affinity", "", "Convertor affinity to constrain convertor pod scheduling using KARL syntax")
	// Shell-completion registration below can only fail on programmer error
	// (e.g. a mistyped flag name), so failures panic at command construction.
	// Add completion for storage enhancement flags
	if err := cmd.RegisterFlagCompletionFunc("default-volume-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"Filesystem", "Block"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("default-access-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("default-offload-plugin", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"vsphere"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("default-offload-vendor", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"flashsystem", "vantara", "ontap", "primera3par", "pureFlashArray", "powerflex", "powermax", "powerstore", "infinibox"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for migration type flag
	if err := cmd.RegisterFlagCompletionFunc("migration-type", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return migrationTypeFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for install legacy drivers flag
	if err := cmd.RegisterFlagCompletionFunc("install-legacy-drivers", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"true", "false"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for target power state flag
	if err := cmd.RegisterFlagCompletionFunc("target-power-state", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"on", "off", "auto"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for pre-hook flag
	if err := cmd.RegisterFlagCompletionFunc("pre-hook", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Add completion for post-hook flag
	if err := cmd.RegisterFlagCompletionFunc("post-hook", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/provider.go | Go | package create
import (
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/provider/providerutil"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewProviderCmd creates the provider creation command
func NewProviderCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
var name, secret string
providerType := flags.NewProviderTypeFlag()
// Add Provider credential flags
var url, username, password, cacert, token string
var insecureSkipTLS bool
var vddkInitImage string
sdkEndpointType := flags.NewSdkEndpointTypeFlag()
// VSphere VDDK specific flags
var useVddkAioOptimization bool
var vddkBufSizeIn64K, vddkBufCount int
// OpenStack specific flags
var domainName, projectName, regionName string
// EC2 specific flags
var ec2Region, ec2TargetRegion, ec2TargetAZ string
var ec2TargetAccessKeyID, ec2TargetSecretKey string
var autoTargetCredentials bool
// HyperV specific flags
var smbUrl, smbUser, smbPassword string
// Check if MTV_VDDK_INIT_IMAGE environment variable is set
if envVddkInitImage := os.Getenv("MTV_VDDK_INIT_IMAGE"); envVddkInitImage != "" {
vddkInitImage = envVddkInitImage
}
cmd := &cobra.Command{
Use: "provider",
Short: "Create a new provider",
Long: `Create a new MTV provider to connect to a virtualization platform.
Providers represent source or target environments for VM migrations. Supported types:
- vsphere: VMware vSphere/vCenter (requires VDDK init image for migration)
- ovirt: Red Hat Virtualization (oVirt/RHV)
- openstack: OpenStack cloud platform
- ova: OVA files from NFS share
- openshift: Target OpenShift cluster (usually named 'host')
- ec2: Amazon EC2 instances
- hyperv: Microsoft Hyper-V
Credentials can be provided directly via flags or through an existing Kubernetes secret.`,
Example: ` # Create a vSphere provider
kubectl-mtv create provider --name vsphere-prod \
--type vsphere \
--url https://vcenter.example.com/sdk \
--username admin@vsphere.local \
--password 'secret' \
--vddk-init-image quay.io/kubev2v/vddk:latest
# Create an oVirt provider
kubectl-mtv create provider --name ovirt-prod \
--type ovirt \
--url https://rhv-manager.example.com/ovirt-engine/api \
--username admin@internal \
--password 'secret'
# Create an OpenShift target provider
kubectl-mtv create provider --name host \
--type openshift \
--url https://api.cluster.example.com:6443 \
--provider-token 'eyJhbGciOiJSUzI1NiIsInR5...'
# Create an OpenStack provider
kubectl-mtv create provider --name openstack-prod \
--type openstack \
--url https://keystone.example.com:5000/v3 \
--username admin \
--password 'secret' \
--provider-domain-name Default \
--provider-project-name admin
# Create a HyperV provider
kubectl-mtv create provider --name my-hyperv \
--type hyperv \
--url https://192.168.1.100 \
--username Administrator \
--password 'MyPassword' \
--smb-url '//192.168.1.100/VMShare'`,
Args: cobra.NoArgs,
SilenceUsage: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
// Fetch dynamic provider types from the cluster
dynamicTypes, err := client.GetDynamicProviderTypes(kubeConfigFlags)
if err != nil {
// Log the error but don't fail - we can still work with static types
// This allows the command to work even if there are cluster connectivity issues
// as long as the user is using a static provider type
cmd.PrintErrf("Warning: failed to fetch dynamic provider types: %v\n", err)
} else {
// Set the dynamic types in the flag
providerType.SetDynamicTypes(dynamicTypes)
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
// Resolve the appropriate namespace based on context and flags
namespace := client.ResolveNamespace(kubeConfigFlags)
// Check if cacert starts with @ and load from file if so
if strings.HasPrefix(cacert, "@") {
filePath := cacert[1:]
fileContent, err := os.ReadFile(filePath)
if err != nil {
return err
}
cacert = string(fileContent)
}
options := providerutil.ProviderOptions{
Name: name,
Namespace: namespace,
Secret: secret,
URL: url,
Username: username,
Password: password,
CACert: cacert,
InsecureSkipTLS: insecureSkipTLS,
VddkInitImage: vddkInitImage,
SdkEndpoint: sdkEndpointType.GetValue(),
Token: token,
DomainName: domainName,
ProjectName: projectName,
RegionName: regionName,
UseVddkAioOptimization: useVddkAioOptimization,
VddkBufSizeIn64K: vddkBufSizeIn64K,
VddkBufCount: vddkBufCount,
EC2Region: ec2Region,
EC2TargetRegion: ec2TargetRegion,
EC2TargetAZ: ec2TargetAZ,
EC2TargetAccessKeyID: ec2TargetAccessKeyID,
EC2TargetSecretKey: ec2TargetSecretKey,
AutoTargetCredentials: autoTargetCredentials,
SMBUrl: smbUrl,
SMBUser: smbUser,
SMBPassword: smbPassword,
}
return provider.Create(kubeConfigFlags, providerType.GetValue(), options)
},
}
cmd.Flags().StringVarP(&name, "name", "M", "", "Provider name")
cmd.Flags().VarP(providerType, "type", "t", "Provider type (openshift, vsphere, ovirt, openstack, ova, ec2, hyperv)")
cmd.Flags().StringVar(&secret, "secret", "", "Secret containing provider credentials")
// Provider credential flags
cmd.Flags().StringVarP(&url, "url", "U", "", "Provider URL")
cmd.Flags().StringVarP(&username, "username", "u", "", "Provider credentials username")
cmd.Flags().StringVarP(&password, "password", "p", "", "Provider credentials password")
cmd.Flags().StringVar(&cacert, "cacert", "", "Provider CA certificate (use @filename to load from file)")
cmd.Flags().BoolVar(&insecureSkipTLS, "provider-insecure-skip-tls", false, "Skip TLS verification when connecting to the provider")
// OpenShift specific flags
cmd.Flags().StringVarP(&token, "provider-token", "T", "", "Provider authentication token")
// vSphere specific flags
cmd.Flags().StringVar(&vddkInitImage, "vddk-init-image", vddkInitImage, "Virtual Disk Development Kit (VDDK) container init image path")
cmd.Flags().Var(sdkEndpointType, "sdk-endpoint", "SDK endpoint type (vcenter or esxi)")
cmd.Flags().BoolVar(&useVddkAioOptimization, "use-vddk-aio-optimization", false, "Enable VDDK AIO optimization for improved disk transfer performance")
cmd.Flags().IntVar(&vddkBufSizeIn64K, "vddk-buf-size-in-64k", 0, "VDDK buffer size in 64K units (VixDiskLib.nfcAio.Session.BufSizeIn64K)")
cmd.Flags().IntVar(&vddkBufCount, "vddk-buf-count", 0, "VDDK buffer count (VixDiskLib.nfcAio.Session.BufCount)")
// OpenStack specific flags
cmd.Flags().StringVar(&domainName, "provider-domain-name", "", "OpenStack domain name")
cmd.Flags().StringVar(&projectName, "provider-project-name", "", "OpenStack project name")
cmd.Flags().StringVar(®ionName, "provider-region-name", "", "OpenStack region name")
cmd.Flags().StringVar(®ionName, "region", "", "Region name (alias for --provider-region-name)")
// EC2 specific flags
cmd.Flags().StringVar(&ec2Region, "ec2-region", "", "AWS region where source EC2 instances are located")
cmd.Flags().StringVar(&ec2TargetRegion, "target-region", "", "Target region for migrations (defaults to provider region)")
cmd.Flags().StringVar(&ec2TargetAZ, "target-az", "", "Target availability zone for migrations (required - EBS volumes are AZ-specific)")
cmd.Flags().StringVar(&ec2TargetAccessKeyID, "target-access-key-id", "", "Target AWS account access key ID (for cross-account migrations)")
cmd.Flags().StringVar(&ec2TargetSecretKey, "target-secret-access-key", "", "Target AWS account secret access key (for cross-account migrations)")
cmd.Flags().BoolVar(&autoTargetCredentials, "auto-target-credentials", false, "Automatically fetch target AWS credentials from cluster and target-az from worker nodes")
// HyperV specific flags
cmd.Flags().StringVar(&smbUrl, "smb-url", "", "SMB share URL for HyperV (e.g., //server/share)")
cmd.Flags().StringVar(&smbUser, "smb-user", "", "SMB username (defaults to HyperV username)")
cmd.Flags().StringVar(&smbPassword, "smb-password", "", "SMB password (defaults to HyperV password)")
// Add completion for provider type flag
if err := cmd.RegisterFlagCompletionFunc("type", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return providerType.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
}); err != nil {
panic(err)
}
// Add completion for sdk-endpoint flag
if err := cmd.RegisterFlagCompletionFunc("sdk-endpoint", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return sdkEndpointType.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
}); err != nil {
panic(err)
}
if err := cmd.MarkFlagRequired("name"); err != nil {
panic(err)
}
if err := cmd.MarkFlagRequired("type"); err != nil {
panic(err)
}
return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/create/vddk.go | Go | package create
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/create/vddk"
)
// NewVddkCmd creates the VDDK image creation command
func NewVddkCmd(globalConfig GlobalConfigGetter, kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// Flag storage: build inputs (tarball path, image tag, build dir,
	// container runtime, platform, dockerfile) and push/configure toggles.
	var vddkTarGz, vddkTag, vddkBuildDir, vddkRuntime, vddkPlatform, vddkDockerfile string
	var vddkPush, setControllerImage, vddkPushInsecureSkipTLS bool
	cmd := &cobra.Command{
		Use:   "vddk-image",
		Short: "Create a VDDK image for MTV",
		Long: `Build a VDDK (Virtual Disk Development Kit) container image for vSphere migrations.
VDDK is required for migrating VMs from vSphere. This command builds a container
image from the VMware VDDK SDK and pushes it to your container registry.
You must download the VDDK SDK from VMware (requires VMware account):
https://developer.vmware.com/web/sdk/8.0/vddk`,
		Example: ` # Build VDDK image using podman
kubectl-mtv create vddk-image \
--tar VMware-vix-disklib-8.0.1-21562716.x86_64.tar.gz \
--tag quay.io/myorg/vddk:8.0.1
# Build and push to registry
kubectl-mtv create vddk-image \
--tar VMware-vix-disklib-8.0.1-21562716.x86_64.tar.gz \
--tag quay.io/myorg/vddk:8.0.1 \
--push
# Build, push, and configure as global VDDK image in ForkliftController
kubectl-mtv create vddk-image \
--tar VMware-vix-disklib-8.0.1-21562716.x86_64.tar.gz \
--tag quay.io/myorg/vddk:8.0.1 \
--push \
--set-controller-image
# Use specific container runtime
kubectl-mtv create vddk-image \
--tar VMware-vix-disklib-8.0.1-21562716.x86_64.tar.gz \
--tag quay.io/myorg/vddk:8.0.1 \
--runtime docker
# Push to insecure registry (self-signed certificate)
kubectl-mtv create vddk-image \
--tar VMware-vix-disklib-8.0.1-21562716.x86_64.tar.gz \
--tag internal-registry.local:5000/vddk:8.0.1 \
--push \
--push-insecure-skip-tls`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate that --set-controller-image requires --push:
			// the controller can only reference an image that exists in a registry.
			if setControllerImage && !vddkPush {
				return fmt.Errorf("--set-controller-image requires --push to be set")
			}
			// globalConfig may be nil (e.g. when wired without global flags);
			// default verbosity to 0 in that case.
			verbosity := 0
			if globalConfig != nil {
				verbosity = globalConfig.GetVerbosity()
			}
			err := vddk.BuildImage(vddkTarGz, vddkTag, vddkBuildDir, vddkRuntime, vddkPlatform, vddkDockerfile, verbosity, vddkPush, vddkPushInsecureSkipTLS)
			if err != nil {
				// NOTE(review): the error is printed but nil is returned, so the
				// process exits 0 even when the build fails. Unlike the other
				// commands in this package, scripts cannot rely on the exit code
				// here — confirm this is intentional.
				fmt.Printf("Error building VDDK image: %v\n", err)
				fmt.Printf("You can use the '--help' flag for more information on usage.\n")
				return nil
			}
			// Configure ForkliftController if requested (only reached after a
			// successful build; --push was validated above).
			if setControllerImage {
				if err := vddk.SetControllerVddkImage(kubeConfigFlags, vddkTag, verbosity); err != nil {
					// NOTE(review): same pattern — error printed, nil returned.
					fmt.Printf("Error configuring ForkliftController: %v\n", err)
					return nil
				}
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&vddkTarGz, "tar", "", "Path to VMware VDDK tar.gz file (required), e.g. VMware-vix-disklib.tar.gz")
	cmd.Flags().StringVar(&vddkTag, "tag", "", "Container image tag (required), e.g. quay.io/example/vddk:8.0.1")
	cmd.Flags().StringVar(&vddkBuildDir, "build-dir", "", "Build directory (optional, uses tmp dir if not set)")
	cmd.Flags().StringVar(&vddkRuntime, "runtime", "auto", "Container runtime to use: auto, podman, or docker (default: auto)")
	cmd.Flags().StringVar(&vddkPlatform, "platform", "amd64", "Target platform for the image: amd64 or arm64. (default: amd64)")
	cmd.Flags().StringVar(&vddkDockerfile, "dockerfile", "", "Path to custom Dockerfile (optional, uses default if not set)")
	cmd.Flags().BoolVar(&vddkPush, "push", false, "Push image after build (optional)")
	cmd.Flags().BoolVar(&vddkPushInsecureSkipTLS, "push-insecure-skip-tls", false, "Skip TLS verification when pushing to the registry (podman only, docker requires daemon config)")
	cmd.Flags().BoolVar(&setControllerImage, "set-controller-image", false, "Configure the pushed image as global vddk_image in ForkliftController (requires --push)")
	// Add autocomplete for runtime flag
	if err := cmd.RegisterFlagCompletionFunc("runtime", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"auto", "podman", "docker"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add autocomplete for platform flag
	if err := cmd.RegisterFlagCompletionFunc("platform", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"amd64", "arm64"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// --tar and --tag are mandatory; registration failure is a programming
	// error (flag name typo), hence panic rather than returned error.
	if err := cmd.MarkFlagRequired("tar"); err != nil {
		panic(err)
	}
	if err := cmd.MarkFlagRequired("tag"); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/cutover/cutover.go | Go | package cutover
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewCutoverCmd creates the cutover command with all its subcommands
func NewCutoverCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// The plan subcommand also answers to its plural form.
	planSub := NewPlanCmd(kubeConfigFlags)
	planSub.Aliases = []string{"plans"}
	root := &cobra.Command{
		Use:          "cutover",
		Short:        "Set cutover time for resources",
		Long:         `Set cutover time for various MTV resources`,
		SilenceUsage: true,
	}
	root.AddCommand(planSub)
	return root
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/cutover/plan.go | Go | package cutover
import (
"errors"
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/cutover/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan cutover command
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// Flag storage: optional RFC3339 cutover time, the --all toggle, and the
	// explicit list of plan names.
	var (
		rawCutover string
		everyPlan  bool
		targets    []string
	)
	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Set the cutover time for one or more warm migration plans",
		Long: `Trigger cutover for warm migration plans.
Cutover stops the source VMs and performs the final sync to complete the migration.
Use this to manually trigger cutover for warm migrations, or to reschedule
a cutover time. If no cutover time is specified, it defaults to immediately.`,
		Example: ` # Trigger immediate cutover
kubectl-mtv cutover plan --name my-warm-migration
# Schedule cutover for a specific time
kubectl-mtv cutover plan --name my-warm-migration --cutover 2026-12-31T23:00:00Z
# Cutover all warm migration plans
kubectl-mtv cutover plans --all
# Cutover multiple plans
kubectl-mtv cutover plans --name plan1,plan2,plan3`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Exactly one of --name / --all must be used.
			switch {
			case everyPlan && len(targets) > 0:
				return errors.New("cannot use --name with --all")
			case !everyPlan && len(targets) == 0:
				return errors.New("must specify --name or --all")
			}
			// Work in the namespace implied by the kubeconfig context/flags.
			ns := client.ResolveNamespace(kubeConfigFlags)
			// nil means "no explicit time" — the backend treats that as "now".
			var when *time.Time
			if rawCutover != "" {
				parsed, err := time.Parse(time.RFC3339, rawCutover)
				if err != nil {
					return fmt.Errorf("failed to parse cutover time: %v", err)
				}
				when = &parsed
			}
			// Expand --all into the concrete plan names in the namespace.
			if everyPlan {
				var err error
				targets, err = client.GetAllPlanNames(cmd.Context(), kubeConfigFlags, ns)
				if err != nil {
					return fmt.Errorf("failed to get all plan names: %v", err)
				}
				if len(targets) == 0 {
					fmt.Printf("No plans found in namespace %s\n", ns)
					return nil
				}
			}
			// Apply the cutover time to each plan, stopping at the first failure.
			for _, target := range targets {
				if err := plan.Cutover(kubeConfigFlags, target, ns, when); err != nil {
					return err
				}
			}
			return nil
		},
	}
	cmd.Flags().StringSliceVarP(&targets, "name", "M", nil, "Plan name(s) to cutover (comma-separated, e.g. \"plan1,plan2\")")
	cmd.Flags().StringSliceVar(&targets, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	cmd.Flags().StringVarP(&rawCutover, "cutover", "c", "", "Cutover time in ISO8601 format (e.g., 2023-12-31T15:30:00Z, '$(date --iso-8601=sec)'). If not specified, defaults to current time.")
	cmd.Flags().BoolVar(&everyPlan, "all", false, "Set cutover time for all migration plans in the namespace")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/delete.go | Go | package delete
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewDeleteCmd creates the delete command with all its subcommands
func NewDeleteCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	root := &cobra.Command{
		Use:          "delete",
		Short:        "Delete resources",
		Long:         `Delete resources like mappings, plans, and providers`,
		SilenceUsage: true,
	}
	// Register each subcommand with its plural form as an alias.
	for _, entry := range []struct {
		sub    *cobra.Command
		plural string
	}{
		{NewMappingCmd(kubeConfigFlags), "mappings"},
		{NewPlanCmd(kubeConfigFlags), "plans"},
		{NewProviderCmd(kubeConfigFlags), "providers"},
		{NewHostCmd(kubeConfigFlags), "hosts"},
		{NewHookCmd(kubeConfigFlags), "hooks"},
	} {
		entry.sub.Aliases = []string{entry.plural}
		root.AddCommand(entry.sub)
	}
	return root
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/hook.go | Go | package delete
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/delete/hook"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewHookCmd creates the delete hook command
func NewHookCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var deleteAll bool
	var targets []string
	cmd := &cobra.Command{
		Use:          "hook",
		Short:        "Delete one or more migration hooks",
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Exactly one of --name / --all must be provided.
			switch {
			case deleteAll && len(targets) > 0:
				return errors.New("cannot use --name with --all")
			case !deleteAll && len(targets) == 0:
				return errors.New("either --name or --all is required")
			}
			// Work in the namespace implied by the kubeconfig context/flags.
			ns := client.ResolveNamespace(kubeConfigFlags)
			// Expand --all into the concrete hook names in the namespace.
			if deleteAll {
				found, err := client.GetAllHookNames(cmd.Context(), kubeConfigFlags, ns)
				if err != nil {
					return fmt.Errorf("failed to get all hook names: %v", err)
				}
				if len(found) == 0 {
					fmt.Printf("No hooks found in namespace %s\n", ns)
					return nil
				}
				targets = found
			}
			// Delete each requested hook, stopping at the first failure.
			for _, target := range targets {
				if err := hook.Delete(kubeConfigFlags, target, ns); err != nil {
					return err
				}
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&deleteAll, "all", false, "Delete all migration hooks in the namespace")
	cmd.Flags().StringSliceVarP(&targets, "name", "M", nil, "Hook name(s) to delete (comma-separated, e.g. \"hook1,hook2\")")
	cmd.Flags().StringSliceVar(&targets, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.HookResourceNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/host.go | Go | package delete
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/delete/host"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewHostCmd creates the delete host command
func NewHostCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var deleteAll bool
	var targets []string
	cmd := &cobra.Command{
		Use:          "host",
		Short:        "Delete one or more migration hosts",
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Exactly one of --name / --all must be provided.
			switch {
			case deleteAll && len(targets) > 0:
				return errors.New("cannot use --name with --all")
			case !deleteAll && len(targets) == 0:
				return errors.New("either --name or --all is required")
			}
			// Work in the namespace implied by the kubeconfig context/flags.
			ns := client.ResolveNamespace(kubeConfigFlags)
			// Expand --all into the concrete host names in the namespace.
			if deleteAll {
				found, err := client.GetAllHostNames(cmd.Context(), kubeConfigFlags, ns)
				if err != nil {
					return fmt.Errorf("failed to get all host names: %v", err)
				}
				if len(found) == 0 {
					fmt.Printf("No hosts found in namespace %s\n", ns)
					return nil
				}
				targets = found
			}
			// Delete each requested host, stopping at the first failure.
			for _, target := range targets {
				if err := host.Delete(kubeConfigFlags, target, ns); err != nil {
					return err
				}
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&deleteAll, "all", false, "Delete all migration hosts in the namespace")
	cmd.Flags().StringSliceVarP(&targets, "name", "M", nil, "Host name(s) to delete (comma-separated, e.g. \"host1,host2\")")
	cmd.Flags().StringSliceVar(&targets, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.HostResourceNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/mapping.go | Go | package delete
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/delete/mapping"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewMappingCmd creates the mapping deletion command with subcommands
func NewMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	root := &cobra.Command{
		Use:   "mapping",
		Short: "Delete mappings",
		Long: `Delete network and storage mappings.
Mappings define how source resources translate to target resources. Use
'mapping network' or 'mapping storage' to delete specific mapping types.`,
		SilenceUsage: true,
		// Without a subcommand there is nothing to delete; show usage instead.
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	// One subcommand per mapping type.
	root.AddCommand(
		newDeleteNetworkMappingCmd(kubeConfigFlags),
		newDeleteStorageMappingCmd(kubeConfigFlags),
	)
	return root
}
// newDeleteNetworkMappingCmd creates the delete network mapping subcommand
func newDeleteNetworkMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
var all bool
var mappingNames []string
cmd := &cobra.Command{
Use: "network",
Short: "Delete one or more network mappings",
Long: `Delete one or more network mappings.
Ensure no migration plans reference the mapping before deletion.`,
Example: ` # Delete a network mapping
kubectl-mtv delete mapping network --name my-net-map
# Delete multiple network mappings
kubectl-mtv delete mappings network --name map1,map2,map3
# Delete all network mappings
kubectl-mtv delete mappings network --all`,
Args: cobra.NoArgs,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
// Validate --all and --name are mutually exclusive
if all && len(mappingNames) > 0 {
return errors.New("cannot use --name with --all")
}
if !all && len(mappingNames) == 0 {
return errors.New("either --name or --all is required")
}
// Resolve the appropriate namespace based on context and flags
namespace := client.ResolveNamespace(kubeConfigFlags)
if all {
// Get all network mapping names from the namespace
var err error
mappingNames, err = client.GetAllNetworkMappingNames(cmd.Context(), kubeConfigFlags, namespace)
if err != nil {
return fmt.Errorf("failed to get all network mapping names: %v", err)
}
if len(mappingNames) == 0 {
fmt.Printf("No network mappings found in namespace %s\n", namespace)
return nil
}
}
// Loop over each mapping name and delete it
for _, name := range mappingNames {
err := mapping.Delete(kubeConfigFlags, name, namespace, "network")
if err != nil {
return err
}
}
return nil
},
}
cmd.Flags().BoolVar(&all, "all", false, "Delete all network mappings in the namespace")
cmd.Flags().StringSliceVarP(&mappingNames, "name", "M", nil, "Network mapping name(s) to delete (comma-separated, e.g. \"map1,map2\")")
cmd.Flags().StringSliceVar(&mappingNames, "names", nil, "Alias for --name")
_ = cmd.Flags().MarkHidden("names")
_ = cmd.RegisterFlagCompletionFunc("name", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return completion.MappingNameCompletion(kubeConfigFlags, "network")(cmd, args, toComplete)
})
return cmd
}
// newDeleteStorageMappingCmd creates the delete storage mapping subcommand
func newDeleteStorageMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
var all bool
var mappingNames []string
cmd := &cobra.Command{
Use: "storage",
Short: "Delete one or more storage mappings",
Long: `Delete one or more storage mappings.
Ensure no migration plans reference the mapping before deletion.`,
Example: ` # Delete a storage mapping
kubectl-mtv delete mapping storage --name my-storage-map
# Delete multiple storage mappings
kubectl-mtv delete mappings storage --name map1,map2,map3
# Delete all storage mappings
kubectl-mtv delete mappings storage --all`,
Args: cobra.NoArgs,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
// Validate --all and --name are mutually exclusive
if all && len(mappingNames) > 0 {
return errors.New("cannot use --name with --all")
}
if !all && len(mappingNames) == 0 {
return errors.New("either --name or --all is required")
}
// Resolve the appropriate namespace based on context and flags
namespace := client.ResolveNamespace(kubeConfigFlags)
if all {
// Get all storage mapping names from the namespace
var err error
mappingNames, err = client.GetAllStorageMappingNames(cmd.Context(), kubeConfigFlags, namespace)
if err != nil {
return fmt.Errorf("failed to get all storage mapping names: %v", err)
}
if len(mappingNames) == 0 {
fmt.Printf("No storage mappings found in namespace %s\n", namespace)
return nil
}
}
// Loop over each mapping name and delete it
for _, name := range mappingNames {
err := mapping.Delete(kubeConfigFlags, name, namespace, "storage")
if err != nil {
return err
}
}
return nil
},
}
cmd.Flags().BoolVar(&all, "all", false, "Delete all storage mappings in the namespace")
cmd.Flags().StringSliceVarP(&mappingNames, "name", "M", nil, "Storage mapping name(s) to delete (comma-separated, e.g. \"map1,map2\")")
cmd.Flags().StringSliceVar(&mappingNames, "names", nil, "Alias for --name")
_ = cmd.Flags().MarkHidden("names")
_ = cmd.RegisterFlagCompletionFunc("name", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return completion.MappingNameCompletion(kubeConfigFlags, "storage")(cmd, args, toComplete)
})
return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/plan.go | Go | package delete
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/delete/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan deletion command
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// Flag storage: --all toggle, archive/cleanup behavior, and plan names.
	var (
		deleteAll   bool
		skipArchive bool
		cleanAll    bool
		targets     []string
	)
	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Delete one or more migration plans",
		Long: `Delete one or more migration plans.
By default, plans are archived before deletion to preserve history. Use
--skip-archive to delete immediately without archiving. Use --clean-all
to also clean up any target VMs created from failed migrations.`,
		Example: ` # Delete a plan (archives first)
kubectl-mtv delete plan --name my-migration
# Delete immediately without archiving
kubectl-mtv delete plan --name my-migration --skip-archive
# Delete plan and clean up failed migration VMs
kubectl-mtv delete plan --name my-migration --clean-all
# Delete multiple plans
kubectl-mtv delete plans --name plan1,plan2,plan3
# Delete all plans in namespace
kubectl-mtv delete plans --all`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Exactly one of --name / --all must be provided.
			switch {
			case deleteAll && len(targets) > 0:
				return errors.New("cannot use --name with --all")
			case !deleteAll && len(targets) == 0:
				return errors.New("either --name or --all is required")
			}
			// Work in the namespace implied by the kubeconfig context/flags.
			ns := client.ResolveNamespace(kubeConfigFlags)
			// Expand --all into the concrete plan names in the namespace.
			if deleteAll {
				found, err := client.GetAllPlanNames(cmd.Context(), kubeConfigFlags, ns)
				if err != nil {
					return fmt.Errorf("failed to get all plan names: %v", err)
				}
				if len(found) == 0 {
					fmt.Printf("No plans found in namespace %s\n", ns)
					return nil
				}
				targets = found
			}
			// Delete each plan, stopping at the first failure.
			for _, target := range targets {
				if err := plan.Delete(cmd.Context(), kubeConfigFlags, target, ns, skipArchive, cleanAll); err != nil {
					return err
				}
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&deleteAll, "all", false, "Delete all migration plans in the namespace")
	cmd.Flags().StringSliceVarP(&targets, "name", "M", nil, "Plan name(s) to delete (comma-separated, e.g. \"plan1,plan2\")")
	cmd.Flags().StringSliceVar(&targets, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	cmd.Flags().BoolVar(&skipArchive, "skip-archive", false, "Skip archiving and delete the plan immediately")
	cmd.Flags().BoolVar(&cleanAll, "clean-all", false, "Archive, delete VMs on failed migration, then delete")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/delete/provider.go | Go | package delete
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/delete/provider"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewProviderCmd creates the provider deletion command
func NewProviderCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var deleteAll bool
	var targets []string
	cmd := &cobra.Command{
		Use:   "provider",
		Short: "Delete one or more providers",
		Long: `Delete one or more MTV providers.
Deleting a provider removes its connection to the source or target environment.
Ensure no migration plans reference the provider before deletion.`,
		Example: ` # Delete a provider
kubectl-mtv delete provider --name vsphere-prod
# Delete multiple providers
kubectl-mtv delete providers --name provider1,provider2
# Delete all providers in namespace
kubectl-mtv delete providers --all`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Exactly one of --name / --all must be provided.
			switch {
			case deleteAll && len(targets) > 0:
				return errors.New("cannot use --name with --all")
			case !deleteAll && len(targets) == 0:
				return errors.New("either --name or --all is required")
			}
			// Work in the namespace implied by the kubeconfig context/flags.
			ns := client.ResolveNamespace(kubeConfigFlags)
			// Expand --all into the concrete provider names in the namespace.
			if deleteAll {
				found, err := client.GetAllProviderNames(cmd.Context(), kubeConfigFlags, ns)
				if err != nil {
					return fmt.Errorf("failed to get all provider names: %v", err)
				}
				if len(found) == 0 {
					fmt.Printf("No providers found in namespace %s\n", ns)
					return nil
				}
				targets = found
			}
			// Delete each provider, stopping at the first failure.
			for _, target := range targets {
				if err := provider.Delete(kubeConfigFlags, target, ns); err != nil {
					return err
				}
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&deleteAll, "all", false, "Delete all providers in the namespace")
	cmd.Flags().StringSliceVarP(&targets, "name", "M", nil, "Provider name(s) to delete (comma-separated, e.g. \"prov1,prov2\")")
	cmd.Flags().StringSliceVar(&targets, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.ProviderNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/describe/describe.go | Go | package describe
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
)
// NewDescribeCmd creates the describe command with all its subcommands
func NewDescribeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	root := &cobra.Command{
		Use:          "describe",
		Short:        "Describe resources",
		Long:         `Describe migration plans and VMs in migration plans`,
		SilenceUsage: true,
	}
	// Register each subcommand with its plural form as an alias. Note the
	// mapping subcommand takes only the global config.
	for _, entry := range []struct {
		sub    *cobra.Command
		plural string
	}{
		{NewPlanCmd(kubeConfigFlags, globalConfig), "plans"},
		{NewHostCmd(kubeConfigFlags, globalConfig), "hosts"},
		{NewHookCmd(kubeConfigFlags, globalConfig), "hooks"},
		{NewMappingCmd(globalConfig), "mappings"},
	} {
		entry.sub.Aliases = []string{entry.plural}
		root.AddCommand(entry.sub)
	}
	return root
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/describe/hook.go | Go | package describe
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/pkg/cmd/describe/hook"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewHookCmd creates the hook description command
func NewHookCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	var hookName string
	cmd := &cobra.Command{
		Use:   "hook",
		Short: "Describe a migration hook",
		Long: `Display detailed information about a migration hook.
Shows hook configuration including container image, playbook content,
service account, deadline, and status conditions.`,
		Example: ` # Describe a hook
kubectl-mtv describe hook --name my-post-hook`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Guard against an explicitly empty --name value; the flag itself
			// is marked required below.
			if hookName == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags.
			flags := globalConfig.GetKubeConfigFlags()
			ns := client.ResolveNamespace(flags)
			return hook.Describe(flags, hookName, ns, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&hookName, "name", "M", "", "Hook name")
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.HookResourceNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/describe/host.go | Go | package describe
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/pkg/cmd/describe/host"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewHostCmd creates the host description command
func NewHostCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	var hostName string
	cmd := &cobra.Command{
		Use:   "host",
		Short: "Describe a migration host",
		Long: `Display detailed information about a migration host.
Shows host configuration, IP address, provider reference, and status conditions.`,
		Example: ` # Describe a host
kubectl-mtv describe host --name esxi-host-1`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Guard against an explicitly empty --name value; the flag itself
			// is marked required below.
			if hostName == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags.
			flags := globalConfig.GetKubeConfigFlags()
			ns := client.ResolveNamespace(flags)
			skipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return host.Describe(cmd.Context(), flags, hostName, ns, globalConfig.GetUseUTC(), skipTLS)
		},
	}
	cmd.Flags().StringVarP(&hostName, "name", "M", "", "Host name")
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.HostResourceNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/describe/mapping.go | Go | package describe
import (
"fmt"
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/pkg/cmd/describe/mapping"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewMappingCmd creates the "describe mapping" parent command and wires up
// its network and storage subcommands.
func NewMappingCmd(globalConfig get.GlobalConfigGetter) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "mapping",
		Short: "Describe mappings",
		Long: `Describe network and storage mappings.
Shows detailed configuration of mappings including source/target pairs,
provider references, and status conditions.`,
		SilenceUsage: true,
		// Running the bare command just shows help for the subcommands.
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	// Register the network and storage describe subcommands.
	for _, sub := range []*cobra.Command{
		newDescribeNetworkMappingCmd(globalConfig),
		newDescribeStorageMappingCmd(globalConfig),
	} {
		cmd.AddCommand(sub)
	}
	return cmd
}
// newDescribeNetworkMappingCmd creates the "describe mapping network"
// subcommand, which prints a single network mapping in detail.
func newDescribeNetworkMappingCmd(globalConfig get.GlobalConfigGetter) *cobra.Command {
	// Value bound to the required --name flag.
	var name string
	cmd := &cobra.Command{
		Use:   "network",
		Short: "Describe a network mapping",
		Long: `Display detailed information about a network mapping.
Shows the source and target network pairs, provider references, and status.`,
		Example: ` # Describe a network mapping
kubectl-mtv describe mapping network --name my-net-map`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Reject an explicitly empty value (--name ""); MarkFlagRequired
			// only guarantees the flag was supplied.
			if name == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(globalConfig.GetKubeConfigFlags())
			// "network" selects the mapping kind for the shared describe helper.
			return mapping.Describe(globalConfig.GetKubeConfigFlags(), "network", name, namespace, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Network mapping name")
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.MappingNameCompletion(globalConfig.GetKubeConfigFlags(), "network"))
	return cmd
}
// newDescribeStorageMappingCmd creates the "describe mapping storage"
// subcommand, which prints a single storage mapping in detail.
func newDescribeStorageMappingCmd(globalConfig get.GlobalConfigGetter) *cobra.Command {
	// Value bound to the required --name flag.
	var name string
	cmd := &cobra.Command{
		Use:   "storage",
		Short: "Describe a storage mapping",
		Long: `Display detailed information about a storage mapping.
Shows the source and target storage pairs, volume modes, access modes, and status.`,
		Example: ` # Describe a storage mapping
kubectl-mtv describe mapping storage --name my-storage-map`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Reject an explicitly empty value (--name ""); MarkFlagRequired
			// only guarantees the flag was supplied.
			if name == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(globalConfig.GetKubeConfigFlags())
			// "storage" selects the mapping kind for the shared describe helper.
			return mapping.Describe(globalConfig.GetKubeConfigFlags(), "storage", name, namespace, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Storage mapping name")
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.MappingNameCompletion(globalConfig.GetKubeConfigFlags(), "storage"))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/describe/plan.go | Go | package describe
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
plan "github.com/yaacov/kubectl-mtv/pkg/cmd/describe/plan"
vm "github.com/yaacov/kubectl-mtv/pkg/cmd/describe/vm"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the "describe plan" command. Depending on flags it either
// describes the plan itself (optionally with its VM list) or, when --vm is
// given, switches to describing a single VM within the plan.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	// name is the required plan name; withVMs expands the VM list;
	// vmName switches to single-VM mode; watch streams VM status updates.
	var name string
	var withVMs bool
	var vmName string
	var watch bool
	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Describe a migration plan",
		Long: `Display detailed information about a migration plan.
Shows plan configuration, status, conditions, and optionally the list of VMs.
Use --vm to see detailed status of a specific VM in the plan.`,
		Example: ` # Describe a plan
kubectl-mtv describe plan --name my-migration
# Describe a plan including VM list
kubectl-mtv describe plan --name my-migration --with-vms
# Describe a specific VM in the plan
kubectl-mtv describe plan --name my-migration --vm web-server
# Watch VM status with live updates
kubectl-mtv describe plan --name my-migration --vm web-server --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Reject an explicitly empty --name; MarkFlagRequired only
			// guarantees the flag was supplied.
			if name == "" {
				return fmt.Errorf("--name is required")
			}
			// --with-vms and --vm select different output modes, so both
			// together is a user error.
			if withVMs && vmName != "" {
				return fmt.Errorf("--with-vms and --vm flags are mutually exclusive")
			}
			// Resolve the appropriate namespace based on context and flags.
			namespace := client.ResolveNamespace(globalConfig.GetKubeConfigFlags())
			// --vm switches to single-VM description behavior.
			if vmName != "" {
				return vm.DescribeVM(globalConfig.GetKubeConfigFlags(), name, namespace, vmName, watch, globalConfig.GetUseUTC())
			}
			// Default behavior: describe the plan itself.
			return plan.Describe(globalConfig.GetKubeConfigFlags(), name, namespace, withVMs, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Plan name")
	_ = cmd.MarkFlagRequired("name")
	cmd.Flags().BoolVar(&withVMs, "with-vms", false, "Include list of VMs in the plan specification")
	cmd.Flags().StringVar(&vmName, "vm", "", "VM name to describe (switches to VM description mode)")
	// NOTE(review): --watch is silently ignored unless --vm is also set.
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch VM status with live updates (only when --vm is used)")
	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/get.go | Go | package get
import (
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/pkg/util/config"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
// GlobalConfigGetter defines the interface for reading global CLI
// configuration (kubeconfig flags, namespace scope, inventory settings, etc.).
// It is a type alias for the shared config.GlobalConfigGetter interface,
// kept here to maintain backward compatibility with existing code.
type GlobalConfigGetter = config.GlobalConfigGetter
// logInfof logs formatted informational messages at klog verbosity level 1
// (shown when the user raises verbosity to 1 or higher).
func logInfof(format string, args ...interface{}) {
	klog.V(1).Infof(format, args...)
}
// logDebugf logs formatted debug messages at klog verbosity level 2
// (shown when the user raises verbosity to 2 or higher).
func logDebugf(format string, args ...interface{}) {
	klog.V(2).Infof(format, args...)
}
// logNamespaceOperation logs a namespace-scoped operation with consistent
// formatting, distinguishing single-namespace from all-namespace requests.
func logNamespaceOperation(operation string, namespace string, allNamespaces bool) {
	if !allNamespaces {
		logInfof("%s from namespace: %s", operation, namespace)
		return
	}
	logInfof("%s from all namespaces", operation)
}
// logOutputFormat logs the output format being used (debug-level detail).
func logOutputFormat(format string) {
	logDebugf("Output format: %s", format)
}
// NewGetCmd creates the get command with all its subcommands, each registered
// together with its plural alias.
func NewGetCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cmd := &cobra.Command{
		Use:          "get",
		Short:        "Get resources",
		Long:         `Get various MTV resources including plans, providers, mappings, and inventory`,
		SilenceUsage: true,
	}
	// addAliased attaches a subcommand after assigning its aliases.
	addAliased := func(sub *cobra.Command, aliases ...string) {
		sub.Aliases = aliases
		cmd.AddCommand(sub)
	}
	addAliased(NewPlanCmd(kubeConfigFlags, globalConfig), "plans")
	addAliased(NewProviderCmd(kubeConfigFlags, globalConfig), "providers")
	addAliased(NewMappingCmd(globalConfig), "mappings")
	addAliased(NewHostCmd(kubeConfigFlags, globalConfig), "hosts")
	addAliased(NewHookCmd(kubeConfigFlags, globalConfig), "hooks")
	// The inventory command carries no plural alias of its own.
	cmd.AddCommand(NewInventoryCmd(kubeConfigFlags, globalConfig))
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/hook.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/hook"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewHookCmd creates the "get hook" command, which lists hooks or fetches a
// single hook by name, optionally watching for changes.
func NewHookCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	// watch streams updates instead of a one-shot list; hookName optionally
	// restricts output to a single hook.
	var watch bool
	var hookName string
	cmd := &cobra.Command{
		Use:   "hook",
		Short: "Get hooks",
		Long: `Get MTV hook resources from the cluster.
Hooks are custom scripts or Ansible playbooks that run at specific points during
VM migration (pre-migration or post-migration). They can be used to customize
the migration process, such as installing drivers or configuring the target VM.`,
		Example: ` # List all hooks
kubectl-mtv get hooks
# Get a specific hook in JSON format
kubectl-mtv get hook --name my-post-hook --output json
# Watch hook status changes
kubectl-mtv get hooks --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests with a timeout; watch mode must be
			// allowed to run indefinitely.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}
			// Get namespace from the global configuration.
			// NOTE(review): this local shadows the outer kubeConfigFlags parameter.
			kubeConfigFlags := globalConfig.GetKubeConfigFlags()
			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(kubeConfigFlags, allNamespaces)
			// Log whether a single hook or all hooks are being fetched.
			if hookName != "" {
				logNamespaceOperation("Getting hook", namespace, allNamespaces)
			} else {
				logNamespaceOperation("Getting hooks", namespace, allNamespaces)
			}
			logOutputFormat(outputFormatFlag.GetValue())
			return hook.List(ctx, kubeConfigFlags, namespace, watch, outputFormatFlag.GetValue(), hookName, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&hookName, "name", "M", "", "Hook name")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Shell completion for the --name flag (hook names from the cluster).
	if err := cmd.RegisterFlagCompletionFunc("name", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Shell completion for the --output flag (valid format values).
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/host.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/host"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewHostCmd creates the "get host" command, which lists hosts or fetches a
// single host by name, optionally watching for changes.
func NewHostCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var (
		watch    bool
		hostName string
	)
	cmd := &cobra.Command{
		Use:   "host",
		Short: "Get hosts",
		Long: `Get MTV host resources from the cluster.
Host resources represent ESXi hosts for vSphere migrations or hypervisor hosts
for oVirt migrations. They store host-specific credentials and configuration.`,
		Example: ` # List all hosts
kubectl-mtv get hosts
# Get a specific host in YAML format
kubectl-mtv get host --name esxi-host-1 --output yaml
# Watch host status changes
kubectl-mtv get hosts --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests so they cannot hang indefinitely;
			// watch mode keeps the parent context.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}
			// Read namespace scope once from the global configuration.
			cfgFlags := globalConfig.GetKubeConfigFlags()
			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(cfgFlags, allNamespaces)
			// Log whether a single host or all hosts are being fetched.
			verb := "Getting hosts"
			if hostName != "" {
				verb = "Getting host"
			}
			logNamespaceOperation(verb, namespace, allNamespaces)
			logOutputFormat(outputFormatFlag.GetValue())
			return host.List(ctx, cfgFlags, namespace, watch, outputFormatFlag.GetValue(), hostName, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&hostName, "name", "M", "", "Host name")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Shell completion for the --name flag (host names from the cluster).
	if err := cmd.RegisterFlagCompletionFunc("name", completion.HostResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Shell completion for the --output flag (valid format values).
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory.go | Go | package get
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewInventoryCmd creates the inventory command and registers every
// provider-inventory subcommand together with its aliases.
func NewInventoryCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "inventory",
		Short: "Get inventory resources",
		Long: `Get inventory resources from providers via the MTV inventory service.
The inventory service provides access to source provider resources (VMs, networks,
storage, hosts, etc.) without directly connecting to the provider. Resources are
cached and can be queried using TSL (Tree Search Language) filters.
Available resource types vary by provider:
- All providers: vm, network, storage
- vSphere/oVirt: host, datacenter, cluster, disk
- vSphere: datastore, folder, resourcepool
- oVirt: diskprofile, nicprofile
- OpenStack: instance, image, flavor, project, volume, volumetype, snapshot, subnet
- OpenShift: namespace, pvc, datavolume
- EC2: ec2instance, ec2volume, ec2volumetype, ec2network`,
		SilenceUsage: true,
	}
	// addAliased attaches a subcommand after assigning its aliases.
	addAliased := func(sub *cobra.Command, aliases ...string) {
		sub.Aliases = aliases
		cmd.AddCommand(sub)
	}
	// General inventory resources.
	addAliased(NewInventoryHostCmd(kubeConfigFlags, globalConfig), "hosts")
	addAliased(NewInventoryNamespaceCmd(kubeConfigFlags, globalConfig), "namespaces")
	addAliased(NewInventoryNetworkCmd(kubeConfigFlags, globalConfig), "networks")
	addAliased(NewInventoryStorageCmd(kubeConfigFlags, globalConfig), "storages")
	addAliased(NewInventoryVMCmd(kubeConfigFlags, globalConfig), "vms")
	addAliased(NewInventoryDataCenterCmd(kubeConfigFlags, globalConfig), "datacenters")
	addAliased(NewInventoryClusterCmd(kubeConfigFlags, globalConfig), "clusters")
	addAliased(NewInventoryDiskCmd(kubeConfigFlags, globalConfig), "disks")
	// Profile resources (oVirt).
	addAliased(NewInventoryDiskProfileCmd(kubeConfigFlags, globalConfig), "diskprofiles", "disk-profiles")
	addAliased(NewInventoryNICProfileCmd(kubeConfigFlags, globalConfig), "nicprofiles", "nic-profiles")
	// OpenStack-specific resources.
	addAliased(NewInventoryInstanceCmd(kubeConfigFlags, globalConfig), "instances")
	addAliased(NewInventoryImageCmd(kubeConfigFlags, globalConfig), "images")
	addAliased(NewInventoryFlavorCmd(kubeConfigFlags, globalConfig), "flavors")
	addAliased(NewInventoryProjectCmd(kubeConfigFlags, globalConfig), "projects")
	addAliased(NewInventoryVolumeCmd(kubeConfigFlags, globalConfig), "volumes")
	addAliased(NewInventoryVolumeTypeCmd(kubeConfigFlags, globalConfig), "volumetypes", "volume-types")
	addAliased(NewInventorySnapshotCmd(kubeConfigFlags, globalConfig), "snapshots")
	addAliased(NewInventorySubnetCmd(kubeConfigFlags, globalConfig), "subnets")
	// vSphere-specific resources.
	addAliased(NewInventoryDatastoreCmd(kubeConfigFlags, globalConfig), "datastores")
	addAliased(NewInventoryResourcePoolCmd(kubeConfigFlags, globalConfig), "resourcepools", "resource-pools")
	addAliased(NewInventoryFolderCmd(kubeConfigFlags, globalConfig), "folders")
	// Kubernetes-specific resources.
	addAliased(NewInventoryPVCCmd(kubeConfigFlags, globalConfig), "pvcs", "persistentvolumeclaims")
	addAliased(NewInventoryDataVolumeCmd(kubeConfigFlags, globalConfig), "datavolumes", "data-volumes")
	// Provider inventory.
	addAliased(NewInventoryProviderCmd(kubeConfigFlags, globalConfig), "providers")
	// EC2-specific resources.
	addAliased(NewInventoryEC2InstanceCmd(kubeConfigFlags, globalConfig), "ec2-instances")
	addAliased(NewInventoryEC2VolumeCmd(kubeConfigFlags, globalConfig), "ec2-volumes")
	addAliased(NewInventoryEC2VolumeTypeCmd(kubeConfigFlags, globalConfig), "ec2-volumetypes", "ec2-volume-types")
	addAliased(NewInventoryEC2NetworkCmd(kubeConfigFlags, globalConfig), "ec2-networks")
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_all.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryNetworkCmd creates the "get inventory network" command, which
// lists networks from a source provider's inventory.
func NewInventoryNetworkCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	// query is an optional TSL filter; watch streams updates; provider is
	// the required source-provider name.
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "network",
		Short: "Get networks from a provider",
		Long: `Get networks from a provider's inventory.
Queries the MTV inventory service to list networks available in the source provider.
Use --query to filter results using TSL query syntax.`,
		Example: ` # Filter networks by name
kubectl-mtv get inventory networks --provider vsphere-prod --query "where name ~= 'VM Network.*'"
# List all networks from a provider
kubectl-mtv get inventory networks --provider vsphere-prod
# Output as JSON
kubectl-mtv get inventory networks --provider vsphere-prod --output json`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests with a timeout; watch mode runs
			// until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting networks from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure-skip-TLS from the global config
			// (auto-discovers the URL if needed).
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListNetworksWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'VM Network.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Shell completion for the --provider flag (provider names from the cluster).
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Shell completion for the --output flag (valid format values).
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryStorageCmd creates the "get inventory storage" command, which
// lists storage resources from a source provider's inventory.
func NewInventoryStorageCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	// query is an optional TSL filter; watch streams updates; provider is
	// the required source-provider name.
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "storage",
		Short: "Get storage from a provider",
		Long: `Get storage resources from a provider's inventory.
Queries the MTV inventory service to list storage domains (oVirt), datastores (vSphere),
or storage classes (OpenShift) available in the source provider.`,
		Example: ` # Filter storage by name pattern
kubectl-mtv get inventory storages --provider ovirt-prod --query "where name ~= 'data.*'"
# List all storage from a provider
kubectl-mtv get inventory storages --provider vsphere-prod
# Output as YAML
kubectl-mtv get inventory storages --provider vsphere-prod --output yaml`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests with a timeout; watch mode runs
			// until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting storage from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure-skip-TLS from the global config
			// (auto-discovers the URL if needed).
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListStorageWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'data.*' and type = 'VMFS'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Shell completion for the --provider flag (provider names from the cluster).
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Shell completion for the --output flag (valid format values).
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryVMCmd creates the "get inventory vm" command, which lists
// virtual machines from a source provider's inventory. Supports TSL query
// filtering, extended output, and a 'planvms' format for plan creation.
func NewInventoryVMCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// VM inventory has its own format flag type (adds the 'planvms' format).
	outputFormatFlag := flags.NewVMInventoryOutputTypeFlag()
	// extendedOutput adds extra VM detail columns; query is an optional TSL
	// filter; watch streams updates; provider is the required provider name.
	var extendedOutput bool
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "vm",
		Short: "Get VMs from a provider",
		Long: `Get virtual machines from a provider's inventory.
Queries the MTV inventory service to list VMs available for migration.
Use --query to filter results using TSL query syntax. The --extended
flag shows additional VM details.
Output format 'planvms' generates YAML suitable for use with 'create plan --vms @file'.
Query Language (TSL):
Use --query "where ..." to filter inventory results with TSL query syntax:
--query "where name ~= 'prod-.*'"
--query "where powerState = 'poweredOn' and memoryMB > 4096"
--query "where len(disks) > 1 and cpuCount <= 8"
--query "where any(concerns[*].category = 'Critical')"
--query "where name like '%web%' order by memoryMB desc limit 10"
Supports comparison, regex, logical operators, array functions (len(), any(), all()),
SI units (Ki, Mi, Gi), sorting (ORDER BY), and limiting (LIMIT).
To discover available fields for your provider, run:
kubectl-mtv get inventory vm --provider <provider> --output json
Run 'kubectl-mtv help tsl' for the full syntax reference and field list.`,
		Example: ` # Find VMs with multiple NICs (array length)
kubectl-mtv get inventory vms --provider vsphere-prod --query "where len(nics) >= 2 and cpuCount > 1"
# Find VMs with shared disks (any element match)
kubectl-mtv get inventory vms --provider vsphere-prod --query "where any(disks[*].shared = true)"
# Find VMs with critical migration concerns
kubectl-mtv get inventory vms --provider vsphere-prod --query "where any(concerns[*].category = 'Critical')"
# Filter VMs by name, CPU, and memory
kubectl-mtv get inventory vms --provider vsphere-prod --query "where name ~= 'web-.*' and memoryMB > 4096"
# List all VMs from a provider
kubectl-mtv get inventory vms --provider vsphere-prod
# Show extended VM details
kubectl-mtv get inventory vms --provider vsphere-prod --extended
# Export VMs for plan creation
kubectl-mtv get inventory vms --provider vsphere-prod --query "where name ~= 'prod-.*'" --output planvms > vms.yaml
kubectl-mtv create plan --name my-migration --vms @vms.yaml`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests with a timeout; watch mode runs
			// until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting VMs from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure-skip-TLS from the global config
			// (auto-discovers the URL if needed).
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListVMsWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), extendedOutput, query, watch, inventoryInsecureSkipTLS)
		},
	}
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml, planvms)")
	cmd.Flags().BoolVar(&extendedOutput, "extended", false, "Show extended output")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'web-.*' and cpuCount > 4\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Shell completion for the --provider flag (provider names from the cluster).
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Custom completion for the VM output format, which includes 'planvms'.
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_ec2.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// defaultInventoryTimeout bounds non-watch inventory requests so a stalled
// inventory service cannot hang the CLI indefinitely.
// NOTE(review): 280s looks chosen to stay under common 300s gateway timeouts — confirm.
const defaultInventoryTimeout = 280 * time.Second
// ec2CommandConfig holds the configuration for creating an EC2 inventory command
type ec2CommandConfig struct {
	use        string // cobra Use string (the sub-command name, e.g. "ec2-instance")
	short      string // one-line help text shown in command listings
	long       string // extended help text shown by --help
	logMessage string // operation description passed to logNamespaceOperation
	// listFunc performs the actual inventory listing for this EC2 resource type.
	listFunc func(ctx context.Context, flags *genericclioptions.ConfigFlags, provider, namespace, inventoryURL, outputFormat, query string, watch, insecure bool) error
}
// newEC2InventoryCmd builds one EC2 inventory sub-command from the shared
// configuration in cfg; every EC2 resource command shares this flag set and flow.
func newEC2InventoryCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter, cfg ec2CommandConfig) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          cfg.use,
		Short:        cfg.short,
		Long:         cfg.long,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; everything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, defaultInventoryTimeout)
				defer cancel()
			}

			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNamespaces)
			logNamespaceOperation(cfg.logMessage, namespace, allNamespaces)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return cfg.listFunc(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Completion-registration failures are logged, not fatal, for the EC2 commands.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		klog.V(2).Infof("Failed to register provider flag completion: %v", err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		klog.V(2).Infof("Failed to register output flag completion: %v", err)
	}
	return cmd
}
// NewInventoryEC2InstanceCmd creates the get inventory instance command for EC2
func NewInventoryEC2InstanceCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cfg := ec2CommandConfig{
		use:        "ec2-instance",
		short:      "Get EC2 instances from a provider",
		long:       `Get EC2 instances from an AWS provider's inventory.`,
		logMessage: "Getting EC2 instances from provider",
		listFunc:   inventory.ListEC2InstancesWithInsecure,
	}
	return newEC2InventoryCmd(kubeConfigFlags, globalConfig, cfg)
}
// NewInventoryEC2VolumeCmd creates the get inventory volume command for EC2 EBS volumes
func NewInventoryEC2VolumeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cfg := ec2CommandConfig{
		use:        "ec2-volume",
		short:      "Get EC2 EBS volumes from a provider",
		long:       `Get EC2 EBS volumes (disks) from an AWS provider's inventory.`,
		logMessage: "Getting EC2 EBS volumes from provider",
		listFunc:   inventory.ListEC2VolumesWithInsecure,
	}
	return newEC2InventoryCmd(kubeConfigFlags, globalConfig, cfg)
}
// NewInventoryEC2VolumeTypeCmd creates the get inventory volume-type command for EC2 storage classes
func NewInventoryEC2VolumeTypeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cfg := ec2CommandConfig{
		use:        "ec2-volume-type",
		short:      "Get EC2 EBS volume types from a provider",
		long:       `Get EC2 EBS volume types (storage classes like gp3, io2, etc.) from an AWS provider's inventory.`,
		logMessage: "Getting EC2 volume types from provider",
		listFunc:   inventory.ListEC2VolumeTypesWithInsecure,
	}
	return newEC2InventoryCmd(kubeConfigFlags, globalConfig, cfg)
}
// NewInventoryEC2NetworkCmd creates the get inventory network command for EC2 (VPCs and Subnets)
func NewInventoryEC2NetworkCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	cfg := ec2CommandConfig{
		use:        "ec2-network",
		short:      "Get EC2 networks (VPCs and Subnets) from a provider",
		long:       `Get EC2 networks (VPCs and Subnets) from an AWS provider's inventory.`,
		logMessage: "Getting EC2 networks from provider",
		listFunc:   inventory.ListEC2NetworksWithInsecure,
	}
	return newEC2InventoryCmd(kubeConfigFlags, globalConfig, cfg)
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_openshift.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryNamespaceCmd creates the get inventory namespace command
func NewInventoryNamespaceCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "namespace",
		Short:        "Get namespaces from a provider",
		Long:         `Get namespaces from an OpenShift provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting namespaces from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListNamespacesWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryPVCCmd creates the get inventory pvc command
func NewInventoryPVCCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "pvc",
		Short:        "Get PVCs from a provider",
		Long:         `Get PersistentVolumeClaims from an OpenShift provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting PVCs from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListPersistentVolumeClaimsWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryDataVolumeCmd creates the get inventory data-volume command
func NewInventoryDataVolumeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "data-volume",
		Short:        "Get data volumes from a provider",
		Long:         `Get DataVolumes from an OpenShift provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting data volumes from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListDataVolumesWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_openstack.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryInstanceCmd creates the get inventory instance command
func NewInventoryInstanceCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "instance",
		Short:        "Get instances from a provider",
		Long:         `Get Nova instances from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting instances from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListInstancesWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryImageCmd creates the get inventory image command
func NewInventoryImageCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var query string
	var watch bool
	var provider string

	cmd := &cobra.Command{
		Use:          "image",
		Short:        "Get images from a provider",
		Long:         `Get Glance images from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Bound non-watch requests so the command cannot hang indefinitely.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting images from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())

			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListImagesWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}

	// BUG FIX: the --provider flag was never registered, so `provider` was always
	// empty when passed to ListImagesWithInsecure (and kubeConfigFlags was unused).
	// Register it — required, with completion — exactly like the sibling commands.
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Add completion for provider and output format flags
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryFlavorCmd creates the get inventory flavor command
func NewInventoryFlavorCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "flavor",
		Short:        "Get flavors from a provider",
		Long:         `Get instance flavors from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting flavors from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListFlavorsWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryProjectCmd creates the get inventory project command
func NewInventoryProjectCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "project",
		Short:        "Get projects from a provider",
		Long:         `Get Keystone projects from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting projects from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListProjectsWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryVolumeCmd creates the get inventory volume command
func NewInventoryVolumeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "volume",
		Short:        "Get volumes from a provider",
		Long:         `Get Cinder volumes from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting volumes from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListVolumesWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryVolumeTypeCmd creates the get inventory volumetype command
func NewInventoryVolumeTypeCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "volumetype",
		Short:        "Get volume types from a provider",
		Long:         `Get Cinder volume types from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting volume types from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListVolumeTypesWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventorySnapshotCmd creates the get inventory snapshot command
func NewInventorySnapshotCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "snapshot",
		Short:        "Get snapshots from a provider",
		Long:         `Get volume snapshots from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting snapshots from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListSnapshotsWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventorySubnetCmd creates the get inventory subnet command
func NewInventorySubnetCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "subnet",
		Short:        "Get subnets from a provider",
		Long:         `Get network subnets from an OpenStack provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), allNS)
			logNamespaceOperation("Getting subnets from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListSubnetsWithInsecure(
				ctx,
				globalConfig.GetKubeConfigFlags(),
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_ovirt.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryDiskProfileCmd creates the get inventory disk-profile command
func NewInventoryDiskProfileCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		query    string
		watch    bool
		provider string
	)
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:          "disk-profile",
		Short:        "Get disk profiles from a provider",
		Long:         `Get disk profiles from an oVirt provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A watch runs until interrupted; anything else gets a hard deadline.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}

			kubeCfg := globalConfig.GetKubeConfigFlags()
			allNS := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(kubeCfg, allNS)
			logNamespaceOperation("Getting disk profiles from provider", namespace, allNS)
			logOutputFormat(outputFormatFlag.GetValue())

			// Inventory endpoint details come from global config (auto-discovered when unset).
			return inventory.ListDiskProfilesWithInsecure(
				ctx,
				kubeCfg,
				provider,
				namespace,
				globalConfig.GetInventoryURL(),
				outputFormatFlag.GetValue(),
				query,
				watch,
				globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}

	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Register shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryNICProfileCmd creates the get inventory nic-profile command.
// kubeConfigFlags is used for shell completion of --provider; runtime
// configuration comes from globalConfig.
func NewInventoryNICProfileCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		queryFilter  string
		watchChanges bool
		providerName string
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:          "nic-profile",
		Short:        "Get NIC profiles from a provider",
		Long:         `Get vNIC profiles from an oVirt provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
			}
			defer cancel()
			cfg := globalConfig.GetKubeConfigFlags()
			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(cfg, allNamespaces)
			logNamespaceOperation("Getting NIC profiles from provider", namespace, allNamespaces)
			logOutputFormat(outputFlag.GetValue())
			// Inventory URL and TLS settings come from global config (auto-discovered when unset).
			return inventory.ListNICProfilesWithInsecure(
				ctx, cfg, providerName, namespace,
				globalConfig.GetInventoryURL(), outputFlag.GetValue(),
				queryFilter, watchChanges, globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}
	cmd.Flags().StringVarP(&providerName, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&queryFilter, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_ovirt_vsphere.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryHostCmd creates the get inventory host command.
//
// kubeConfigFlags is used only for shell completion of the --provider flag;
// all runtime configuration is read from globalConfig.
func NewInventoryHostCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "host",
		Short: "Get hosts from a provider",
		Long: `Get hypervisor hosts from a provider's inventory.
Lists ESXi hosts (vSphere) or hypervisor hosts (oVirt) from the source provider.
Host information is useful for planning migrations and understanding the source environment.`,
		Example: ` # Filter hosts by cluster
kubectl-mtv get inventory hosts --provider vsphere-prod --query "where cluster = 'production'"
# List all hosts from a provider
kubectl-mtv get inventory hosts --provider vsphere-prod
# Output as JSON
kubectl-mtv get inventory hosts --provider vsphere-prod --output json`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting hosts from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListHostsWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	// --provider is mandatory: inventory queries are always scoped to one provider.
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Add completion for provider and output format flags
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryDataCenterCmd creates the get inventory datacenter command.
//
// kubeConfigFlags is used only for shell completion of the --provider flag;
// all runtime configuration is read from globalConfig.
func NewInventoryDataCenterCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "datacenter",
		Short: "Get datacenters from a provider",
		Long: `Get datacenters from a provider's inventory.
Lists datacenters from vSphere or oVirt providers. Datacenters are the top-level
organizational units that contain clusters, hosts, and VMs.`,
		Example: ` # Filter datacenters by name
kubectl-mtv get inventory datacenters --provider vsphere-prod --query "where name ~= 'DC.*'"
# List all datacenters from a provider
kubectl-mtv get inventory datacenters --provider vsphere-prod
# Output as YAML
kubectl-mtv get inventory datacenters --provider vsphere-prod --output yaml`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting datacenters from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListDataCentersWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	// --provider is mandatory: inventory queries are always scoped to one provider.
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Add completion for provider and output format flags
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryClusterCmd creates the get inventory cluster command.
// kubeConfigFlags is used for shell completion of --provider; runtime
// configuration comes from globalConfig.
func NewInventoryClusterCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		queryFilter  string
		watchChanges bool
		providerName string
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "Get clusters from a provider",
		Long: `Get clusters from a provider's inventory.
Lists compute clusters from vSphere or oVirt providers. Clusters group hosts
together and define resource pools for VMs.`,
		Example: ` # Filter clusters by datacenter
kubectl-mtv get inventory clusters --provider vsphere-prod --query "where datacenter = 'DC1'"
# List all clusters from a provider
kubectl-mtv get inventory clusters --provider vsphere-prod
# Output as JSON
kubectl-mtv get inventory clusters --provider vsphere-prod --output json`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
			}
			defer cancel()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting clusters from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFlag.GetValue())
			// Inventory URL and TLS settings come from global config (auto-discovered when unset).
			return inventory.ListClustersWithInsecure(
				ctx, globalConfig.GetKubeConfigFlags(), providerName, namespace,
				globalConfig.GetInventoryURL(), outputFlag.GetValue(),
				queryFilter, watchChanges, globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}
	cmd.Flags().StringVarP(&providerName, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&queryFilter, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryDiskCmd creates the get inventory disk command.
//
// kubeConfigFlags is used only for shell completion of the --provider flag;
// all runtime configuration is read from globalConfig.
func NewInventoryDiskCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:   "disk",
		Short: "Get disks from a provider",
		Long: `Get disks from a provider's inventory.
Lists virtual disks from vSphere or oVirt providers. Disk information includes
size, storage location, and attachment to VMs.`,
		// FIX: the example previously said "SI units (greater than 100GB)" while
		// the query actually uses the binary unit 100Gi — the help text now
		// matches the example query.
		Example: ` # Filter disks by size using binary units (greater than 100Gi)
kubectl-mtv get inventory disks --provider vsphere-prod --query "where capacity > 100Gi"
# List all disks from a provider
kubectl-mtv get inventory disks --provider ovirt-prod
# Output as JSON
kubectl-mtv get inventory disks --provider vsphere-prod --output json`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting disks from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListDisksWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	// --provider is mandatory: inventory queries are always scoped to one provider.
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Add completion for provider and output format flags
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_provider.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryProviderCmd creates the get inventory provider command.
// kubeConfigFlags is used for shell completion of --name; runtime
// configuration comes from globalConfig.
func NewInventoryProviderCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		queryFilter  string
		watchChanges bool
		name         string
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:          "provider",
		Short:        "Get inventory information from providers",
		Long:         `Get inventory information from providers including resource counts and provider status`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
			}
			defer cancel()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			// Without --name the command reports on every provider.
			op := "Getting inventory from all providers"
			if name != "" {
				op = "Getting inventory from provider"
			}
			logNamespaceOperation(op, namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFlag.GetValue())
			// Inventory URL and TLS settings come from global config (auto-discovered when unset).
			return inventory.ListProvidersWithInsecure(
				ctx, globalConfig.GetKubeConfigFlags(), name, namespace,
				globalConfig.GetInventoryURL(), outputFlag.GetValue(),
				queryFilter, watchChanges, globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Provider name")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&queryFilter, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the name and output flags.
	if err := cmd.RegisterFlagCompletionFunc("name", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/inventory_vsphere.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/inventory"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewInventoryDatastoreCmd creates the get inventory datastore command.
// kubeConfigFlags is used for shell completion of --provider; runtime
// configuration comes from globalConfig.
func NewInventoryDatastoreCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		queryFilter  string
		watchChanges bool
		providerName string
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:          "datastore",
		Short:        "Get datastores from a provider",
		Long:         `Get datastores from a vSphere provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
			}
			defer cancel()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting datastores from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFlag.GetValue())
			// Inventory URL and TLS settings come from global config (auto-discovered when unset).
			return inventory.ListDatastoresWithInsecure(
				ctx, globalConfig.GetKubeConfigFlags(), providerName, namespace,
				globalConfig.GetInventoryURL(), outputFlag.GetValue(),
				queryFilter, watchChanges, globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}
	cmd.Flags().StringVarP(&providerName, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&queryFilter, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryResourcePoolCmd creates the get inventory resource-pool command.
//
// kubeConfigFlags is used only for shell completion of the --provider flag;
// all runtime configuration is read from globalConfig.
func NewInventoryResourcePoolCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var query string
	var watch bool
	var provider string
	cmd := &cobra.Command{
		Use:          "resource-pool",
		Short:        "Get resource pools from a provider",
		Long:         `Get resource pools from a vSphere provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting resource pools from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFormatFlag.GetValue())
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return inventory.ListResourcePoolsWithInsecure(ctx, globalConfig.GetKubeConfigFlags(), provider, namespace, inventoryURL, outputFormatFlag.GetValue(), query, watch, inventoryInsecureSkipTLS)
		},
	}
	// BUG FIX: `provider` is passed to ListResourcePoolsWithInsecure, but the
	// --provider flag was never registered, so it could not be set and the
	// query always ran with an empty provider. Register it (required, with
	// completion) consistently with the sibling inventory commands
	// (datastore, folder, host, ...).
	cmd.Flags().StringVarP(&provider, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&query, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Add completion for provider and output format flags
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// NewInventoryFolderCmd creates the get inventory folder command.
// kubeConfigFlags is used for shell completion of --provider; runtime
// configuration comes from globalConfig.
func NewInventoryFolderCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		queryFilter  string
		watchChanges bool
		providerName string
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:          "folder",
		Short:        "Get folders from a provider",
		Long:         `Get folders from a vSphere provider's inventory.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 280*time.Second)
			}
			defer cancel()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			logNamespaceOperation("Getting folders from provider", namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFlag.GetValue())
			// Inventory URL and TLS settings come from global config (auto-discovered when unset).
			return inventory.ListFoldersWithInsecure(
				ctx, globalConfig.GetKubeConfigFlags(), providerName, namespace,
				globalConfig.GetInventoryURL(), outputFlag.GetValue(),
				queryFilter, watchChanges, globalConfig.GetInventoryInsecureSkipTLS(),
			)
		},
	}
	cmd.Flags().StringVarP(&providerName, "provider", "p", "", "Provider name")
	_ = cmd.MarkFlagRequired("provider")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().StringVarP(&queryFilter, "query", "q", "", "Query filter using TSL syntax (e.g. \"where name ~= 'prod-.*'\")")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the provider and output flags.
	if err := cmd.RegisterFlagCompletionFunc("provider", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/mapping.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/mapping"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewMappingCmd creates the get mapping command with subcommands.
//
// The root command lists both mapping types ("all"); the network/storage
// subcommands added below narrow to one type.
func NewMappingCmd(globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var watchFlag bool
	var mappingName string
	cmd := &cobra.Command{
		Use:   "mapping",
		Short: "Get mappings",
		Long: `Get network and storage mappings.
When called without a subcommand, lists both network and storage mappings.
Use 'mapping network' or 'mapping storage' subcommands to view a specific
mapping type.`,
		Example: ` # List all mappings (both network and storage)
kubectl-mtv get mappings
# Get a specific mapping by name (searches both types)
kubectl-mtv get mapping --name my-mapping --output yaml
# Watch all mapping changes
kubectl-mtv get mappings --watch
# List only network mappings
kubectl-mtv get mapping network
# List only storage mappings
kubectl-mtv get mapping storage`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watchFlag {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			// Log the operation being performed
			if mappingName != "" {
				logNamespaceOperation("Getting mapping", namespace, globalConfig.GetAllNamespaces())
			} else {
				logNamespaceOperation("Getting all mappings", namespace, globalConfig.GetAllNamespaces())
			}
			logOutputFormat(outputFormatFlag.GetValue())
			// "all" selects both network and storage mappings.
			return mapping.List(ctx, globalConfig.GetKubeConfigFlags(), "all", namespace, watchFlag, outputFormatFlag.GetValue(), mappingName, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&mappingName, "name", "M", "", "Mapping name")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watchFlag, "watch", "w", false, "Watch for changes")
	// NOTE(review): unlike the network/storage subcommands, no shell completion
	// is registered for --name here — confirm whether that is intentional.
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add subcommands for network and storage
	cmd.AddCommand(newGetNetworkMappingCmd(globalConfig))
	cmd.AddCommand(newGetStorageMappingCmd(globalConfig))
	return cmd
}
// newGetNetworkMappingCmd creates the get network mapping subcommand.
func newGetNetworkMappingCmd(globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var watch bool
	var mappingName string
	cmd := &cobra.Command{
		Use:   "network",
		Short: "Get network mappings",
		Long: `Get network mappings that define how source provider networks map to target OpenShift networks.
Network mappings translate source VM network connections to target network attachment
definitions (NADs) or pod networking.`,
		Example: ` # List all network mappings
kubectl-mtv get mapping network
# Get a specific network mapping in YAML
kubectl-mtv get mapping network --name my-network-map --output yaml
# Watch network mapping changes
kubectl-mtv get mapping network --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Non-watch invocations are bounded by a timeout so the command
			// cannot hang indefinitely; watch mode runs until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			// Log the operation being performed
			if mappingName != "" {
				logNamespaceOperation("Getting network mapping", namespace, globalConfig.GetAllNamespaces())
			} else {
				logNamespaceOperation("Getting network mappings", namespace, globalConfig.GetAllNamespaces())
			}
			logOutputFormat(outputFormatFlag.GetValue())
			// "network" restricts the listing to network mappings only.
			return mapping.List(ctx, globalConfig.GetKubeConfigFlags(), "network", namespace, watch, outputFormatFlag.GetValue(), mappingName, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&mappingName, "name", "M", "", "Mapping name")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	// Add completion for name and output format flags
	if err := cmd.RegisterFlagCompletionFunc("name", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return completion.MappingNameCompletion(globalConfig.GetKubeConfigFlags(), "network")(cmd, args, toComplete)
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
// newGetStorageMappingCmd creates the get storage mapping subcommand.
func newGetStorageMappingCmd(globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		name         string
		watchChanges bool
	)
	outputFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:   "storage",
		Short: "Get storage mappings",
		Long: `Get storage mappings that define how source provider storage maps to target OpenShift storage classes.
Storage mappings translate source VM datastores/storage domains to target Kubernetes
storage classes with optional volume mode and access mode settings.`,
		Example: ` # List all storage mappings
kubectl-mtv get mapping storage
# Get a specific storage mapping in JSON
kubectl-mtv get mapping storage --name my-storage-map --output json
# Watch storage mapping changes
kubectl-mtv get mapping storage --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(c *cobra.Command, _ []string) error {
			ctx := c.Context()
			// Bound non-watch invocations with a timeout; watch mode runs until interrupted.
			cancel := context.CancelFunc(func() {})
			if !watchChanges {
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
			}
			defer cancel()
			namespace := client.ResolveNamespaceWithAllFlag(globalConfig.GetKubeConfigFlags(), globalConfig.GetAllNamespaces())
			// Log the operation being performed
			op := "Getting storage mappings"
			if name != "" {
				op = "Getting storage mapping"
			}
			logNamespaceOperation(op, namespace, globalConfig.GetAllNamespaces())
			logOutputFormat(outputFlag.GetValue())
			return mapping.List(ctx, globalConfig.GetKubeConfigFlags(), "storage", namespace, watchChanges, outputFlag.GetValue(), name, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&name, "name", "M", "", "Mapping name")
	cmd.Flags().VarP(outputFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watchChanges, "watch", "w", false, "Watch for changes")
	// Shell completion for the name and output flags.
	if err := cmd.RegisterFlagCompletionFunc("name", func(c *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return completion.MappingNameCompletion(globalConfig.GetKubeConfigFlags(), "storage")(c, args, toComplete)
	}); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return outputFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/plan.go | Go | package get
import (
"context"
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewPlanCmd creates the get plan command.
//
// By default the command lists migration plans in the resolved namespace.
// The --vms and --disk flags switch it to per-VM / per-disk status views for
// a single named plan; both flags together produce the combined view. Each
// of those views requires --name.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	var watch bool
	var vms bool
	var disk bool
	var planName string
	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Get migration plans",
		Long: `Get migration plans from the cluster.
Lists all plans in the namespace, or retrieves details for a specific plan.
Use --vms to see the migration status of individual VMs within a plan.
Use --disk to see the disk transfer status with individual disk details.
Use both --vms and --disk together to see VMs with their disk details.`,
		Example: ` # List all plans in current namespace
kubectl-mtv get plans
# List plans across all namespaces
kubectl-mtv get plans --all-namespaces
# Get a specific plan in JSON format
kubectl-mtv get plan --name my-migration --output json
# Watch plan status changes
kubectl-mtv get plan --name my-migration --watch
# Get VM migration status within a plan
kubectl-mtv get plan --name my-migration --vms
# Get disk transfer status within a plan
kubectl-mtv get plan --name my-migration --disk
# Get both VM and disk transfer status
kubectl-mtv get plan --name my-migration --vms --disk`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Watch mode must be able to run indefinitely; otherwise bound
			// the request with a 30s timeout.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}
			// Deliberately shadows the outer kubeConfigFlags parameter with
			// the globally configured flags.
			kubeConfigFlags := globalConfig.GetKubeConfigFlags()
			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(kubeConfigFlags, allNamespaces)
			// If both --vms and --disk flags are used, show combined view
			if vms && disk {
				if planName == "" {
					return fmt.Errorf("plan NAME is required when using --vms and --disk flags")
				}
				// Log the operation being performed
				logNamespaceOperation("Getting plan VMs with disk details", namespace, allNamespaces)
				// NOTE(review): the format is logged here but not passed to
				// ListVMsWithDisks — confirm the sub-view honors --output.
				logOutputFormat(outputFormatFlag.GetValue())
				return plan.ListVMsWithDisks(ctx, kubeConfigFlags, planName, namespace, watch)
			}
			// If --vms flag is used, switch to ListVMs behavior
			if vms {
				if planName == "" {
					return fmt.Errorf("plan NAME is required when using --vms flag")
				}
				// Log the operation being performed
				logNamespaceOperation("Getting plan VMs", namespace, allNamespaces)
				logOutputFormat(outputFormatFlag.GetValue())
				return plan.ListVMs(ctx, kubeConfigFlags, planName, namespace, watch)
			}
			// If --disk flag is used, switch to ListDisks behavior
			if disk {
				if planName == "" {
					return fmt.Errorf("plan NAME is required when using --disk flag")
				}
				// Log the operation being performed
				logNamespaceOperation("Getting plan disk transfers", namespace, allNamespaces)
				logOutputFormat(outputFormatFlag.GetValue())
				return plan.ListDisks(ctx, kubeConfigFlags, planName, namespace, watch)
			}
			// Default behavior: list plans
			// Log the operation being performed
			if planName != "" {
				logNamespaceOperation("Getting plan", namespace, allNamespaces)
			} else {
				logNamespaceOperation("Getting plans", namespace, allNamespaces)
			}
			logOutputFormat(outputFormatFlag.GetValue())
			return plan.List(ctx, kubeConfigFlags, namespace, watch, outputFormatFlag.GetValue(), planName, globalConfig.GetUseUTC())
		},
	}
	cmd.Flags().StringVarP(&planName, "name", "M", "", "Plan name")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")
	cmd.Flags().BoolVar(&vms, "vms", false, "Get VMs status in the migration plan (requires plan NAME)")
	cmd.Flags().BoolVar(&disk, "disk", false, "Get disk transfer status in the migration plan (requires plan NAME)")
	// Add completion for name and output format flags
	if err := cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Add completion for output format flag
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/get/provider.go | Go | package get
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/get/provider"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewProviderCmd creates the get provider command.
//
// It lists MTV providers in the resolved namespace, or a single provider
// when --name is supplied, optionally watching for changes.
func NewProviderCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var (
		name       string
		watch      bool
		formatFlag = flags.NewOutputFormatTypeFlag()
	)

	cmd := &cobra.Command{
		Use:   "provider",
		Short: "Get providers",
		Long: `Get MTV providers from the cluster.
Providers represent source (oVirt, vSphere, OpenStack, OVA, EC2) or target (OpenShift)
environments for VM migrations. Lists all providers or retrieves details for a specific one.`,
		Example: ` # List all providers
kubectl-mtv get providers
# List providers across all namespaces
kubectl-mtv get providers --all-namespaces
# Get provider details in YAML format
kubectl-mtv get provider --name vsphere-prod --output yaml
# Watch provider status changes
kubectl-mtv get providers --watch`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// A non-watch invocation is bounded by a 30-second timeout;
			// watching must be free to run until interrupted.
			if !watch {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
				defer cancel()
			}

			// Resolve effective configuration (shadowing the outer
			// kubeConfigFlags parameter with the global ones on purpose).
			kubeConfigFlags := globalConfig.GetKubeConfigFlags()
			allNamespaces := globalConfig.GetAllNamespaces()
			namespace := client.ResolveNamespaceWithAllFlag(kubeConfigFlags, allNamespaces)

			// Inventory URL / TLS settings come from global config, which
			// auto-discovers the URL when it was not provided explicitly.
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()

			// Log the operation being performed.
			operation := "Getting providers"
			if name != "" {
				operation = "Getting provider"
			}
			logNamespaceOperation(operation, namespace, allNamespaces)
			logOutputFormat(formatFlag.GetValue())

			return provider.List(ctx, kubeConfigFlags, namespace, inventoryURL, watch, formatFlag.GetValue(), name, inventoryInsecureSkipTLS)
		},
	}

	cmd.Flags().StringVarP(&name, "name", "M", "", "Provider name")
	cmd.Flags().VarP(formatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for changes")

	// Shell completion for --name (live provider names) and --output.
	if err := cmd.RegisterFlagCompletionFunc("name", completion.ProviderNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return formatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/health/health.go | Go | package health
import (
"context"
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
pkghealth "github.com/yaacov/kubectl-mtv/pkg/cmd/health"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// GlobalConfigGetter is an interface for accessing global configuration
// needed by the health command.
type GlobalConfigGetter interface {
	// GetAllNamespaces reports whether user resources (providers, plans)
	// should be checked across all namespaces.
	GetAllNamespaces() bool
	// GetVerbosity returns the CLI verbosity level; any value > 0 enables
	// verbose health-report output.
	GetVerbosity() int
}
// NewHealthCmd creates the health command.
//
// The command runs a bundle of MTV/Forklift health checks (operator,
// controller configuration, pods, providers, plans, and optionally pod
// logs) and prints a report in the chosen output format.
func NewHealthCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var skipLogs bool
	var logLines int
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	cmd := &cobra.Command{
		Use:   "health",
		Short: "Check the health and deployment status of the MTV/Forklift system",
		Long: `Perform comprehensive health checks on the MTV/Forklift migration system.
This command checks:
- MTV Operator installation and version
- ForkliftController configuration (feature flags, VDDK image, custom images)
- Forklift pod health (status, restarts, OOMKilled)
- Provider connectivity and readiness
- Migration plan status and issues
- Pod logs for errors and warnings (can be skipped with --skip-logs)
Namespace behavior:
Forklift OPERATOR components (controller, pods, logs) are always checked in
the auto-detected operator namespace (typically openshift-mtv), regardless
of the --namespace flag.
The --namespace and --all-namespaces flags control the scope for USER RESOURCES:
- Providers: checked in the specified namespace or all namespaces with --all-namespaces
- Plans: checked in the specified namespace or all namespaces with --all-namespaces
Configuration warnings (e.g., missing VDDK image for vSphere migrations)
only appear if relevant providers exist in the scoped namespace(s).
Use --all-namespaces to check all namespaces cluster-wide.
Examples:
# Check health in the default MTV namespace (includes log analysis)
kubectl mtv health
# Check health with JSON output
kubectl mtv health --output json
# Check health for providers/plans in a specific namespace
kubectl mtv health --namespace my-namespace
# Check health across all namespaces (recommended for full cluster check)
kubectl mtv health --all-namespaces
# Check health without log analysis (faster)
kubectl mtv health --skip-logs
# Check health with more log lines analyzed
kubectl mtv health --log-lines 100`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Create context with timeout (health checks touch many
			// resources, so allow a generous 60 seconds).
			ctx, cancel := context.WithTimeout(cmd.Context(), 60*time.Second)
			defer cancel()
			// Get namespace from flag or use default (empty string means
			// "let the checker pick its default scope").
			namespace := ""
			if kubeConfigFlags.Namespace != nil && *kubeConfigFlags.Namespace != "" {
				namespace = *kubeConfigFlags.Namespace
			}
			// Build health check options
			opts := pkghealth.HealthCheckOptions{
				Namespace:     namespace,
				AllNamespaces: globalConfig.GetAllNamespaces(),
				CheckLogs:     !skipLogs, // --skip-logs inverts this
				LogLines:      logLines,
				Verbose:       globalConfig.GetVerbosity() > 0,
			}
			// Run health check
			report, err := pkghealth.RunHealthCheck(ctx, kubeConfigFlags, opts)
			if err != nil {
				return fmt.Errorf("health check failed: %v", err)
			}
			// Print the report
			return pkghealth.PrintHealthReport(report, outputFormatFlag.GetValue())
		},
	}
	// Add flags
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (table, json, yaml)")
	cmd.Flags().BoolVar(&skipLogs, "skip-logs", false, "Skip pod log analysis (faster but less thorough)")
	cmd.Flags().IntVar(&logLines, "log-lines", 100, "Number of log lines to analyze per pod")
	// Add completion for output format flag
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/help/help.go | Go | package help
import (
"encoding/json"
"fmt"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"github.com/yaacov/kubectl-mtv/pkg/cmd/help"
)
// NewHelpCmd creates the help command with machine-readable output support.
//
// Without --machine it behaves like Cobra's built-in help (plus named help
// topics such as "tsl" and "karl"). With --machine it emits a JSON/YAML
// schema of the command tree for MCP/automation consumers, optionally
// filtered to a subtree or to read-only / write commands.
func NewHelpCmd(rootCmd *cobra.Command, clientVersion string) *cobra.Command {
	var machine bool
	var short bool
	var readOnly bool
	var write bool
	var includeGlobalFlags bool
	var outputFormat string
	cmd := &cobra.Command{
		Use:   "help [command]",
		Short: "Help about any command",
		Long: `Help provides help for any command in the application.
Simply type kubectl-mtv help [path to command] for full details.
Use --machine to output the command schema in a machine-readable format
(JSON or YAML) for integration with MCP servers and automation tools.
You can scope --machine output to a specific command or subtree by passing
the command path (e.g., help --machine get plan). Use --short with --machine
to omit long descriptions and examples for a condensed view.
Help topics are also available for domain-specific languages:
tsl - Tree Search Language query syntax reference
karl - Kubernetes Affinity Rule Language syntax reference`,
		Example: ` # Get help for a command
kubectl-mtv help get plan
# Learn about the TSL query language
kubectl-mtv help tsl
# Learn about the KARL affinity syntax
kubectl-mtv help karl
# Output complete command schema as JSON
kubectl-mtv help --machine
# Output schema for a single command
kubectl-mtv help --machine get plan
# Output schema for all "get" commands
kubectl-mtv help --machine get
# Condensed schema without long descriptions or examples
kubectl-mtv help --machine --short
# Output schema in YAML format
kubectl-mtv help --machine --output yaml
# Output only read-only commands
kubectl-mtv help --machine --read-only
# Output only write commands
kubectl-mtv help --machine --write
# Get TSL reference in machine-readable format
kubectl-mtv help --machine tsl`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Check for help topics (e.g., "help tsl", "help karl").
			// Topics take precedence over command names.
			if len(args) > 0 {
				if topic := help.GetTopic(args[0]); topic != nil {
					if machine {
						return outputTopic(cmd, topic, outputFormat)
					}
					fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n%s\n", topic.Short, topic.Content)
					return nil
				}
			}
			if !machine {
				// Default help behavior - show help for root or specified command
				if len(args) == 0 {
					return rootCmd.Help()
				}
				// Find the subcommand and show its help
				targetCmd, _, err := rootCmd.Find(args)
				if err != nil {
					return fmt.Errorf("unknown command %q for %q", args, rootCmd.Name())
				}
				return targetCmd.Help()
			}
			// Machine-readable output
			if readOnly && write {
				return fmt.Errorf("flags --read-only and --write are mutually exclusive")
			}
			opts := help.Options{
				ReadOnly:           readOnly,
				Write:              write,
				IncludeGlobalFlags: includeGlobalFlags,
				Short:              short,
			}
			schema := help.Generate(rootCmd, clientVersion, opts)
			// Filter to a specific command subtree if args are provided
			if len(args) > 0 {
				if n := help.FilterByPath(schema, args); n == 0 {
					return fmt.Errorf("unknown command %q for %q", strings.Join(args, " "), rootCmd.Name())
				}
			}
			return outputSchema(cmd, schema, outputFormat)
		},
	}
	cmd.Flags().BoolVar(&machine, "machine", false, "Enable machine-readable output")
	cmd.Flags().BoolVar(&short, "short", false, "Omit long descriptions and examples from machine output (with --machine)")
	cmd.Flags().StringVarP(&outputFormat, "output", "o", "json", "Output format for --machine: json, yaml")
	cmd.Flags().BoolVar(&readOnly, "read-only", false, "Include only read-only commands (with --machine)")
	cmd.Flags().BoolVar(&write, "write", false, "Include only write commands (with --machine)")
	cmd.Flags().BoolVar(&includeGlobalFlags, "include-global-flags", true, "Include global flags in output (with --machine)")
	// Add completion for output format flag
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Ignore completion registration errors - not critical
		_ = err
	}
	return cmd
}
// outputTopic writes a help topic to the command's output in the specified
// format ("json" or "yaml"); any other format is rejected with an error.
func outputTopic(cmd *cobra.Command, topic *help.Topic, format string) error {
	var data []byte
	var marshalErr error
	switch format {
	case "json":
		data, marshalErr = json.MarshalIndent(topic, "", " ")
	case "yaml":
		data, marshalErr = yaml.Marshal(topic)
	default:
		return fmt.Errorf("unsupported output format: %s (use json or yaml)", format)
	}
	if marshalErr != nil {
		return fmt.Errorf("failed to marshal topic: %w", marshalErr)
	}
	fmt.Fprintln(cmd.OutOrStdout(), string(data))
	return nil
}
// outputSchema writes the machine-readable command schema to the command's
// output in the specified format ("json" or "yaml").
func outputSchema(cmd *cobra.Command, schema *help.HelpSchema, format string) error {
	var data []byte
	var marshalErr error
	switch format {
	case "json":
		data, marshalErr = json.MarshalIndent(schema, "", " ")
	case "yaml":
		data, marshalErr = yaml.Marshal(schema)
	default:
		return fmt.Errorf("unsupported output format: %s (use json or yaml)", format)
	}
	if marshalErr != nil {
		return fmt.Errorf("failed to marshal schema: %w", marshalErr)
	}
	fmt.Fprintln(cmd.OutOrStdout(), string(data))
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/kubectl-mtv.go | Go | package cmd
import (
"context"
"flag"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"github.com/yaacov/kubectl-mtv/cmd/archive"
"github.com/yaacov/kubectl-mtv/cmd/cancel"
"github.com/yaacov/kubectl-mtv/cmd/create"
"github.com/yaacov/kubectl-mtv/cmd/cutover"
"github.com/yaacov/kubectl-mtv/cmd/delete"
"github.com/yaacov/kubectl-mtv/cmd/describe"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/cmd/health"
"github.com/yaacov/kubectl-mtv/cmd/help"
"github.com/yaacov/kubectl-mtv/cmd/mcpserver"
"github.com/yaacov/kubectl-mtv/cmd/patch"
"github.com/yaacov/kubectl-mtv/cmd/settings"
"github.com/yaacov/kubectl-mtv/cmd/start"
"github.com/yaacov/kubectl-mtv/cmd/unarchive"
"github.com/yaacov/kubectl-mtv/cmd/version"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
pkgversion "github.com/yaacov/kubectl-mtv/pkg/version"
)
// GlobalConfig holds global configuration flags that are passed to all subcommands
type GlobalConfig struct {
	Verbosity                int                            // --verbose level (0=silent, 1=info, 2=debug, 3=trace)
	AllNamespaces            bool                           // --all-namespaces / -A: list resources across all namespaces
	UseUTC                   bool                           // --use-utc: format timestamps in UTC instead of local time
	InventoryURL             string                         // --inventory-url / MTV_INVENTORY_URL; empty triggers auto-discovery
	InventoryInsecureSkipTLS bool                           // --inventory-insecure-skip-tls: skip TLS verification for inventory
	KubeConfigFlags          *genericclioptions.ConfigFlags // shared kubectl connection flags
	discoveredInventoryURL   string                         // cached discovered URL
	inventoryURLResolved     bool                           // flag to track if we've attempted discovery
}
// GetVerbosity returns the verbosity level
// (0=silent, 1=info, 2=debug, 3=trace; set via --verbose).
func (g *GlobalConfig) GetVerbosity() int {
	return g.Verbosity
}
// GetAllNamespaces returns whether to list resources across all namespaces
// (set via --all-namespaces / -A).
func (g *GlobalConfig) GetAllNamespaces() bool {
	return g.AllNamespaces
}
// GetUseUTC returns whether to format times in UTC
// instead of the local timezone (set via --use-utc).
func (g *GlobalConfig) GetUseUTC() bool {
	return g.UseUTC
}
// GetInventoryURL returns the inventory service URL, auto-discovering it
// from OpenShift routes when no explicit URL was supplied via flag or
// environment variable. Discovery runs at most once per process; the
// result (which may be empty on non-OpenShift clusters) is cached for all
// later calls.
func (g *GlobalConfig) GetInventoryURL() string {
	// An explicitly supplied URL always wins.
	if g.InventoryURL != "" {
		return g.InventoryURL
	}

	// Run discovery only the first time through; afterwards just serve
	// whatever (possibly empty) URL we cached.
	if !g.inventoryURLResolved {
		g.inventoryURLResolved = true

		// Discovery scopes to the kubeconfig's default namespace when set.
		namespace := ""
		if g.KubeConfigFlags.Namespace != nil && *g.KubeConfigFlags.Namespace != "" {
			namespace = *g.KubeConfigFlags.Namespace
		}

		// No command context is available here, so fall back to Background.
		if url := client.DiscoverInventoryURL(context.Background(), g.KubeConfigFlags, namespace); url != "" {
			klog.V(2).Infof("Auto-discovered inventory URL: %s", url)
			g.discoveredInventoryURL = url
		} else {
			klog.V(2).Info("No inventory URL provided and auto-discovery failed (this is expected on non-OpenShift clusters)")
		}
	}

	return g.discoveredInventoryURL
}
// GetInventoryInsecureSkipTLS returns whether to skip TLS verification for
// inventory service connections (set via --inventory-insecure-skip-tls or
// the MTV_INVENTORY_INSECURE_SKIP_TLS environment variable).
func (g *GlobalConfig) GetInventoryInsecureSkipTLS() bool {
	return g.InventoryInsecureSkipTLS
}
// GetKubeConfigFlags returns the Kubernetes configuration flags shared by
// all subcommands.
func (g *GlobalConfig) GetKubeConfigFlags() *genericclioptions.ConfigFlags {
	return g.KubeConfigFlags
}
var (
	kubeConfigFlags *genericclioptions.ConfigFlags // shared kubectl connection flags, populated in init()
	rootCmd         *cobra.Command                 // root "kubectl-mtv" command, populated in init()
	globalConfig    *GlobalConfig                  // process-wide configuration, populated in init()
	// Version is set via ldflags during build
	clientVersion = "unknown"
)
// logDebugf logs formatted debug messages at verbosity level 2
// (visible when the CLI runs with -v 2 or higher).
func logDebugf(format string, args ...interface{}) {
	klog.V(2).Infof(format, args...)
}
// GetGlobalConfig returns the global configuration instance created during
// package init.
func GetGlobalConfig() *GlobalConfig {
	return globalConfig
}
// Execute adds all child commands to the root command and sets flags appropriately.
// It runs the root command and returns any execution error to the caller.
func Execute() error {
	return rootCmd.Execute()
}
// init wires up the entire CLI: global flags, the root command, and every
// subcommand. Registration order of subcommands is cosmetic; the help
// command replacement must come after rootCmd exists.
func init() {
	// Export clientVersion to pkg/version for use by other packages
	pkgversion.ClientVersion = clientVersion
	kubeConfigFlags = genericclioptions.NewConfigFlags(true)
	// Initialize global configuration
	globalConfig = &GlobalConfig{
		KubeConfigFlags: kubeConfigFlags,
	}
	rootCmd = &cobra.Command{
		Use:   "kubectl-mtv",
		Short: "Migration Toolkit for Virtualization CLI",
		Long: `Migration Toolkit for Virtualization (MTV) CLI.
Migrate virtual machines from VMware vSphere, oVirt (RHV), OpenStack, and OVA to KubeVirt on OpenShift/Kubernetes.`,
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Initialize klog with the verbosity level.
			// NOTE(review): klog.InitFlags registers into the standard flag
			// set; this is safe once per process but would panic if
			// PersistentPreRun ever ran twice — confirm if commands are
			// re-executed in-process.
			klog.InitFlags(nil)
			if err := flag.Set("v", fmt.Sprintf("%d", globalConfig.Verbosity)); err != nil {
				klog.Warningf("Failed to set klog verbosity: %v", err)
			}
			// Log global configuration if verbosity is enabled
			logDebugf("Global configuration - Verbosity: %d, All Namespaces: %t",
				globalConfig.Verbosity, globalConfig.AllNamespaces)
		},
	}
	kubeConfigFlags.AddFlags(rootCmd.PersistentFlags())
	// Add global flags
	rootCmd.PersistentFlags().IntVarP(&globalConfig.Verbosity, "verbose", "v", 0, "verbose output level (0=silent, 1=info, 2=debug, 3=trace)")
	rootCmd.PersistentFlags().BoolVarP(&globalConfig.AllNamespaces, "all-namespaces", "A", false, "list resources across all namespaces")
	rootCmd.PersistentFlags().BoolVar(&globalConfig.UseUTC, "use-utc", false, "format timestamps in UTC instead of local timezone")
	rootCmd.PersistentFlags().StringVarP(&globalConfig.InventoryURL, "inventory-url", "i", os.Getenv("MTV_INVENTORY_URL"), "Base URL for the inventory service")
	rootCmd.PersistentFlags().BoolVar(&globalConfig.InventoryInsecureSkipTLS, "inventory-insecure-skip-tls", os.Getenv("MTV_INVENTORY_INSECURE_SKIP_TLS") == "true", "Skip TLS verification for inventory service connections")
	// Mark global flags that should appear in AI/MCP tool descriptions.
	// These are surfaced via the "llm-relevant" pflag annotation, which the help
	// generator reads and sets in the machine-readable schema.
	markLLMRelevant(rootCmd.PersistentFlags(), "namespace", "all-namespaces", "inventory-url", "verbose")
	// Add standard commands for various resources - directly using package functions
	rootCmd.AddCommand(get.NewGetCmd(kubeConfigFlags, globalConfig))
	rootCmd.AddCommand(delete.NewDeleteCmd(kubeConfigFlags))
	rootCmd.AddCommand(create.NewCreateCmd(kubeConfigFlags, globalConfig))
	rootCmd.AddCommand(describe.NewDescribeCmd(kubeConfigFlags, globalConfig))
	rootCmd.AddCommand(patch.NewPatchCmd(kubeConfigFlags, globalConfig))
	// Plan commands - directly using package functions
	rootCmd.AddCommand(start.NewStartCmd(kubeConfigFlags, globalConfig))
	rootCmd.AddCommand(cancel.NewCancelCmd(kubeConfigFlags))
	rootCmd.AddCommand(cutover.NewCutoverCmd(kubeConfigFlags))
	rootCmd.AddCommand(archive.NewArchiveCmd(kubeConfigFlags))
	rootCmd.AddCommand(unarchive.NewUnArchiveCmd(kubeConfigFlags))
	// Version command - directly using package function
	rootCmd.AddCommand(version.NewVersionCmd(clientVersion, kubeConfigFlags, globalConfig))
	// Health command - check MTV system health
	rootCmd.AddCommand(health.NewHealthCmd(kubeConfigFlags, globalConfig))
	// Settings command - view ForkliftController settings
	rootCmd.AddCommand(settings.NewSettingsCmd(kubeConfigFlags, globalConfig))
	// MCP Server command - start the Model Context Protocol server
	rootCmd.AddCommand(mcpserver.NewMCPServerCmd())
	// Help command - replace default Cobra help with our enhanced version
	// that supports machine-readable output for MCP server integration
	rootCmd.SetHelpCommand(help.NewHelpCmd(rootCmd, clientVersion))
}
// LLMRelevantAnnotation is the pflag annotation key used to mark flags
// that should be included in AI/MCP tool descriptions.
// Consumed by the machine-readable help generator; see markLLMRelevant.
const LLMRelevantAnnotation = "llm-relevant"
// markLLMRelevant annotates the named flags as relevant for LLM/MCP tool
// descriptions. The help generator reads this annotation and sets the
// llm_relevant field in the machine-readable schema. Unknown flag names
// are silently skipped.
func markLLMRelevant(flags *pflag.FlagSet, names ...string) {
	for _, name := range names {
		pf := flags.Lookup(name)
		if pf == nil {
			// Flag not registered (yet); nothing to annotate.
			continue
		}
		if pf.Annotations == nil {
			pf.Annotations = map[string][]string{}
		}
		pf.Annotations[LLMRelevantAnnotation] = []string{"true"}
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/mcpserver/mcpserver.go | Go | package mcpserver
import (
"context"
"fmt"
"log"
"net"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/modelcontextprotocol/go-sdk/mcp"
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/pkg/mcp/discovery"
"github.com/yaacov/kubectl-mtv/pkg/mcp/tools"
"github.com/yaacov/kubectl-mtv/pkg/mcp/util"
"github.com/yaacov/kubectl-mtv/pkg/version"
)
// Package-level flag storage for the mcp-server command; each field is
// bound to a CLI flag in NewMCPServerCmd.
var (
	sse              bool   // --sse: HTTP/SSE mode instead of stdio
	port             string // --port: listen port for SSE mode
	host             string // --host: bind address for SSE mode
	certFile         string // --cert-file: TLS certificate (TLS enabled only with keyFile)
	keyFile          string // --key-file: TLS private key (TLS enabled only with certFile)
	outputFormat     string // --output-format: default tool output ("text" or "json")
	kubeServer       string // --server: default Kubernetes API server URL
	kubeToken        string // --token: default Kubernetes auth token
	insecureSkipTLS  bool   // --insecure-skip-tls-verify: skip TLS verification for the API
	maxResponseChars int    // --max-response-chars: truncation limit (0 = unlimited)
	readOnly         bool   // --read-only: disable write operations
)
// NewMCPServerCmd creates the mcp-server command.
//
// The command runs an MCP server either over stdio (default, for direct AI
// assistant integration) or over HTTP/SSE (--sse), optionally with TLS when
// both --cert-file and --key-file are provided. Both modes shut down
// gracefully on SIGINT/SIGTERM.
func NewMCPServerCmd() *cobra.Command {
	mcpCmd := &cobra.Command{
		Use:   "mcp-server",
		Short: "Start the MCP (Model Context Protocol) server",
		Long: `Start the MCP (Model Context Protocol) server for kubectl-mtv.
This server provides AI assistants with access to kubectl-mtv resources.
USE WITH CAUTION: Includes write operations that can modify resources.
Modes:
Default: Stdio mode for AI assistant integration
--sse: HTTP server mode with optional TLS
Read-Only Mode:
--read-only: Disables all write operations (mtv_write tool not registered)
Only read operations will be available to AI assistants
Security:
--cert-file: Path to TLS certificate file (enables TLS when both cert and key provided)
--key-file: Path to TLS private key file (enables TLS when both cert and key provided)
Kubernetes Authentication:
--server: Kubernetes API server URL (passed to kubectl via --server flag)
--token: Kubernetes authentication token (passed to kubectl via --token flag)
These flags set default credentials for all requests. They work in both stdio and SSE modes.
SSE Mode Authentication (HTTP Headers):
In SSE mode, the following HTTP headers are also supported for per-request authentication:
Authorization: Bearer <token>
Kubernetes authentication token. Passed to kubectl via --token flag.
X-Kubernetes-Server: <url>
Kubernetes API server URL. Passed to kubectl via --server flag.
Precedence: HTTP headers (per-request) > CLI flags (--server/--token) > kubeconfig (implicit).
Quick Setup for AI Assistants:
Claude Desktop: claude mcp add kubectl-mtv kubectl mtv mcp-server
Cursor IDE: Settings → MCP → Add Server (Name: kubectl-mtv, Command: kubectl, Args: mtv mcp-server)
Manual Claude config: Add to claude_desktop_config.json:
"kubectl-mtv": {"command": "kubectl", "args": ["mtv", "mcp-server"]}`,
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			// Validate output format - only "json" and "text" are supported in MCP mode
			validFormats := map[string]bool{"json": true, "text": true}
			if !validFormats[outputFormat] {
				return fmt.Errorf("invalid --output-format value %q: must be one of: json, text", outputFormat)
			}
			// Set the output format for MCP responses
			util.SetOutputFormat(outputFormat)
			// Set max response size (helps small LLMs stay within context window)
			util.SetMaxResponseChars(maxResponseChars)
			// Set default Kubernetes credentials from CLI flags
			// These serve as fallback when HTTP headers don't provide credentials
			util.SetDefaultKubeServer(kubeServer)
			util.SetDefaultKubeToken(kubeToken)
			util.SetDefaultInsecureSkipTLS(insecureSkipTLS)
			// Create a context that listens for interrupt signals
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			// Setup signal handling for graceful shutdown
			sigChan := make(chan os.Signal, 1)
			signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
			if sse {
				// SSE mode - run HTTP server
				addr := net.JoinHostPort(host, port)
				// Create MCP handler with header capture for SSE mode
				// The SSE transport doesn't populate RequestExtra.Header automatically.
				// The createMCPServerWithHeaderCapture callback is invoked once during
				// session initiation (the initial SSE GET request) and captures HTTP headers
				// at that time. Those captured headers persist for the lifetime of the SSE
				// session and are injected into RequestExtra.Header for all subsequent tool
				// calls within that session. The outer POST-logging wrapper below provides
				// diagnostic logging per-request but doesn't affect header propagation.
				innerHandler := mcp.NewSSEHandler(func(req *http.Request) *mcp.Server {
					server, err := createMCPServerWithHeaderCapture(req, readOnly)
					if err != nil {
						log.Printf("Failed to create server: %v", err)
						return nil
					}
					return server
				}, nil)
				// Wrap to log header capture (without leaking sensitive data)
				handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					if r.Method == http.MethodPost {
						if auth := r.Header.Get("Authorization"); auth != "" {
							// Extract only the scheme (e.g., "Bearer") without token content
							scheme := "unknown"
							if parts := strings.SplitN(auth, " ", 2); len(parts) > 0 {
								scheme = parts[0]
							}
							log.Printf("[auth] SERVER: POST request with Authorization: %s [REDACTED]", scheme)
						} else {
							log.Printf("[auth] SERVER: POST request with NO Authorization header")
						}
					}
					innerHandler.ServeHTTP(w, r)
				})
				server := &http.Server{
					Addr:    addr,
					Handler: handler,
				}
				// Start server in a goroutine
				errChan := make(chan error, 1)
				go func() {
					// Check if TLS should be enabled (both cert and key files provided)
					useTLS := certFile != "" && keyFile != ""
					if useTLS {
						log.Printf("Starting kubectl-mtv MCP server with TLS in SSE mode on %s", addr)
						log.Printf("Using cert: %s, key: %s", certFile, keyFile)
						log.Printf("Connect clients to: https://%s/sse", addr)
						errChan <- server.ListenAndServeTLS(certFile, keyFile)
					} else {
						log.Printf("Starting kubectl-mtv MCP server in SSE mode on %s", addr)
						log.Printf("Connect clients to: http://%s/sse", addr)
						errChan <- server.ListenAndServe()
					}
				}()
				// Wait for either an error or interrupt signal
				select {
				case err := <-errChan:
					if err != nil && err != http.ErrServerClosed {
						return err
					}
				case <-sigChan:
					log.Println("\nShutting down server...")
					// Give the server 5 seconds to gracefully shutdown
					shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
					defer shutdownCancel()
					if err := server.Shutdown(shutdownCtx); err != nil {
						log.Printf("Server shutdown error: %v", err)
					}
				}
				return nil
			}
			// Stdio mode - default behavior
			server, err := createMCPServer()
			if err != nil {
				return fmt.Errorf("failed to create server: %w", err)
			}
			log.Println("Starting kubectl-mtv MCP server in stdio mode")
			log.Println("Server is ready and listening for MCP protocol messages on stdin/stdout")
			// Run server in a goroutine
			errChan := make(chan error, 1)
			go func() {
				errChan <- server.Run(ctx, &mcp.StdioTransport{})
			}()
			// Wait for either an error or interrupt signal
			select {
			case err := <-errChan:
				return err
			case <-sigChan:
				log.Println("\nShutting down server...")
				cancel()
				// Give the server a moment to clean up
				time.Sleep(100 * time.Millisecond)
				return nil
			}
		},
	}
	// Add flags matching the MCP CLI flags
	mcpCmd.Flags().BoolVar(&sse, "sse", false, "Run in SSE (Server-Sent Events) mode over HTTP")
	mcpCmd.Flags().StringVar(&port, "port", "8080", "Port to listen on for SSE mode")
	mcpCmd.Flags().StringVar(&host, "host", "127.0.0.1", "Host address to bind to for SSE mode")
	mcpCmd.Flags().StringVar(&certFile, "cert-file", "", "Path to TLS certificate file (enables TLS when used with --key-file)")
	mcpCmd.Flags().StringVar(&keyFile, "key-file", "", "Path to TLS private key file (enables TLS when used with --cert-file)")
	mcpCmd.Flags().StringVar(&outputFormat, "output-format", "text", "Default output format for commands: text (table) or json")
	mcpCmd.Flags().StringVar(&kubeServer, "server", "", "Kubernetes API server URL (passed to kubectl via --server flag)")
	mcpCmd.Flags().StringVar(&kubeToken, "token", "", "Kubernetes authentication token (passed to kubectl via --token flag)")
	mcpCmd.Flags().BoolVar(&insecureSkipTLS, "insecure-skip-tls-verify", false, "Skip TLS certificate verification for Kubernetes API connections")
	mcpCmd.Flags().IntVar(&maxResponseChars, "max-response-chars", 0, "Max characters for text output (0=unlimited). Helps small LLMs by truncating long responses")
	mcpCmd.Flags().BoolVar(&readOnly, "read-only", false, "Run in read-only mode (disables write operations)")
	return mcpCmd
}
// createMCPServer builds the MCP server for stdio mode, where there is no
// HTTP request and therefore no headers to capture. Tool discovery happens
// at startup using kubectl-mtv help --machine.
func createMCPServer() (*mcp.Server, error) {
	server, err := createMCPServerWithHeaderCapture(nil, readOnly)
	return server, err
}
// createMCPServerWithHeaderCapture creates the MCP server with HTTP header
// capture. The req parameter is the HTTP request that triggered server
// creation (nil in stdio mode); it may carry authentication headers that we
// forward to tool handlers. readOnlyMode controls whether the write tool is
// registered at all.
func createMCPServerWithHeaderCapture(req *http.Request, readOnlyMode bool) (*mcp.Server, error) {
	// Discover the kubectl-mtv command tree; every tool below is generated
	// from this registry.
	registry, err := discovery.NewRegistry(context.Background())
	if err != nil {
		return nil, fmt.Errorf("failed to discover commands: %w", err)
	}

	// Headers from the triggering request are injected into each tool call
	// because the SSE transport does not populate RequestExtra.Header itself.
	var headers http.Header
	if req != nil {
		headers = req.Header
	}

	srv := mcp.NewServer(&mcp.Implementation{
		Name:    "kubectl-mtv",
		Version: version.ClientVersion,
	}, nil)

	// Read-only tools are always registered. Tools carry minimal descriptions
	// (the input schema documents parameters); mtv_help supplies detailed help
	// on demand. AddToolWithCoercion tolerates string booleans ("True"/"true")
	// from AI models that don't emit proper JSON booleans.
	tools.AddToolWithCoercion(srv, tools.GetMinimalMTVReadTool(registry), wrapWithHeaders(tools.HandleMTVRead(registry), headers))
	tools.AddToolWithCoercion(srv, tools.GetMinimalKubectlLogsTool(), wrapWithHeaders(tools.HandleKubectlLogs, headers))
	tools.AddToolWithCoercion(srv, tools.GetMinimalKubectlTool(), wrapWithHeaders(tools.HandleKubectl, headers))
	mcp.AddTool(srv, tools.GetMTVHelpTool(), wrapWithHeaders(tools.HandleMTVHelp, headers))

	// The write tool is withheld entirely when running read-only.
	if readOnlyMode {
		log.Println("Running in read-only mode - write operations disabled")
	} else {
		tools.AddToolWithCoercion(srv, tools.GetMinimalMTVWriteTool(registry), wrapWithHeaders(tools.HandleMTVWrite(registry), headers))
	}
	return srv, nil
}
// wrapWithHeaders decorates a tool handler so that HTTP headers captured at
// server-creation time are visible through RequestExtra, mirroring what a
// fully featured transport would provide. Headers already present on the
// request are never overwritten.
func wrapWithHeaders[In, Out any](
	handler func(context.Context, *mcp.CallToolRequest, In) (*mcp.CallToolResult, Out, error),
	headers http.Header,
) func(context.Context, *mcp.CallToolRequest, In) (*mcp.CallToolResult, Out, error) {
	return func(ctx context.Context, req *mcp.CallToolRequest, input In) (*mcp.CallToolResult, Out, error) {
		// Fill in headers only when we have some and the request lacks them.
		if headers != nil {
			switch {
			case req.Extra == nil:
				req.Extra = &mcp.RequestExtra{Header: headers}
			case req.Extra.Header == nil:
				req.Extra.Header = headers
			}
		}
		return handler(ctx, req, input)
	}
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/patch/mapping.go | Go | package patch
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/patch/mapping"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewMappingCmd assembles the `patch mapping` command group. The group itself
// performs no work: it prints help when invoked bare and delegates real
// patching to the network and storage subcommands.
func NewMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	mappingCmd := &cobra.Command{
		Use:          "mapping",
		Short:        "Patch mappings",
		Long:         `Patch network and storage mappings by adding, updating, or removing pairs`,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// No subcommand given — show usage instead of erroring.
			return cmd.Help()
		},
	}

	// Attach the network and storage patch subcommands.
	for _, sub := range []*cobra.Command{
		newPatchNetworkMappingCmd(kubeConfigFlags, globalConfig),
		newPatchStorageMappingCmd(kubeConfigFlags, globalConfig),
	} {
		mappingCmd.AddCommand(sub)
	}
	return mappingCmd
}
// newPatchNetworkMappingCmd creates the patch network mapping subcommand.
//
// The command edits an existing NetworkMap: --add-pairs / --update-pairs take
// "source:target" specifications and --remove-pairs takes source names. Only
// --name is mandatory; the pair flags may be freely combined in one call.
func newPatchNetworkMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var name string
	var addPairs, updatePairs, removePairs string

	cmd := &cobra.Command{
		Use:   "network",
		Short: "Patch a network mapping",
		Long:  `Patch a network mapping by adding, updating, or removing network pairs`,
		Example: ` # Add network pairs to a mapping
  kubectl-mtv patch mapping network --name my-net-map --add-pairs "VM Network:default"
  # Update network pairs
  kubectl-mtv patch mapping network --name my-net-map --update-pairs "VM Network:migration-net"`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate required --name flag. MarkFlagRequired already rejects a
			// missing flag; this also catches an explicitly empty value (--name "").
			if name == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Get inventory URL from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			return mapping.PatchNetwork(kubeConfigFlags, name, namespace, addPairs, updatePairs, removePairs, inventoryURL)
		},
	}

	cmd.Flags().StringVarP(&name, "name", "M", "", "Network mapping name")
	_ = cmd.MarkFlagRequired("name")
	cmd.Flags().StringVar(&addPairs, "add-pairs", "", "Network pairs to add in format 'source:target-namespace/target-network', 'source:target-network', 'source:default', or 'source:ignored' (comma-separated)")
	cmd.Flags().StringVar(&updatePairs, "update-pairs", "", "Network pairs to update in format 'source:target-namespace/target-network', 'source:target-network', 'source:default', or 'source:ignored' (comma-separated)")
	cmd.Flags().StringVar(&removePairs, "remove-pairs", "", "Source network names to remove from mapping (comma-separated)")

	// Shell completion for --name: suggests existing network mappings.
	// Completion errors are deliberately ignored (best-effort).
	_ = cmd.RegisterFlagCompletionFunc("name", completion.MappingNameCompletion(kubeConfigFlags, "network"))

	return cmd
}
// newPatchStorageMappingCmd creates the patch storage mapping subcommand.
//
// In addition to add/update/remove pair editing, it exposes per-mapping
// defaults (volume mode, access mode, and storage-offload plugin settings)
// that apply to new or updated pairs. Pair specs may embed the same settings
// inline via semicolon-separated parameters.
func newPatchStorageMappingCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	var name string
	var addPairs, updatePairs, removePairs string
	var defaultVolumeMode string
	var defaultAccessMode string
	var defaultOffloadPlugin string
	var defaultOffloadSecret string
	var defaultOffloadVendor string

	cmd := &cobra.Command{
		Use:   "storage",
		Short: "Patch a storage mapping",
		Long:  `Patch a storage mapping by adding, updating, or removing storage pairs`,
		Example: ` # Add storage pairs to a mapping
  kubectl-mtv patch mapping storage --name my-storage-map --add-pairs "datastore1:standard"
  # Update storage pairs
  kubectl-mtv patch mapping storage --name my-storage-map --update-pairs "datastore1:premium"`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate required --name flag. MarkFlagRequired already rejects a
			// missing flag; this also catches an explicitly empty value (--name "").
			if name == "" {
				return fmt.Errorf("--name is required")
			}
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Get inventory URL and insecure skip TLS from global config (auto-discovers if needed)
			inventoryURL := globalConfig.GetInventoryURL()
			inventoryInsecureSkipTLS := globalConfig.GetInventoryInsecureSkipTLS()
			return mapping.PatchStorageWithOptions(kubeConfigFlags, name, namespace, addPairs, updatePairs,
				removePairs, inventoryURL, inventoryInsecureSkipTLS, defaultVolumeMode, defaultAccessMode,
				defaultOffloadPlugin, defaultOffloadSecret, defaultOffloadVendor)
		},
	}

	cmd.Flags().StringVarP(&name, "name", "M", "", "Storage mapping name")
	_ = cmd.MarkFlagRequired("name")
	cmd.Flags().StringVar(&addPairs, "add-pairs", "", "Storage pairs to add in format 'source:storage-class[;volumeMode=Block|Filesystem][;accessMode=ReadWriteOnce|ReadWriteMany|ReadOnlyMany][;offloadPlugin=vsphere][;offloadSecret=secret-name][;offloadVendor=vantara|ontap|...]' (comma-separated pairs, semicolon-separated parameters)")
	cmd.Flags().StringVar(&updatePairs, "update-pairs", "", "Storage pairs to update in format 'source:storage-class[;volumeMode=Block|Filesystem][;accessMode=ReadWriteOnce|ReadWriteMany|ReadOnlyMany][;offloadPlugin=vsphere][;offloadSecret=secret-name][;offloadVendor=vantara|ontap|...]' (comma-separated pairs, semicolon-separated parameters)")
	cmd.Flags().StringVar(&removePairs, "remove-pairs", "", "Source storage names to remove from mapping (comma-separated)")
	cmd.Flags().StringVar(&defaultVolumeMode, "default-volume-mode", "", "Default volume mode for new/updated storage pairs (Filesystem|Block)")
	cmd.Flags().StringVar(&defaultAccessMode, "default-access-mode", "", "Default access mode for new/updated storage pairs (ReadWriteOnce|ReadWriteMany|ReadOnlyMany)")
	cmd.Flags().StringVar(&defaultOffloadPlugin, "default-offload-plugin", "", "Default offload plugin type for new/updated storage pairs (vsphere)")
	cmd.Flags().StringVar(&defaultOffloadSecret, "default-offload-secret", "", "Default offload plugin secret name for new/updated storage pairs")
	cmd.Flags().StringVar(&defaultOffloadVendor, "default-offload-vendor", "", "Default offload plugin vendor for new/updated storage pairs (flashsystem|vantara|ontap|primera3par|pureFlashArray|powerflex|powermax|powerstore|infinibox)")

	// Add completion for volume mode flag
	if err := cmd.RegisterFlagCompletionFunc("default-volume-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"Filesystem", "Block"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Registration can only fail for a programming error (unknown flag),
		// so panicking at startup is acceptable here.
		panic(err)
	}
	// Add completion for access mode flag
	if err := cmd.RegisterFlagCompletionFunc("default-access-mode", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for offload plugin flag
	if err := cmd.RegisterFlagCompletionFunc("default-offload-plugin", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"vsphere"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for offload vendor flag
	if err := cmd.RegisterFlagCompletionFunc("default-offload-vendor", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"flashsystem", "vantara", "ontap", "primera3par", "pureFlashArray", "powerflex", "powermax", "powerstore", "infinibox"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Best-effort completion of existing storage mapping names.
	_ = cmd.RegisterFlagCompletionFunc("name", completion.MappingNameCompletion(kubeConfigFlags, "storage"))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/patch/patch.go | Go | package patch
import (
"github.com/spf13/cobra"
"github.com/yaacov/kubectl-mtv/pkg/util/config"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// GlobalConfigGetter is a type alias for the shared config interface defined
// in pkg/util/config. The alias preserves backward compatibility for code
// that referenced the name from this package while the definition itself is
// centralized.
type GlobalConfigGetter = config.InventoryConfigWithKubeFlags
// NewPatchCmd creates the top-level `patch` command and wires up its resource
// subcommands (mapping, provider, plan, planvm), each registered together
// with a plural alias.
func NewPatchCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	patchCmd := &cobra.Command{
		Use:          "patch",
		Short:        "Patch resources",
		Long:         `Patch various Migration Toolkit for Virtualization resources`,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bare `patch` has nothing to do — show usage.
			return cmd.Help()
		},
	}

	// Attach each subcommand with its plural alias.
	register := func(sub *cobra.Command, alias string) {
		sub.Aliases = []string{alias}
		patchCmd.AddCommand(sub)
	}
	register(NewMappingCmd(kubeConfigFlags, globalConfig), "mappings")
	register(NewProviderCmd(kubeConfigFlags), "providers")
	register(NewPlanCmd(kubeConfigFlags), "plans")
	register(NewPlanVMCmd(kubeConfigFlags), "planvms")

	return patchCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/patch/plan.go | Go | package patch
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/patch/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewPlanCmd creates the patch plan command.
//
// It patches editable PlanSpec fields of an existing migration plan without
// touching the plan's VM list. Because patching must distinguish "flag left
// at default" from "flag explicitly set to the default value", every boolean
// flag has a companion *Changed bool populated from cmd.Flags().Changed(...)
// inside RunE and forwarded to plan.PatchPlan.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// Editable PlanSpec fields
	var transferNetwork string
	var installLegacyDrivers string // "true", "false", or "" for nil (auto-detect)
	migrationTypeFlag := flags.NewMigrationTypeFlag()
	var targetLabels []string
	var targetNodeSelector []string
	var useCompatibilityMode bool
	var targetAffinity string
	var targetNamespace string
	var targetPowerState string
	// Convertor-related flags (virt-v2v conversion pod scheduling)
	var convertorLabels []string
	var convertorNodeSelector []string
	var convertorAffinity string
	// Missing flags from create plan
	var description string
	var preserveClusterCPUModel bool
	var preserveStaticIPs bool
	var pvcNameTemplate string
	var volumeNameTemplate string
	var networkNameTemplate string
	var migrateSharedDisks bool
	var archived bool
	var pvcNameTemplateUseGenerateName bool
	var deleteGuestConversionPod bool
	var deleteVmOnFailMigration bool
	var skipGuestConversion bool
	var warm bool
	var runPreflightInspection bool
	// Plan name (required)
	var planName string
	// Boolean tracking for flag changes — set in RunE, one per boolean flag,
	// so PatchPlan can tell an explicit value apart from an untouched default.
	var useCompatibilityModeChanged bool
	var preserveClusterCPUModelChanged bool
	var preserveStaticIPsChanged bool
	var migrateSharedDisksChanged bool
	var archivedChanged bool
	var pvcNameTemplateUseGenerateNameChanged bool
	var deleteGuestConversionPodChanged bool
	var deleteVmOnFailMigrationChanged bool
	var skipGuestConversionChanged bool
	var warmChanged bool
	var runPreflightInspectionChanged bool

	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Patch a migration plan",
		Long: `Patch an existing migration plan without modifying its VM list.
Use this to update plan settings like migration type, transfer network,
target labels, node selectors, or convertor pod configuration.
Affinity Syntax (KARL):
  The --target-affinity and --convertor-affinity flags use KARL syntax:
    --target-affinity "REQUIRE pods(app=database) on node"
    --convertor-affinity "PREFER pods(app=cache) on zone weight=80"
  Rule types: REQUIRE, PREFER, AVOID, REPEL. Topology: node, zone, region, rack.
  Run 'kubectl-mtv help karl' for the full syntax reference.`,
		Example: ` # Change migration type to warm
  kubectl-mtv patch plan --plan-name my-migration --migration-type warm
  # Update transfer network
  kubectl-mtv patch plan --plan-name my-migration --transfer-network my-namespace/migration-net
  # Add target labels to migrated VMs
  kubectl-mtv patch plan --plan-name my-migration --target-labels env=prod,team=platform
  # Configure convertor pod scheduling
  kubectl-mtv patch plan --plan-name my-migration --convertor-node-selector node-role=worker`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate required --plan-name flag. MarkFlagRequired already rejects
			// a missing flag; this also catches --plan-name "".
			if planName == "" {
				return fmt.Errorf("--plan-name is required")
			}
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Check if boolean flags have been explicitly set (changed from default)
			useCompatibilityModeChanged = cmd.Flags().Changed("use-compatibility-mode")
			preserveClusterCPUModelChanged = cmd.Flags().Changed("preserve-cluster-cpu-model")
			preserveStaticIPsChanged = cmd.Flags().Changed("preserve-static-ips")
			migrateSharedDisksChanged = cmd.Flags().Changed("migrate-shared-disks")
			archivedChanged = cmd.Flags().Changed("archived")
			pvcNameTemplateUseGenerateNameChanged = cmd.Flags().Changed("pvc-name-template-use-generate-name")
			deleteGuestConversionPodChanged = cmd.Flags().Changed("delete-guest-conversion-pod")
			deleteVmOnFailMigrationChanged = cmd.Flags().Changed("delete-vm-on-fail-migration")
			skipGuestConversionChanged = cmd.Flags().Changed("skip-guest-conversion")
			warmChanged = cmd.Flags().Changed("warm")
			runPreflightInspectionChanged = cmd.Flags().Changed("run-preflight-inspection")
			return plan.PatchPlan(plan.PatchPlanOptions{
				ConfigFlags: kubeConfigFlags,
				Name:        planName,
				Namespace:   namespace,
				// Core plan fields
				TransferNetwork:      transferNetwork,
				InstallLegacyDrivers: installLegacyDrivers,
				MigrationType:        string(migrationTypeFlag.GetValue()),
				TargetLabels:         targetLabels,
				TargetNodeSelector:   targetNodeSelector,
				UseCompatibilityMode: useCompatibilityMode,
				TargetAffinity:       targetAffinity,
				TargetNamespace:      targetNamespace,
				TargetPowerState:     targetPowerState,
				// Convertor-related fields
				ConvertorLabels:       convertorLabels,
				ConvertorNodeSelector: convertorNodeSelector,
				ConvertorAffinity:     convertorAffinity,
				// Additional plan fields
				Description:                    description,
				PreserveClusterCPUModel:        preserveClusterCPUModel,
				PreserveStaticIPs:              preserveStaticIPs,
				PVCNameTemplate:                pvcNameTemplate,
				VolumeNameTemplate:             volumeNameTemplate,
				NetworkNameTemplate:            networkNameTemplate,
				MigrateSharedDisks:             migrateSharedDisks,
				Archived:                       archived,
				PVCNameTemplateUseGenerateName: pvcNameTemplateUseGenerateName,
				DeleteGuestConversionPod:       deleteGuestConversionPod,
				SkipGuestConversion:            skipGuestConversion,
				Warm:                           warm,
				RunPreflightInspection:         runPreflightInspection,
				// Flag change tracking
				UseCompatibilityModeChanged:           useCompatibilityModeChanged,
				PreserveClusterCPUModelChanged:        preserveClusterCPUModelChanged,
				PreserveStaticIPsChanged:              preserveStaticIPsChanged,
				MigrateSharedDisksChanged:             migrateSharedDisksChanged,
				ArchivedChanged:                       archivedChanged,
				PVCNameTemplateUseGenerateNameChanged: pvcNameTemplateUseGenerateNameChanged,
				DeleteGuestConversionPodChanged:       deleteGuestConversionPodChanged,
				DeleteVmOnFailMigration:               deleteVmOnFailMigration,
				DeleteVmOnFailMigrationChanged:        deleteVmOnFailMigrationChanged,
				SkipGuestConversionChanged:            skipGuestConversionChanged,
				WarmChanged:                           warmChanged,
				RunPreflightInspectionChanged:         runPreflightInspectionChanged,
			})
		},
	}

	cmd.Flags().StringVar(&planName, "plan-name", "", "Plan name")
	_ = cmd.MarkFlagRequired("plan-name")
	cmd.Flags().StringVar(&transferNetwork, "transfer-network", "", "Network to use for transferring VM data. Supports 'namespace/network-name' or just 'network-name' (uses plan namespace)")
	cmd.Flags().StringVar(&installLegacyDrivers, "install-legacy-drivers", "", "Install legacy Windows drivers (true/false, leave empty for auto-detection)")
	cmd.Flags().Var(migrationTypeFlag, "migration-type", "Migration type: cold, warm, live, or conversion")
	cmd.Flags().StringSliceVar(&targetLabels, "target-labels", []string{}, "Target VM labels in format key=value (can be specified multiple times)")
	cmd.Flags().StringSliceVar(&targetNodeSelector, "target-node-selector", []string{}, "Target node selector in format key=value (can be specified multiple times)")
	cmd.Flags().BoolVar(&useCompatibilityMode, "use-compatibility-mode", false, "Use compatibility devices (SATA bus, E1000E NIC) when skipGuestConversion is true")
	cmd.Flags().StringVar(&targetAffinity, "target-affinity", "", "Target affinity using KARL syntax (e.g. 'REQUIRE pods(app=database) on node')")
	cmd.Flags().StringVar(&targetNamespace, "target-namespace", "", "Target namespace for migrated VMs")
	cmd.Flags().StringVar(&targetPowerState, "target-power-state", "", "Target power state for VMs after migration: 'on', 'off', or 'auto' (default: match source VM power state)")
	// Convertor-related flags (only apply to providers requiring guest conversion)
	cmd.Flags().StringSliceVar(&convertorLabels, "convertor-labels", nil, "Labels to be added to virt-v2v convertor pods (e.g., key1=value1,key2=value2)")
	cmd.Flags().StringSliceVar(&convertorNodeSelector, "convertor-node-selector", nil, "Node selector to constrain convertor pod scheduling (e.g., key1=value1,key2=value2)")
	cmd.Flags().StringVar(&convertorAffinity, "convertor-affinity", "", "Convertor affinity to constrain convertor pod scheduling using KARL syntax")
	// Plan metadata and configuration flags
	cmd.Flags().StringVar(&description, "description", "", "Plan description")
	cmd.Flags().BoolVar(&preserveClusterCPUModel, "preserve-cluster-cpu-model", false, "Preserve the CPU model and flags the VM runs with in its cluster")
	cmd.Flags().BoolVar(&preserveStaticIPs, "preserve-static-ips", false, "Preserve static IP configurations during migration")
	cmd.Flags().StringVar(&pvcNameTemplate, "pvc-name-template", "", "Template for generating PVC names for VM disks. Variables: {{.VmName}}, {{.PlanName}}, {{.DiskIndex}}, {{.WinDriveLetter}}, {{.RootDiskIndex}}, {{.Shared}}, {{.FileName}}")
	cmd.Flags().StringVar(&volumeNameTemplate, "volume-name-template", "", "Template for generating volume interface names in the target VM. Variables: {{.PVCName}}, {{.VolumeIndex}}")
	cmd.Flags().StringVar(&networkNameTemplate, "network-name-template", "", "Template for generating network interface names in the target VM. Variables: {{.NetworkName}}, {{.NetworkNamespace}}, {{.NetworkType}}, {{.NetworkIndex}}")
	cmd.Flags().BoolVar(&migrateSharedDisks, "migrate-shared-disks", true, "Migrate disks shared between multiple VMs")
	cmd.Flags().BoolVar(&archived, "archived", false, "Whether this plan should be archived")
	cmd.Flags().BoolVar(&pvcNameTemplateUseGenerateName, "pvc-name-template-use-generate-name", true, "Use generateName instead of name for PVC name template")
	cmd.Flags().BoolVar(&deleteGuestConversionPod, "delete-guest-conversion-pod", false, "Delete guest conversion pod after successful migration")
	cmd.Flags().BoolVar(&deleteVmOnFailMigration, "delete-vm-on-fail-migration", false, "Delete target VM when migration fails")
	cmd.Flags().BoolVar(&skipGuestConversion, "skip-guest-conversion", false, "Skip the guest conversion process (raw disk copy mode)")
	cmd.Flags().BoolVar(&warm, "warm", false, "Enable warm migration (use --migration-type=warm instead)")
	cmd.Flags().BoolVar(&runPreflightInspection, "run-preflight-inspection", true, "Run preflight inspection on VM base disks before starting disk transfer")

	// Add completion for migration type flag
	if err := cmd.RegisterFlagCompletionFunc("migration-type", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return migrationTypeFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Registration fails only for a programming error (unknown flag);
		// panicking at startup surfaces it immediately.
		panic(err)
	}
	// Add completion for install legacy drivers flag
	if err := cmd.RegisterFlagCompletionFunc("install-legacy-drivers", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"true", "false"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Add completion for target power state flag
	if err := cmd.RegisterFlagCompletionFunc("target-power-state", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"on", "off", "auto"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Best-effort completion of existing plan names.
	_ = cmd.RegisterFlagCompletionFunc("plan-name", completion.PlanNameCompletion(kubeConfigFlags))

	return cmd
}
// NewPlanVMCmd creates the patch planvm command.
//
// It patches VM-specific fields for a single VM inside a plan's VM list,
// identified by the required --plan-name/--vm-name pair. Hook management
// (add pre/post, remove by name, clear all) is handled through dedicated
// flags rather than a generic patch document.
func NewPlanVMCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	// Plan and VM names (required)
	var planName string
	var vmName string
	// VM-specific fields that can be patched
	var targetName string
	var rootDisk string
	var instanceType string
	var pvcNameTemplate string
	var volumeNameTemplate string
	var networkNameTemplate string
	var luksSecret string
	var targetPowerState string
	// Hook-related flags
	var addPreHook string
	var addPostHook string
	var removeHook string
	var clearHooks bool
	// Additional VM flags. The Changed companion lets PatchPlanVM distinguish
	// an explicit `--delete-vm-on-fail-migration=false` from the default.
	var deleteVmOnFailMigration bool
	var deleteVmOnFailMigrationChanged bool

	cmd := &cobra.Command{
		Use:          "planvm",
		Short:        "Patch a specific VM within a migration plan",
		Long:         `Patch VM-specific fields for a VM within a migration plan's VM list.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate required flags. MarkFlagRequired already rejects missing
			// flags; these checks also catch explicitly empty values.
			if planName == "" {
				return fmt.Errorf("--plan-name is required")
			}
			if vmName == "" {
				return fmt.Errorf("--vm-name is required")
			}
			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)
			// Check if boolean flags have been explicitly set (changed from default)
			deleteVmOnFailMigrationChanged = cmd.Flags().Changed("delete-vm-on-fail-migration")
			return plan.PatchPlanVM(kubeConfigFlags, planName, vmName, namespace,
				targetName, rootDisk, instanceType, pvcNameTemplate, volumeNameTemplate, networkNameTemplate, luksSecret, targetPowerState,
				addPreHook, addPostHook, removeHook, clearHooks, deleteVmOnFailMigration, deleteVmOnFailMigrationChanged)
		},
	}

	cmd.Flags().StringVar(&planName, "plan-name", "", "Plan name")
	_ = cmd.MarkFlagRequired("plan-name")
	cmd.Flags().StringVar(&vmName, "vm-name", "", "VM name")
	_ = cmd.MarkFlagRequired("vm-name")
	// VM-specific flags
	cmd.Flags().StringVar(&targetName, "target-name", "", "Custom name for the VM in the target cluster")
	cmd.Flags().StringVar(&rootDisk, "root-disk", "", "The primary disk to boot from")
	cmd.Flags().StringVar(&instanceType, "instance-type", "", "Override the VM's instance type in the target")
	cmd.Flags().StringVar(&pvcNameTemplate, "pvc-name-template", "", "Go template for naming PVCs for this VM's disks. Variables: {{.VmName}}, {{.PlanName}}, {{.DiskIndex}}, {{.WinDriveLetter}}, {{.RootDiskIndex}}, {{.Shared}}, {{.FileName}}")
	cmd.Flags().StringVar(&volumeNameTemplate, "volume-name-template", "", "Go template for naming volume interfaces. Variables: {{.PVCName}}, {{.VolumeIndex}}")
	cmd.Flags().StringVar(&networkNameTemplate, "network-name-template", "", "Go template for naming network interfaces. Variables: {{.NetworkName}}, {{.NetworkNamespace}}, {{.NetworkType}}, {{.NetworkIndex}}")
	cmd.Flags().StringVar(&luksSecret, "luks-secret", "", "Kubernetes Secret name containing LUKS disk decryption keys")
	cmd.Flags().StringVar(&targetPowerState, "target-power-state", "", "Target power state for this VM after migration: 'on', 'off', or 'auto' (default: match source VM power state)")
	// Hook-related flags
	cmd.Flags().StringVar(&addPreHook, "add-pre-hook", "", "Add a pre-migration hook to this VM")
	cmd.Flags().StringVar(&addPostHook, "add-post-hook", "", "Add a post-migration hook to this VM")
	cmd.Flags().StringVar(&removeHook, "remove-hook", "", "Remove a hook from this VM by hook name")
	cmd.Flags().BoolVar(&clearHooks, "clear-hooks", false, "Remove all hooks from this VM")
	// Additional VM flags
	cmd.Flags().BoolVar(&deleteVmOnFailMigration, "delete-vm-on-fail-migration", false, "Delete target VM when migration fails (overrides plan-level setting)")

	// Add completion for hook flags. Registration fails only for a programming
	// error (unknown flag), so panicking at startup is acceptable.
	if err := cmd.RegisterFlagCompletionFunc("add-pre-hook", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("add-post-hook", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	if err := cmd.RegisterFlagCompletionFunc("remove-hook", completion.HookResourceNameCompletion(kubeConfigFlags)); err != nil {
		panic(err)
	}
	// Add completion for target power state flag
	if err := cmd.RegisterFlagCompletionFunc("target-power-state", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"on", "off", "auto"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		panic(err)
	}
	// Best-effort completion of existing plan names.
	_ = cmd.RegisterFlagCompletionFunc("plan-name", completion.PlanNameCompletion(kubeConfigFlags))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/patch/provider.go | Go | package patch
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/patch/provider"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewProviderCmd creates the patch provider command
// NewProviderCmd creates the 'patch provider' subcommand for updating an
// existing provider's editable fields (URL, credentials, VDDK settings, and
// provider-type-specific options). Type and SDK endpoint cannot be changed.
func NewProviderCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	opts := provider.PatchProviderOptions{
		ConfigFlags: kubeConfigFlags,
	}

	// MTV_VDDK_INIT_IMAGE provides an environment default for the VDDK init
	// image. It must be applied as the flag's default value below: assigning
	// it to opts.VddkInitImage here would be useless, because
	// Flags().StringVar resets the bound variable to the flag default ("")
	// during registration, silently discarding the environment value.
	envVddkInitImage := os.Getenv("MTV_VDDK_INIT_IMAGE")

	cmd := &cobra.Command{
		Use:          "provider",
		Short:        "Patch an existing provider",
		Long:         `Patch an existing provider by updating URL, credentials, or VDDK settings. Type and SDK endpoint cannot be changed.`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate required --name flag (defense in depth; the flag is
			// also marked required below).
			if opts.Name == "" {
				return fmt.Errorf("--name is required")
			}

			// Resolve the appropriate namespace based on context and flags
			opts.Namespace = client.ResolveNamespace(kubeConfigFlags)

			// Support @filename syntax: load the CA certificate from a file
			// when the value starts with '@'.
			if strings.HasPrefix(opts.CACert, "@") {
				filePath := opts.CACert[1:]
				fileContent, err := os.ReadFile(filePath)
				if err != nil {
					return fmt.Errorf("failed to read CA certificate file '%s': %v", filePath, err)
				}
				opts.CACert = string(fileContent)
			}

			// Record whether boolean flags were explicitly provided so the
			// patch logic can distinguish "false" from "not set".
			opts.InsecureSkipTLSChanged = cmd.Flag("provider-insecure-skip-tls").Changed
			opts.UseVddkAioOptimizationChanged = cmd.Flag("use-vddk-aio-optimization").Changed

			return provider.PatchProvider(opts)
		},
	}

	// Provider name (required)
	cmd.Flags().StringVarP(&opts.Name, "name", "M", "", "Provider name")
	_ = cmd.MarkFlagRequired("name")

	// Editable provider flags
	cmd.Flags().StringVarP(&opts.URL, "url", "U", "", "Provider URL")
	cmd.Flags().StringVarP(&opts.Username, "username", "u", "", "Provider credentials username")
	cmd.Flags().StringVarP(&opts.Password, "password", "p", "", "Provider credentials password")
	cmd.Flags().StringVar(&opts.CACert, "cacert", "", "Provider CA certificate (use @filename to load from file)")
	cmd.Flags().BoolVar(&opts.InsecureSkipTLS, "provider-insecure-skip-tls", false, "Skip TLS verification when connecting to the provider")

	// OpenShift specific flags
	cmd.Flags().StringVarP(&opts.Token, "provider-token", "T", "", "Provider authentication token")

	// vSphere specific flags (editable VDDK settings). The env value (if
	// any) becomes the default so MTV_VDDK_INIT_IMAGE actually takes effect.
	cmd.Flags().StringVar(&opts.VddkInitImage, "vddk-init-image", envVddkInitImage, "Virtual Disk Development Kit (VDDK) container init image path")
	cmd.Flags().BoolVar(&opts.UseVddkAioOptimization, "use-vddk-aio-optimization", false, "Enable VDDK AIO optimization for improved disk transfer performance")
	cmd.Flags().IntVar(&opts.VddkBufSizeIn64K, "vddk-buf-size-in-64k", 0, "VDDK buffer size in 64K units (VixDiskLib.nfcAio.Session.BufSizeIn64K)")
	cmd.Flags().IntVar(&opts.VddkBufCount, "vddk-buf-count", 0, "VDDK buffer count (VixDiskLib.nfcAio.Session.BufCount)")

	// OpenStack specific flags. Note: --region is an alias bound to the same
	// variable as --provider-region-name; both share the default "".
	cmd.Flags().StringVar(&opts.DomainName, "provider-domain-name", "", "OpenStack domain name")
	cmd.Flags().StringVar(&opts.ProjectName, "provider-project-name", "", "OpenStack project name")
	cmd.Flags().StringVar(&opts.RegionName, "provider-region-name", "", "OpenStack region name")
	cmd.Flags().StringVar(&opts.RegionName, "region", "", "Region name (alias for --provider-region-name)")

	// HyperV specific flags
	cmd.Flags().StringVar(&opts.SMBUrl, "smb-url", "", "SMB share URL for HyperV (e.g., //server/share)")
	cmd.Flags().StringVar(&opts.SMBUser, "smb-user", "", "SMB username (defaults to HyperV username)")
	cmd.Flags().StringVar(&opts.SMBPassword, "smb-password", "", "SMB password (defaults to HyperV password)")

	// EC2 specific flags
	cmd.Flags().StringVar(&opts.EC2Region, "ec2-region", "", "AWS region where source EC2 instances are located")
	cmd.Flags().StringVar(&opts.EC2TargetRegion, "target-region", "", "Target region for migrations (defaults to provider region)")
	cmd.Flags().StringVar(&opts.EC2TargetAZ, "target-az", "", "Target availability zone for migrations (required - EBS volumes are AZ-specific)")
	cmd.Flags().StringVar(&opts.EC2TargetAccessKeyID, "target-access-key-id", "", "Target AWS account access key ID (for cross-account migrations)")
	cmd.Flags().StringVar(&opts.EC2TargetSecretKey, "target-secret-access-key", "", "Target AWS account secret access key (for cross-account migrations)")
	cmd.Flags().BoolVar(&opts.AutoTargetCredentials, "auto-target-credentials", false, "Automatically fetch target AWS credentials from cluster and target-az from worker nodes")

	_ = cmd.RegisterFlagCompletionFunc("name", completion.ProviderNameCompletion(kubeConfigFlags))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/settings/set.go | Go | package settings
import (
"context"
"fmt"
"sort"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/settings"
)
// NewSetCmd creates the 'settings set' subcommand.
//
// The command requires both --setting and --value flags; validation of the
// value against the setting's declared type is performed by
// settings.SetSetting.
func NewSetCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// Flag storage, bound via StringVar below.
	var settingName string
	var settingValue string
	cmd := &cobra.Command{
		Use:   "set",
		Short: "Set a ForkliftController setting value",
		Long: `Set a ForkliftController setting value.
The setting name must be one of the supported settings. Use 'kubectl mtv settings'
to see all available settings and their current values.
Value types are automatically validated:
- Boolean settings accept: true, false, yes, no, 1, 0
- Integer settings accept: numeric values
- String settings accept: any value
Examples:
# Set the VDDK image for vSphere migrations
kubectl mtv settings set --setting vddk_image --value quay.io/myorg/vddk:8.0
# Increase maximum concurrent VM migrations
kubectl mtv settings set --setting controller_max_vm_inflight --value 30
# Enable OpenShift cross-cluster live migration
kubectl mtv settings set --setting feature_ocp_live_migration --value true
# Increase virt-v2v memory limit for large VMs
kubectl mtv settings set --setting virt_v2v_container_limits_memory --value 16Gi
# Set a value starting with -- (use -- to stop flag parsing)
kubectl mtv settings set --setting virt_v2v_extra_args --value --machine-readable`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bound the API interaction so the command cannot hang.
			ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second)
			defer cancel()
			opts := settings.SetSettingOptions{
				ConfigFlags: kubeConfigFlags,
				Name:        settingName,
				Value:       settingValue,
				Verbosity:   globalConfig.GetVerbosity(),
			}
			if err := settings.SetSetting(ctx, opts); err != nil {
				return err
			}
			// Confirm the applied change to the user.
			fmt.Printf("Setting '%s' updated to '%s'\n", settingName, settingValue)
			return nil
		},
	}
	cmd.Flags().StringVar(&settingName, "setting", "", "Setting name")
	cmd.Flags().StringVar(&settingValue, "value", "", "Setting value")
	// MarkFlagRequired only errors for unknown flag names; safe to ignore.
	if err := cmd.MarkFlagRequired("setting"); err != nil {
		_ = err
	}
	if err := cmd.MarkFlagRequired("value"); err != nil {
		_ = err
	}
	// Shell completion: setting names, and type-aware value suggestions.
	_ = cmd.RegisterFlagCompletionFunc("setting", setSettingCompletion)
	_ = cmd.RegisterFlagCompletionFunc("value", setValueCompletion)
	return cmd
}
// setSettingCompletion provides completion for the --setting flag.
// It returns every supported setting name matching the typed prefix, in
// sorted order for stable shell output.
func setSettingCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	matches := make([]string, 0, len(settings.SupportedSettings))
	for candidate := range settings.SupportedSettings {
		if strings.HasPrefix(candidate, toComplete) {
			matches = append(matches, candidate)
		}
	}
	sort.Strings(matches)
	return matches, cobra.ShellCompDirectiveNoFileComp
}
// setValueCompletion provides completion for the --value flag based on the --setting flag.
// Booleans always complete to true/false; for integer and string settings a
// curated list of common values is suggested per setting name.
func setValueCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	settingName, _ := cmd.Flags().GetString("setting")
	def := settings.GetSettingDefinition(settingName)
	if def == nil {
		return nil, cobra.ShellCompDirectiveNoFileComp
	}

	// Common values for integer settings, keyed by setting name.
	intSuggestions := map[string][]string{
		"controller_max_vm_inflight":  {"10", "20", "30", "50", "100"},
		"controller_precopy_interval": {"30", "60", "120", "180"},
		"controller_log_level":        {"0", "1", "2", "3", "4", "5"},
	}

	// Common values for string settings (resource quantities).
	stringSuggestions := map[string][]string{
		"virt_v2v_container_limits_cpu":       {"2000m", "4000m", "6000m", "8000m"},
		"virt_v2v_container_limits_memory":    {"4Gi", "8Gi", "12Gi", "16Gi", "32Gi"},
		"virt_v2v_container_requests_cpu":     {"500m", "1000m", "2000m"},
		"virt_v2v_container_requests_memory":  {"512Mi", "1Gi", "2Gi", "4Gi"},
		"populator_container_limits_cpu":      {"500m", "1000m", "2000m"},
		"populator_container_limits_memory":   {"512Mi", "1Gi", "2Gi"},
		"populator_container_requests_cpu":    {"50m", "100m", "200m"},
		"populator_container_requests_memory": {"256Mi", "512Mi", "1Gi"},
	}

	switch def.Type {
	case settings.TypeBool:
		return []string{"true", "false"}, cobra.ShellCompDirectiveNoFileComp
	case settings.TypeInt:
		if values, ok := intSuggestions[def.Name]; ok {
			return values, cobra.ShellCompDirectiveNoFileComp
		}
	case settings.TypeString:
		if values, ok := stringSuggestions[def.Name]; ok {
			return values, cobra.ShellCompDirectiveNoFileComp
		}
	}
	return nil, cobra.ShellCompDirectiveNoFileComp
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/settings/settings.go | Go | package settings
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/settings"
"github.com/yaacov/kubectl-mtv/pkg/util/config"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// GlobalConfigGetter is a type alias for the shared config interface.
// An alias (=) rather than a new named type, so values flow between this
// package and pkg/util/config without conversion.
type GlobalConfigGetter = config.GlobalConfigGetter
// NewSettingsCmd creates the settings command with subcommands.
//
// Running the bare command lists setting values; the get/set/unset
// subcommands attached at the bottom handle individual settings.
func NewSettingsCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// Output format flag holder (json|yaml|table).
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	// When true, include all settings, not just the curated subset.
	var allSettings bool
	cmd := &cobra.Command{
		Use:   "settings",
		Short: "View and manage ForkliftController settings",
		Long: `View and manage ForkliftController configuration settings.
This command provides access to a curated subset of ForkliftController settings
that users commonly need to configure, including:
- VDDK image for vSphere migrations
- Feature flags (warm migration, copy offload, live migration)
- Performance tuning (max concurrent VMs, precopy interval)
- Container resource settings (virt-v2v, populator)
- Debugging options (log level)
Use --all to see all available ForkliftController settings, including advanced
options for controller, inventory, API, validation, and other components.
Examples:
# View common settings
kubectl mtv settings
# View ALL settings (including advanced)
kubectl mtv settings --all
# View settings in YAML format
kubectl mtv settings --output yaml
# Get a specific setting
kubectl mtv settings get --setting vddk_image
# Set a value
kubectl mtv settings set --setting vddk_image --value quay.io/myorg/vddk:8.0
kubectl mtv settings set --setting controller_max_vm_inflight --value 30
kubectl mtv settings set --setting feature_ocp_live_migration --value true`,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Default action: show all settings
			// Bound the API interaction so the command cannot hang.
			ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second)
			defer cancel()
			opts := settings.GetSettingsOptions{
				ConfigFlags: kubeConfigFlags,
				AllSettings: allSettings,
			}
			settingValues, err := settings.GetSettings(ctx, opts)
			if err != nil {
				return err
			}
			return formatOutput(settingValues, outputFormatFlag.GetValue())
		},
	}
	// Add output format flag
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (json, yaml, table)")
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Silently ignore completion registration errors
		_ = err
	}
	// Add --all flag
	cmd.Flags().BoolVar(&allSettings, "all", false, "Show all ForkliftController settings (not just common ones)")
	// Add subcommands
	cmd.AddCommand(newGetCmd(kubeConfigFlags, globalConfig))
	cmd.AddCommand(NewSetCmd(kubeConfigFlags, globalConfig))
	cmd.AddCommand(NewUnsetCmd(kubeConfigFlags, globalConfig))
	return cmd
}
// newGetCmd creates the 'settings get' subcommand.
//
// With --setting it prints just that setting's value (plain, in table mode);
// without it, all (or --all) settings are listed.
func newGetCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// Output format flag holder (json|yaml|table).
	outputFormatFlag := flags.NewOutputFormatTypeFlag()
	// When true, include all settings, not just the curated subset.
	var allSettings bool
	// Optional single setting name; empty means "show all".
	var settingName string
	cmd := &cobra.Command{
		Use:   "get",
		Short: "Get ForkliftController setting value(s)",
		Long: `Get the current value of one or more ForkliftController settings.
If no setting name is provided, all settings are displayed.
If a setting name is provided, only that setting's value is shown.
Use --all to see all available ForkliftController settings, including advanced
options for controller, inventory, API, validation, and other components.
Examples:
# Get common settings
kubectl mtv settings get
# Get ALL settings (including advanced)
kubectl mtv settings get --all
# Get a specific setting
kubectl mtv settings get --setting vddk_image
kubectl mtv settings get --setting controller_max_vm_inflight
kubectl mtv settings get --setting controller_container_limits_cpu`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bound the API interaction so the command cannot hang.
			ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second)
			defer cancel()
			opts := settings.GetSettingsOptions{
				ConfigFlags: kubeConfigFlags,
				AllSettings: allSettings,
				SettingName: settingName,
			}
			settingValues, err := settings.GetSettings(ctx, opts)
			if err != nil {
				return err
			}
			// If getting a single setting, just print the value
			// (table mode only; json/yaml still emit the full record).
			if opts.SettingName != "" && outputFormatFlag.GetValue() == "table" {
				if len(settingValues) > 0 {
					fmt.Println(settings.FormatValue(settingValues[0]))
				}
				return nil
			}
			return formatOutput(settingValues, outputFormatFlag.GetValue())
		},
	}
	// Add output format flag
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (json, yaml, table)")
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Completion registration failures are not critical; ignore.
		_ = err
	}
	// Add --setting flag for getting a specific setting
	cmd.Flags().StringVar(&settingName, "setting", "", "Setting name to get (if not provided, shows all settings)")
	// Add --all flag
	cmd.Flags().BoolVar(&allSettings, "all", false, "Show all ForkliftController settings (not just common ones)")
	_ = cmd.RegisterFlagCompletionFunc("setting", getSettingCompletion)
	return cmd
}
// getSettingCompletion provides completion for the --setting flag in 'settings get'.
// Returns supported setting names matching the typed prefix, sorted for
// stable shell output.
func getSettingCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	matches := make([]string, 0, len(settings.SupportedSettings))
	for candidate := range settings.SupportedSettings {
		if strings.HasPrefix(candidate, toComplete) {
			matches = append(matches, candidate)
		}
	}
	sort.Strings(matches)
	return matches, cobra.ShellCompDirectiveNoFileComp
}
// formatOutput renders the settings in the requested format. Any format
// other than "json" or "yaml" falls back to the human-readable table.
func formatOutput(settingValues []settings.SettingValue, format string) error {
	if format == "json" {
		return formatJSON(settingValues)
	}
	if format == "yaml" {
		return formatYAML(settingValues)
	}
	return formatTable(settingValues)
}
// formatTable renders settings as an aligned text table on stdout, with one
// row per setting showing category, name, current value, and default.
func formatTable(settingValues []settings.SettingValue) error {
	// Render into a buffer first; tabwriter needs the full table before it
	// can compute column widths.
	var buf strings.Builder
	tw := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0)

	fmt.Fprintln(tw, "CATEGORY\tSETTING\tVALUE\tDEFAULT")
	for _, sv := range settingValues {
		fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n",
			sv.Definition.Category,
			sv.Name,
			settings.FormatValue(sv),
			settings.FormatDefault(sv.Definition),
		)
	}

	tw.Flush()
	fmt.Print(buf.String())
	return nil
}
// settingOutput is used for JSON/YAML output.
// Value is set to nil by the formatters when the setting is not explicitly
// set (IsSet == false), letting consumers distinguish "unset" from an
// explicit empty value.
type settingOutput struct {
	Name        string      `json:"name" yaml:"name"`
	Value       interface{} `json:"value" yaml:"value"`
	Default     interface{} `json:"default" yaml:"default"`
	IsSet       bool        `json:"isSet" yaml:"isSet"`
	Category    string      `json:"category" yaml:"category"`
	Description string      `json:"description" yaml:"description"`
}
// toSettingOutputs converts SettingValue slices into the serializable
// settingOutput form shared by the JSON and YAML formatters. Unset settings
// are emitted with a nil value so consumers can distinguish "unset" from an
// explicit empty value.
func toSettingOutputs(settingValues []settings.SettingValue) []settingOutput {
	output := make([]settingOutput, 0, len(settingValues))
	for _, sv := range settingValues {
		value := sv.Value
		if !sv.IsSet {
			value = nil
		}
		output = append(output, settingOutput{
			Name:        sv.Name,
			Value:       value,
			Default:     sv.Default,
			IsSet:       sv.IsSet,
			Category:    string(sv.Definition.Category),
			Description: sv.Definition.Description,
		})
	}
	return output
}

// formatJSON formats settings as indented JSON followed by a newline.
func formatJSON(settingValues []settings.SettingValue) error {
	data, err := json.MarshalIndent(toSettingOutputs(settingValues), "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal JSON: %w", err)
	}
	fmt.Println(string(data))
	return nil
}

// formatYAML formats settings as YAML (no trailing newline beyond what the
// marshaller emits).
func formatYAML(settingValues []settings.SettingValue) error {
	data, err := yaml.Marshal(toSettingOutputs(settingValues))
	if err != nil {
		return fmt.Errorf("failed to marshal YAML: %w", err)
	}
	fmt.Print(string(data))
	return nil
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/settings/unset.go | Go | package settings
import (
"context"
"fmt"
"sort"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/settings"
)
// NewUnsetCmd creates the 'settings unset' subcommand.
//
// Removing a setting from the ForkliftController spec reverts it to the
// controller's built-in default.
func NewUnsetCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig GlobalConfigGetter) *cobra.Command {
	// Flag storage; the --setting flag is required.
	var settingName string
	cmd := &cobra.Command{
		Use:   "unset",
		Short: "Remove a ForkliftController setting (revert to default)",
		Long: `Remove a ForkliftController setting, reverting it to the default value.
This removes the setting from the ForkliftController spec, causing the controller
to use its default value instead.
Examples:
# Remove the VDDK image setting (revert to default)
kubectl mtv settings unset --setting vddk_image
# Remove extra virt-v2v arguments
kubectl mtv settings unset --setting virt_v2v_extra_args
# Revert max concurrent VMs to default (20)
kubectl mtv settings unset --setting controller_max_vm_inflight`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bound the API interaction so the command cannot hang.
			ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second)
			defer cancel()
			opts := settings.UnsetSettingOptions{
				ConfigFlags: kubeConfigFlags,
				Name:        settingName,
				Verbosity:   globalConfig.GetVerbosity(),
			}
			if err := settings.UnsetSetting(ctx, opts); err != nil {
				return err
			}
			// Report the default that will now apply, when known.
			def := settings.GetSettingDefinition(settingName)
			if def != nil {
				fmt.Printf("Setting '%s' removed (will use default: %s)\n", settingName, settings.FormatDefault(*def))
			} else {
				fmt.Printf("Setting '%s' removed\n", settingName)
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&settingName, "setting", "", "Setting name")
	// MarkFlagRequired only errors for unknown flag names; safe to ignore.
	if err := cmd.MarkFlagRequired("setting"); err != nil {
		_ = err
	}
	_ = cmd.RegisterFlagCompletionFunc("setting", unsetSettingCompletion)
	return cmd
}
// unsetSettingCompletion provides completion for the --setting flag.
// Returns supported setting names matching the typed prefix, sorted for
// stable shell output.
func unsetSettingCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	matches := make([]string, 0, len(settings.SupportedSettings))
	for candidate := range settings.SupportedSettings {
		if strings.HasPrefix(candidate, toComplete) {
			matches = append(matches, candidate)
		}
	}
	sort.Strings(matches)
	return matches, cobra.ShellCompDirectiveNoFileComp
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/start/plan.go | Go | package start
import (
"errors"
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/pkg/cmd/start/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan start command.
//
// Starts one or more plans (by name, or all in the namespace). With
// --dry-run the Migration CR(s) are printed instead of created.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	var cutoverTimeStr string
	var all bool
	var dryRun bool
	var outputFormat string
	var planNames []string

	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Start one or more migration plans",
		Long: `Start one or more migration plans.
For cold migrations, the migration begins immediately. For warm migrations,
you can optionally specify a cutover time; if not provided, cutover defaults
to 1 hour from the start time.
The plan must be in a 'Ready' state to be started.
Use --dry-run to output the Migration CR(s) to stdout instead of creating
them in Kubernetes. This is useful for debugging, validation, and inspection.`,
		Example: ` # Start a migration plan
 kubectl-mtv start plan --name my-migration
 # Start multiple plans
 kubectl-mtv start plans --name plan1,plan2,plan3
 # Start all plans in the namespace
 kubectl-mtv start plans --all
 # Start with scheduled cutover (warm migration)
 kubectl-mtv start plan --name my-migration --cutover 2026-12-31T23:00:00Z
 # Start warm migration with cutover in 2 hours (Linux)
 kubectl-mtv start plan --name my-migration --cutover "$(date -d '+2 hours' --iso-8601=sec)"
 # Dry-run: output Migration CR to stdout (YAML format)
 kubectl-mtv start plan --name my-migration --dry-run
 # Dry-run: output Migration CR in JSON format
 kubectl-mtv start plan --name my-migration --dry-run --output json
 # Dry-run: output all Migration CRs in namespace
 kubectl-mtv start plans --all --dry-run`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate mutual exclusivity of --name and --all
			if all && len(planNames) > 0 {
				return errors.New("cannot use --name with --all")
			}
			if !all && len(planNames) == 0 {
				return errors.New("must specify --name or --all")
			}

			// Validate flag combinations BEFORE any cluster access so that
			// invalid invocations fail fast without an API round trip.
			// --output is only meaningful together with --dry-run.
			if !dryRun && outputFormat != "" {
				return fmt.Errorf("--output flag can only be used with --dry-run")
			}
			// Validate output format for dry-run
			if dryRun && outputFormat != "" && outputFormat != "json" && outputFormat != "yaml" {
				return fmt.Errorf("invalid output format for dry-run: %s. Valid formats are: json, yaml", outputFormat)
			}
			// Set default output format for dry-run
			if dryRun && outputFormat == "" {
				outputFormat = "yaml"
			}

			// Cache kubeconfig flags for reuse throughout the function
			cfg := globalConfig.GetKubeConfigFlags()

			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(cfg)

			var cutoverTime *time.Time
			if cutoverTimeStr != "" {
				// Parse the provided cutover time (RFC3339/ISO8601).
				t, err := time.Parse(time.RFC3339, cutoverTimeStr)
				if err != nil {
					return fmt.Errorf("failed to parse cutover time: %v", err)
				}
				cutoverTime = &t
			}

			if all {
				// Expand --all into the concrete plan name list.
				var err error
				planNames, err = client.GetAllPlanNames(cmd.Context(), cfg, namespace)
				if err != nil {
					return fmt.Errorf("failed to get all plan names: %v", err)
				}
				if len(planNames) == 0 {
					fmt.Fprintf(cmd.OutOrStdout(), "No plans found in namespace %s\n", namespace)
					return nil
				}
			}

			// Loop over each plan name and start it (dry-run is handled inside plan.Start)
			for _, name := range planNames {
				if err := plan.Start(cfg, name, namespace, cutoverTime, globalConfig.GetUseUTC(), dryRun, outputFormat); err != nil {
					return fmt.Errorf("failed to start plan %q: %w", name, err)
				}
			}
			return nil
		},
	}

	cmd.Flags().StringSliceVarP(&planNames, "name", "M", nil, "Plan name(s) to start (comma-separated, e.g. \"plan1,plan2\")")
	cmd.Flags().StringSliceVar(&planNames, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	cmd.Flags().StringVarP(&cutoverTimeStr, "cutover", "c", "", "Cutover time in ISO8601 format (e.g., 2023-12-31T15:30:00Z, '$(date -d \"+1 hour\" --iso-8601=sec)' ). If not provided, defaults to 1 hour from now.")
	cmd.Flags().BoolVar(&all, "all", false, "Start all migration plans in the namespace")
	cmd.Flags().BoolVar(&dryRun, "dry-run", false, "Output Migration CR(s) to stdout instead of creating them")
	cmd.Flags().StringVarP(&outputFormat, "output", "o", "", "Output format for dry-run (json, yaml). Defaults to yaml when --dry-run is used")

	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))

	// Add completion for output format flag
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		// Ignore completion registration errors - not critical
		_ = err
	}

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/start/start.go | Go | package start
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
)
// NewStartCmd creates the start command with all its subcommands.
func NewStartCmd(kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	startCmd := &cobra.Command{
		Use:          "start",
		Short:        "Start resources",
		Long:         `Start various MTV resources`,
		SilenceUsage: true,
	}

	// "plans" is accepted as a plural alias for the plan subcommand.
	planSubCmd := NewPlanCmd(kubeConfigFlags, globalConfig)
	planSubCmd.Aliases = []string{"plans"}
	startCmd.AddCommand(planSubCmd)

	return startCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/unarchive/plan.go | Go | package unarchive
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/pkg/cmd/archive/plan"
"github.com/yaacov/kubectl-mtv/pkg/util/client"
"github.com/yaacov/kubectl-mtv/pkg/util/completion"
)
// NewPlanCmd creates the plan unarchive command.
//
// Unarchives one or more plans (by name, or all in the namespace) by
// calling plan.Archive with archived=false.
func NewPlanCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	var all bool
	var planNames []string

	cmd := &cobra.Command{
		Use:   "plan",
		Short: "Unarchive one or more migration plans",
		Long: `Unarchive one or more migration plans.
Unarchiving restores a previously archived plan, allowing it to be started again.
This is useful if you need to retry a migration or make changes to an archived plan.`,
		Example: ` # Unarchive a plan
 kubectl-mtv unarchive plan --name my-migration
 # Unarchive multiple plans
 kubectl-mtv unarchive plans --name plan1,plan2,plan3
 # Unarchive all archived plans in the namespace
 kubectl-mtv unarchive plans --all`,
		Args:         cobra.NoArgs,
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate mutual exclusivity of --name and --all
			if all && len(planNames) > 0 {
				return errors.New("cannot use --name with --all")
			}
			if !all && len(planNames) == 0 {
				return errors.New("must specify --name or --all")
			}

			// Resolve the appropriate namespace based on context and flags
			namespace := client.ResolveNamespace(kubeConfigFlags)

			if all {
				// Expand --all into the concrete plan name list.
				var err error
				planNames, err = client.GetAllPlanNames(cmd.Context(), kubeConfigFlags, namespace)
				if err != nil {
					return fmt.Errorf("failed to get all plan names: %v", err)
				}
				if len(planNames) == 0 {
					// Write through the command's output stream (not bare
					// fmt.Printf) so output can be captured/redirected,
					// consistent with the start command.
					fmt.Fprintf(cmd.OutOrStdout(), "No plans found in namespace %s\n", namespace)
					return nil
				}
			}

			// Unarchive each plan: archived=false clears the archived flag.
			for _, name := range planNames {
				if err := plan.Archive(cmd.Context(), kubeConfigFlags, name, namespace, false); err != nil {
					// Wrap with the plan name so multi-plan invocations
					// report which plan failed (consistent with start).
					return fmt.Errorf("failed to unarchive plan %q: %w", name, err)
				}
			}
			return nil
		},
	}

	cmd.Flags().StringSliceVarP(&planNames, "name", "M", nil, "Plan name(s) to unarchive (comma-separated, e.g. \"plan1,plan2\")")
	cmd.Flags().StringSliceVar(&planNames, "names", nil, "Alias for --name")
	_ = cmd.Flags().MarkHidden("names")
	cmd.Flags().BoolVar(&all, "all", false, "Unarchive all migration plans in the namespace")

	_ = cmd.RegisterFlagCompletionFunc("name", completion.PlanNameCompletion(kubeConfigFlags))

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/unarchive/unarchive.go | Go | package unarchive
import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// NewUnArchiveCmd creates the unarchive command with all its subcommands.
func NewUnArchiveCmd(kubeConfigFlags *genericclioptions.ConfigFlags) *cobra.Command {
	unarchiveCmd := &cobra.Command{
		Use:          "unarchive",
		Short:        "Un-archive resources",
		Long:         `Un-archive various MTV resources`,
		SilenceUsage: true,
	}

	// "plans" is accepted as a plural alias for the plan subcommand.
	planSubCmd := NewPlanCmd(kubeConfigFlags)
	planSubCmd.Aliases = []string{"plans"}
	unarchiveCmd.AddCommand(planSubCmd)

	return unarchiveCmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
cmd/version/version.go | Go | package version
import (
"context"
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/yaacov/kubectl-mtv/cmd/get"
"github.com/yaacov/kubectl-mtv/pkg/cmd/version"
"github.com/yaacov/kubectl-mtv/pkg/util/flags"
)
// NewVersionCmd creates the version command.
//
// Prints client (and, unless --client is given, cluster operator) version
// information in the requested output format.
func NewVersionCmd(clientVersion string, kubeConfigFlags *genericclioptions.ConfigFlags, globalConfig get.GlobalConfigGetter) *cobra.Command {
	var clientOnly bool
	outputFormatFlag := flags.NewOutputFormatTypeFlag()

	cmd := &cobra.Command{
		Use:   "version",
		Short: "Print the version information",
		Long: `Print the version information for kubectl-mtv and MTV Operator.
Use --client to print only the client version without connecting to the cluster.
This is useful for CI/CD pipelines, MCP servers, or when the cluster is unavailable.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// If --client flag is set, skip cluster connectivity and return only client version
			if clientOnly {
				clientInfo := version.Info{
					ClientVersion: clientVersion,
				}
				output, err := clientInfo.FormatOutput(outputFormatFlag.GetValue())
				if err != nil {
					return err
				}
				fmt.Print(output)
				return nil
			}

			// Create context with 20s timeout so 'version' cannot hang on an
			// unreachable cluster.
			ctx, cancel := context.WithTimeout(cmd.Context(), 20*time.Second)
			defer cancel()

			// Get version information (globalConfig handles inventory URL and insecure flag)
			versionInfo := version.GetVersionInfo(ctx, clientVersion, kubeConfigFlags, globalConfig)

			// Format and output the version information
			output, err := versionInfo.FormatOutput(outputFormatFlag.GetValue())
			if err != nil {
				return err
			}
			fmt.Print(output)
			return nil
		},
	}

	cmd.Flags().BoolVar(&clientOnly, "client", false, "Print only the client version (skip cluster connectivity)")
	cmd.Flags().VarP(outputFormatFlag, "output", "o", "Output format (json, yaml, table)")

	// Add completion for output format flag. Registration failures are not
	// critical and are ignored, consistent with every other command in this
	// CLI (previously this panicked, which could crash the whole plugin over
	// a cosmetic shell-completion feature).
	if err := cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return outputFormatFlag.GetValidValues(), cobra.ShellCompDirectiveNoFileComp
	}); err != nil {
		_ = err
	}

	return cmd
}
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/auth/test_auth.py | Python | """Auth -- verify that bearer-token authentication is enforced."""
import pytest
from conftest import (
TEST_NAMESPACE,
MCPToolError,
_make_mcp_session,
call_tool,
)
# A clearly fake token that no Kubernetes cluster would accept.
BAD_TOKEN = "bad-token-this-should-never-authenticate"

# A simple read command used to probe authentication.
GET_PROVIDER_CMD = {
    "command": "get provider",
    "flags": {"namespace": TEST_NAMESPACE},
}


@pytest.mark.order(4)
async def test_bad_token_rejected(mcp_server_process):
    """Session with an invalid bearer token must fail tool calls.

    Opens an MCP session whose Authorization header carries a bogus bearer
    token and asserts that a read-only tool call raises MCPToolError, i.e.
    the server enforces authentication rather than falling back to
    anonymous access.
    """
    bad_headers = {"Authorization": f"Bearer {BAD_TOKEN}"}
    async with _make_mcp_session(headers=bad_headers) as session:
        with pytest.raises(MCPToolError):
            await call_tool(session, "mtv_read", GET_PROVIDER_CMD, verbose=0)
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/conftest.py | Python | """
Root conftest -- shared fixtures, helpers, and constants for MCP E2E tests.
Tests assume a running MCP server is available at MCP_SSE_URL.
Server lifecycle management (start/stop) is handled separately via make
targets, allowing flexible deployment:
- Binary mode: Local kubectl-mtv process
- Container mode: Docker/podman container
- Remote mode: External service
Environment variables are loaded from a .env file if present; otherwise
plain ``os.environ`` is used.
"""
import asyncio as _asyncio
import contextlib
import json
import os
import socket
import subprocess
import time
import urllib.parse
import pytest
import pytest_asyncio
from mcp import ClientSession
from mcp.client.sse import sse_client
# ---------------------------------------------------------------------------
# Environment -- load .env file if present, otherwise use system env vars
# ---------------------------------------------------------------------------
from dotenv import load_dotenv
_env_path = os.path.join(os.path.dirname(__file__), ".env")
if os.path.exists(_env_path):
load_dotenv(_env_path)
# ---------------------------------------------------------------------------
# Required environment variables — fail early with a clear message
# ---------------------------------------------------------------------------
_MISSING: list[str] = []
def _require(name: str) -> str:
    """Fetch a required environment variable.

    Returns the value when it is set and non-empty; otherwise records the
    variable name in the module-level ``_MISSING`` list (reported in one
    batch later) and returns the empty string.
    """
    value = os.environ.get(name)
    if value:
        return value
    _MISSING.append(name)
    return ""
GOVC_URL: str = _require("GOVC_URL")
GOVC_USERNAME: str = _require("GOVC_USERNAME")
GOVC_PASSWORD: str = _require("GOVC_PASSWORD")
GOVC_INSECURE: str = os.environ.get("GOVC_INSECURE", "true")
KUBE_API_URL: str = _require("KUBE_API_URL")
KUBE_TOKEN: str = _require("KUBE_TOKEN")
# ESXi host credentials (fall back to GOVC_* values if not set explicitly)
ESXI_USERNAME: str = os.environ.get("ESXI_USERNAME") or GOVC_USERNAME
ESXI_PASSWORD: str = os.environ.get("ESXI_PASSWORD") or GOVC_PASSWORD
# Normalize vSphere URL: ensure https:// prefix and /sdk suffix
_raw_govc = GOVC_URL
if not _raw_govc.startswith(("http://", "https://")):
_raw_govc = f"https://{_raw_govc}"
if not _raw_govc.rstrip("/").endswith("/sdk"):
_raw_govc = _raw_govc.rstrip("/") + "/sdk"
VSPHERE_URL: str = _raw_govc
# ---------------------------------------------------------------------------
# MCP server settings -- tests connect to a running server
# ---------------------------------------------------------------------------
MCP_SSE_HOST: str = os.environ.get("MCP_SSE_HOST", "127.0.0.1")
MCP_SSE_PORT: str = os.environ.get("MCP_SSE_PORT", "18443")
MCP_SSE_URL: str = os.environ.get(
"MCP_SSE_URL",
f"http://{MCP_SSE_HOST}:{MCP_SSE_PORT}/sse"
)
# Server management settings (used by make targets, not by tests)
MTV_BINARY: str = os.environ.get(
"MTV_BINARY",
os.path.join(os.path.dirname(__file__), "..", "..", "kubectl-mtv"),
)
MCP_IMAGE: str = os.environ.get("MCP_IMAGE", "")
CONTAINER_ENGINE: str = os.environ.get("CONTAINER_ENGINE", "")
# ---------------------------------------------------------------------------
# Test resources — environment-specific, REQUIRED (no defaults)
# ---------------------------------------------------------------------------
ESXI_HOST_NAME: str = _require("ESXI_HOST_NAME")
COLD_VMS: str = _require("COLD_VMS")
WARM_VMS: str = _require("WARM_VMS")
NETWORK_PAIRS: str = _require("NETWORK_PAIRS")
STORAGE_PAIRS: str = _require("STORAGE_PAIRS")
# ---------------------------------------------------------------------------
# Test naming — have sensible defaults, override only if needed
# ---------------------------------------------------------------------------
TEST_NAMESPACE: str = os.environ.get("TEST_NAMESPACE", "mcp-e2e-test")
VSPHERE_PROVIDER_NAME: str = os.environ.get("VSPHERE_PROVIDER_NAME", "mcp-e2e-vsphere")
OCP_PROVIDER_NAME: str = os.environ.get("OCP_PROVIDER_NAME", "mcp-e2e-host")
COLD_PLAN_NAME: str = os.environ.get("COLD_PLAN_NAME", "mcp-e2e-cold-plan")
WARM_PLAN_NAME: str = os.environ.get("WARM_PLAN_NAME", "mcp-e2e-warm-plan")
# ---------------------------------------------------------------------------
# Fail fast if any required variables are missing
# ---------------------------------------------------------------------------
if _MISSING:
raise EnvironmentError(
"The following required environment variables are not set:\n"
+ "".join(f" - {v}\n" for v in _MISSING)
+ "\nCopy e2e/mcp/env.example to .env and fill in real values, "
"or export them in your shell."
)
# Derived from NETWORK_PAIRS / STORAGE_PAIRS -- the source-side names the
# inventory *must* contain for the migration plans to work.
TEST_NETWORK_NAME = NETWORK_PAIRS.split(":")[0] # e.g. "VM Network"
TEST_DATASTORE_NAME = STORAGE_PAIRS.split(":")[0] # e.g. "nfs-us-mtv-v8"
# ---------------------------------------------------------------------------
# Custom exception
# ---------------------------------------------------------------------------
class MCPToolError(Exception):
"""Raised when an MCP tool call returns an error."""
def __init__(self, tool: str, message: str):
self.tool = tool
super().__init__(f"MCP tool '{tool}' error: {message}")
# ---------------------------------------------------------------------------
# Helper: call an MCP tool and return parsed result
# ---------------------------------------------------------------------------
# Default verbosity for MCP tool calls (0=silent, 1=info, 2=debug, 3=trace)
MCP_VERBOSE: int = int(os.environ.get("MCP_VERBOSE", "1"))
def _redact_secrets(obj, secret_keys: set):
"""Return a deep copy of *obj* with values for *secret_keys* replaced."""
if isinstance(obj, dict):
return {
k: ("***" if k in secret_keys else _redact_secrets(v, secret_keys))
for k, v in obj.items()
}
if isinstance(obj, list):
return [_redact_secrets(v, secret_keys) for v in obj]
return obj
async def call_tool(
    session: ClientSession,
    tool_name: str,
    arguments: dict,
    *,
    verbose: int | None = None,
) -> dict:
    """Call an MCP tool via the SSE session and return the parsed response.

    The kubectl-mtv MCP server returns a JSON envelope::

        {"return_value": 0, "data": [...]}    # structured (json output)
        {"return_value": 0, "output": "..."}  # text (table / yaml / health)

    This helper extracts that envelope. On ``isError`` it raises
    :class:`MCPToolError`.

    Args:
        verbose: Override the default verbosity (``MCP_VERBOSE`` env var).
            When >= 1, the tool name and arguments are printed before the
            call, and a summary of the response is printed after.
    """
    level = verbose if verbose is not None else MCP_VERBOSE
    # Inject verbose into flags when the tool supports it.  Build fresh
    # dicts instead of mutating the caller's arguments in place.
    if "flags" in arguments and level > 0:
        arguments = {**arguments, "flags": {**arguments["flags"], "verbose": level}}
    _secret_keys = {"password", "token"}
    if level >= 2:
        # Debug level: print the full argument payload, secrets masked.
        sanitized = _redact_secrets(arguments, _secret_keys)
        print(f"\n  [call] {tool_name} {json.dumps(sanitized, indent=2)}")
    elif level >= 1:
        # Info level: one-line summary (command plus non-secret flags).
        cmd = arguments.get("command", arguments.get("action", ""))
        flags_summary = {
            k: v for k, v in arguments.get("flags", {}).items()
            if k not in _secret_keys and k != "verbose"
        }
        print(f"\n  [call] {tool_name} {cmd} {flags_summary}")
    result = await session.call_tool(tool_name, arguments)
    if result.isError:
        # Join all text content blocks into one error message.
        parts = []
        for content in result.content:
            if hasattr(content, "text"):
                parts.append(content.text)
        error_msg = "\n".join(parts)
        if level >= 1:
            # Strip klog noise so the real error is visible
            meaningful = "\n".join(
                ln for ln in error_msg.splitlines()
                if not ln.lstrip().startswith("I0") and ln.strip()
            )
            print(f"  [error] {tool_name}: {meaningful[:800]}")
        raise MCPToolError(tool_name, error_msg)
    # Prefer structuredContent (Go MCP SDK populates this from output schema)
    if result.structuredContent is not None:
        parsed = result.structuredContent
    else:
        # Fall back to parsing the first text content block as JSON
        parsed = {}
        for content in result.content:
            if hasattr(content, "text"):
                try:
                    parsed = json.loads(content.text)
                except json.JSONDecodeError:
                    # Plain-text output (e.g. table format): wrap it in the
                    # standard envelope shape with a success return code.
                    parsed = {"output": content.text, "return_value": 0}
                break
    if level >= 2:
        # Debug level: show up to the first 800 chars of the parsed result.
        preview = json.dumps(parsed, indent=2)[:800]
        print(f"  [result] {preview}")
    elif level >= 1:
        rc = parsed.get("return_value", "?")
        data_len = len(parsed.get("data", []))
        out_len = len(parsed.get("output", ""))
        print(f"  [result] return_value={rc} data_items={data_len} output_len={out_len}")
    return parsed
# ---------------------------------------------------------------------------
# Helper: kubectl wait
# ---------------------------------------------------------------------------
def _kubectl_wait(
    resource: str | list[str],
    condition: str,
    *,
    namespace: str | None = None,
    timeout: int = 120,
) -> subprocess.CompletedProcess:
    """Block until ``kubectl wait --for=<condition>`` succeeds on *resource*.

    Args:
        resource: One resource (``"namespace/foo"``) or a list of them,
            e.g. ``["providers.forklift.konveyor.io/a", "...b"]``.
        condition: Value handed to ``--for``, e.g. ``"delete"`` or
            ``"jsonpath={.status.phase}=Ready"``.
        namespace: Kubernetes namespace; omit for cluster-scoped resources.
        timeout: Seconds passed to kubectl's own ``--timeout``.

    Returns:
        The completed subprocess result on success.

    Raises:
        RuntimeError: If ``kubectl wait`` exits with a non-zero code.
    """
    targets = [resource] if isinstance(resource, str) else resource
    args = _kubectl_base_args()
    args += ["wait", *targets, f"--for={condition}", f"--timeout={timeout}s"]
    if namespace:
        args.extend(["-n", namespace])
    # Give the Python-side timeout a grace period beyond kubectl's own.
    r = subprocess.run(args, capture_output=True, text=True, timeout=timeout + 30)
    if r.returncode != 0:
        raise RuntimeError(
            f"kubectl wait failed (rc={r.returncode}): {r.stderr.strip()}"
        )
    return r
# ---------------------------------------------------------------------------
# Helper: retry a CLI command until it succeeds
# ---------------------------------------------------------------------------
def _retry_command(
cmd: list[str],
*,
timeout: int = 180,
interval: int = 10,
description: str = "command",
) -> subprocess.CompletedProcess:
"""Retry *cmd* until it exits 0 or *timeout* seconds elapse.
Use this for application-level readiness checks where no Kubernetes
resource condition exists (e.g. waiting for the inventory service to
start serving requests after a provider becomes Ready).
Raises:
RuntimeError: If the command never succeeds within *timeout*.
"""
deadline = time.monotonic() + timeout
last_result = None
while time.monotonic() < deadline:
last_result = subprocess.run(
cmd, capture_output=True, text=True, timeout=30,
)
if last_result.returncode == 0:
return last_result
time.sleep(interval)
stderr = last_result.stderr.strip() if last_result else "(no output)"
raise RuntimeError(
f"Timed out after {timeout}s waiting for {description}: {stderr}"
)
# ---------------------------------------------------------------------------
# Helper: kubectl namespace management (direct subprocess, not via MCP)
# ---------------------------------------------------------------------------
def _kubectl_base_args() -> list[str]:
    """Common ``kubectl`` argument prefix with cluster credentials baked in."""
    args = ["kubectl"]
    args += ["--server", KUBE_API_URL]
    args += ["--token", KUBE_TOKEN]
    args.append("--insecure-skip-tls-verify")
    return args
def _mtv_base_args() -> list[str]:
    """Common ``kubectl-mtv`` argument prefix with cluster credentials baked in."""
    args = [MTV_BINARY]
    args += ["--server", KUBE_API_URL]
    args += ["--token", KUBE_TOKEN]
    args.append("--insecure-skip-tls-verify")
    return args
def _create_namespace(name: str) -> None:
    """Create namespace *name*; an "already exists" error counts as success."""
    result = subprocess.run(
        _kubectl_base_args() + ["create", "namespace", name],
        capture_output=True,
        text=True,
        timeout=30,
    )
    if result.returncode == 0 or "already exists" in result.stderr:
        return
    raise RuntimeError(f"Failed to create namespace {name}: {result.stderr}")
def _delete_namespace(name: str) -> None:
    """Best-effort namespace deletion; a missing namespace is not an error."""
    subprocess.run(
        _kubectl_base_args() + ["delete", "namespace", name, "--ignore-not-found"],
        capture_output=True,
        text=True,
        timeout=120,
    )
# ---------------------------------------------------------------------------
# Helper: verify MCP server is reachable
# ---------------------------------------------------------------------------
def _verify_server_reachable() -> None:
    """Verify the MCP SSE server is reachable at the configured URL.

    Performs a plain TCP connect to the host/port of ``MCP_SSE_URL`` (no
    HTTP request) — enough to detect a server that was never started.

    Raises:
        RuntimeError: If the server is not reachable.
    """
    parsed = urllib.parse.urlparse(MCP_SSE_URL)
    host = parsed.hostname or "127.0.0.1"
    # Fall back to the scheme's default port when the URL omits one.
    # (Previously this always assumed 80, which is wrong for https URLs.)
    port = parsed.port or (443 if parsed.scheme == "https" else 80)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(5)
        if s.connect_ex((host, port)) != 0:
            raise RuntimeError(
                f"MCP server is not reachable at {MCP_SSE_URL}\n"
                f"Please start the server first:\n"
                f"  make server-start        # Start binary mode server\n"
                f"  make server-start-image  # Start container mode server\n"
                f"Or use an existing server by setting MCP_SSE_URL"
            )
# ---------------------------------------------------------------------------
# Fixture: verify server is running (session-scoped, runs once)
# ---------------------------------------------------------------------------
@pytest.fixture(scope="session")
def mcp_server_process():
    """Ensure the external MCP server is up before any test runs.

    The server's lifecycle is owned by make targets, not by the suite:

        - make server-start        (binary mode)
        - make server-start-image  (container mode)
        - make server-stop

    This fixture only checks reachability and yields a placeholder;
    there is nothing to tear down.
    """
    _verify_server_reachable()
    print(f"\n[Connected] MCP server at {MCP_SSE_URL}")
    yield None  # no process to manage — lifecycle is external
# ---------------------------------------------------------------------------
# Fixture: MCP client session over SSE (session-scoped)
# ---------------------------------------------------------------------------
@contextlib.asynccontextmanager
async def _safe_sse_client(*args, **kwargs):
    """Wrap ``sse_client`` and suppress the harmless anyio cancel-scope error.

    During pytest-asyncio session-scoped fixture teardown the event loop
    may finalize the ``sse_client`` context manager in a different task
    than the one that created it, causing::

        RuntimeError: Attempted to exit cancel scope in a different task …

    This wrapper swallows that specific error so the test run exits
    cleanly; every other exception propagates unchanged.  (The previous
    ``except BaseException: raise`` clause was a no-op and has been
    removed.)
    """
    try:
        async with sse_client(*args, **kwargs) as streams:
            yield streams
    except RuntimeError as exc:
        # Only suppress the known teardown race; re-raise anything else.
        if "cancel scope" not in str(exc):
            raise
# ---------------------------------------------------------------------------
# Helper: create an MCP session with custom headers
# ---------------------------------------------------------------------------
@contextlib.asynccontextmanager
async def _make_mcp_session(headers=None):
    """Yield an initialized MCP :class:`ClientSession` over SSE.

    *headers* ride on every HTTP request, which is how the ``mcp_session``
    fixture injects the real bearer token and how auth tests inject bad
    or missing ones.
    """
    client = _safe_sse_client(
        MCP_SSE_URL, headers=headers, timeout=30, sse_read_timeout=120,
    )
    async with client as (reader, writer):
        async with ClientSession(reader, writer) as session:
            await session.initialize()
            yield session
@pytest_asyncio.fixture(loop_scope="session", scope="session")
async def mcp_session(mcp_server_process):
    """Session-wide MCP client authenticated with the real cluster token.

    Authentication travels in the ``Authorization: Bearer`` header of
    every HTTP request — the server itself holds no token.
    """
    auth = {"Authorization": f"Bearer {KUBE_TOKEN}"}
    async with _make_mcp_session(headers=auth) as session:
        yield session
# ---------------------------------------------------------------------------
# Fixture: create / destroy the test namespace (session-scoped, autouse)
# ---------------------------------------------------------------------------
@pytest_asyncio.fixture(loop_scope="session", scope="session", autouse=True)
async def cleanup_test_resources(mcp_session):
    """Clean up all test resources after the session completes.

    Autouse, session-scoped: runs its teardown exactly once, after every
    test. Deletion happens in reverse dependency order (plans → hosts →
    providers → namespace), and each step is best-effort — a failure in
    one step must not stop the later ones.
    """
    yield
    # --- TEARDOWN (best-effort, reverse order) ---
    # Delete plans
    try:
        await call_tool(mcp_session, "mtv_write", {
            "command": "delete plan",
            "flags": {
                "name": f"{COLD_PLAN_NAME},{WARM_PLAN_NAME}",
                "namespace": TEST_NAMESPACE,
                "skip-archive": True,
            },
        })
    except Exception:
        pass
    # Delete host -- discover the K8s resource name(s) first, since
    # create host generates names like "{inventoryID}-{hash}".
    try:
        result = await call_tool(mcp_session, "mtv_read", {
            "command": "get host",
            "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
        })
        # "data" may be a single object or a list; normalize to a list.
        data = result.get("data", [])
        hosts = data if isinstance(data, list) else [data]
        # Keep only resources whose name starts with our inventory ID;
        # entries may be flattened ("name") or raw ("metadata.name").
        host_names = [
            h.get("name") or h.get("metadata", {}).get("name", "")
            for h in hosts
            if (h.get("name") or h.get("metadata", {}).get("name", "")).startswith(ESXI_HOST_NAME)
        ]
        if host_names:
            await call_tool(mcp_session, "mtv_write", {
                "command": "delete host",
                "flags": {
                    "name": ",".join(host_names),
                    "namespace": TEST_NAMESPACE,
                },
            })
    except Exception:
        pass
    # Delete providers
    try:
        await call_tool(mcp_session, "mtv_write", {
            "command": "delete provider",
            "flags": {
                "name": f"{VSPHERE_PROVIDER_NAME},{OCP_PROVIDER_NAME}",
                "namespace": TEST_NAMESPACE,
            },
        })
    except Exception:
        pass
    # Delete namespace last (subprocess -- no async context needed)
    try:
        _delete_namespace(TEST_NAMESPACE)
    except Exception:
        pass
    # Allow the event loop a moment to drain before session fixtures tear down
    await _asyncio.sleep(0.5)
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/health/test_check.py | Python | """Health · read -- verify the MTV health report."""
import pytest
from conftest import (
OCP_PROVIDER_NAME,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
call_tool,
)
@pytest.mark.order(70)
async def test_health_report(mcp_session):
    """The MTV health report for the test namespace must mention our providers."""
    flags = {"namespace": TEST_NAMESPACE, "skip-logs": True}
    result = await call_tool(
        mcp_session, "mtv_read", {"command": "health", "flags": flags},
    )
    report = result.get("output", "")
    assert result.get("return_value") == 0, f"Health check failed: {result}"
    assert "HEALTH REPORT" in report, f"Expected 'HEALTH REPORT' in output: {report[:200]}"
    mentions_provider = VSPHERE_PROVIDER_NAME in report or OCP_PROVIDER_NAME in report
    assert mentions_provider, "Health report should mention test providers"
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/hosts/test_create.py | Python | """Hosts · write -- create an ESXi host resource."""
import pytest
from conftest import (
ESXI_HOST_NAME,
ESXI_USERNAME,
ESXI_PASSWORD,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
call_tool,
)
@pytest.mark.order(14)
async def test_create_host(mcp_session):
    """Create an ESXi host resource using network-adapter lookup."""
    # host-id is the vSphere inventory identifier of the ESXi host; the
    # generated K8s resource is named "{inventoryID}-{hash}" (see
    # hosts/test_read.py, which discovers it by prefix).
    # NOTE(review): "network-adapter" presumably selects the adapter whose
    # IP is used as the migration network — confirm against server docs.
    result = await call_tool(mcp_session, "mtv_write", {
        "command": "create host",
        "flags": {
            "host-id": ESXI_HOST_NAME,
            "provider": VSPHERE_PROVIDER_NAME,
            "username": ESXI_USERNAME,
            "password": ESXI_PASSWORD,
            "network-adapter": "Management Network",
            "host-insecure-skip-tls": True,
            "namespace": TEST_NAMESPACE,
        },
    })
    # return_value == 0 signals success in the MCP JSON envelope.
    assert result.get("return_value") == 0, f"Unexpected result: {result}"
print(f"\n Created ESXi host '{ESXI_HOST_NAME}'") | yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/hosts/test_read.py | Python | """Hosts · read -- list and describe ESXi hosts."""
import pytest
from conftest import (
ESXI_HOST_NAME,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
call_tool,
)
# Module-level store so test_describe_host can reuse the discovered K8s name.
_discovered_resource_name: str | None = None
def _find_host_resource_name(hosts: list[dict], inventory_id: str) -> str | None:
"""Return the K8s resource name whose name starts with *inventory_id*.
The ``create host`` command generates a resource name like
``{inventoryID}-{hash}``, so we match on prefix.
"""
for h in hosts:
name = h.get("name") or h.get("metadata", {}).get("name", "")
if name.startswith(inventory_id):
return name
return None
@pytest.mark.order(30)
async def test_get_host(mcp_session):
    """List hosts and verify our ESXi host resource exists."""
    global _discovered_resource_name  # noqa: PLW0603
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get host",
        "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
    })
    # The envelope's "data" may be a single object or a list depending on
    # how many resources exist; normalize to a list before searching.
    data = result.get("data", [])
    hosts = data if isinstance(data, list) else [data]
    resource_name = _find_host_resource_name(hosts, ESXI_HOST_NAME)
    assert resource_name is not None, (
        f"No host resource starting with '{ESXI_HOST_NAME}' found; "
        f"got {[h.get('name') or h.get('metadata', {}).get('name') for h in hosts]}"
    )
    # Stash the generated "{inventoryID}-{hash}" name for test_describe_host,
    # which runs next (order=31) and cannot re-derive it cheaply.
    _discovered_resource_name = resource_name
    print(f"\n  ✓ Host resource '{resource_name}' found (inventory ID: {ESXI_HOST_NAME})")
@pytest.mark.order(31)
async def test_describe_host(mcp_session):
    """Describe the previously discovered ESXi host and check key fields."""
    name = _discovered_resource_name
    assert name, "test_get_host must run first to discover the resource name"
    flags = {"name": name, "namespace": TEST_NAMESPACE}
    result = await call_tool(
        mcp_session, "mtv_read", {"command": "describe host", "flags": flags},
    )
    described = result.get("output", "")
    assert name in described, f"Resource name '{name}' missing from describe output"
    assert VSPHERE_PROVIDER_NAME in described, "Provider reference missing"
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/inventory/test_read.py | Python | """Inventory · read -- query vSphere inventory for VMs, datastores, networks, hosts."""
import pytest
from conftest import (
COLD_VMS,
ESXI_HOST_NAME,
TEST_DATASTORE_NAME,
TEST_NAMESPACE,
TEST_NETWORK_NAME,
VSPHERE_PROVIDER_NAME,
WARM_VMS,
call_tool,
)
# All test VMs that must exist in the inventory: the union of the
# comma-separated COLD_VMS and WARM_VMS lists from the environment.
_TEST_VM_NAMES = set((COLD_VMS + "," + WARM_VMS).split(","))
# ===================================================================
# VMs
# ===================================================================
@pytest.mark.order(60)
async def test_inventory_vms(mcp_session):
    """Every configured test VM must be present in the provider inventory."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get inventory vm",
        "flags": {
            "provider": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    vms = result.get("data", [])
    assert isinstance(vms, list)
    assert len(vms) >= len(_TEST_VM_NAMES), (
        f"Expected at least {len(_TEST_VM_NAMES)} VMs, got {len(vms)}"
    )
    found = {entry.get("name") for entry in vms}
    for vm in sorted(_TEST_VM_NAMES):
        assert vm in found, f"Test VM '{vm}' not found in inventory"
    print(f"\n  ✓ All {len(_TEST_VM_NAMES)} test VMs found (total VMs: {len(vms)})")
@pytest.mark.order(61)
async def test_inventory_vms_query(mcp_session):
    """A TSL query must return only VMs whose names match the pattern."""
    flags = {
        "provider": VSPHERE_PROVIDER_NAME,
        "namespace": TEST_NAMESPACE,
        "output": "json",
        "query": "where name ~= 'mtv-rhel8-.*'",
    }
    result = await call_tool(
        mcp_session, "mtv_read", {"command": "get inventory vm", "flags": flags},
    )
    vms = result.get("data", [])
    assert isinstance(vms, list)
    assert len(vms) >= 1, "TSL query should return at least one VM"
    for entry in vms:
        name = entry.get("name", "")
        assert name.startswith("mtv-rhel8-"), (
            f"VM '{name}' does not match query pattern 'mtv-rhel8-.*'"
        )
# ===================================================================
# Datastores
# ===================================================================
@pytest.mark.order(62)
async def test_inventory_datastores(mcp_session):
    """The datastore referenced by STORAGE_PAIRS must exist in the inventory."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get inventory datastore",
        "flags": {
            "provider": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    entries = result.get("data", [])
    assert isinstance(entries, list)
    assert len(entries) >= 1, "Expected at least 1 datastore"
    ds_names = {item.get("name") for item in entries}
    assert TEST_DATASTORE_NAME in ds_names, (
        f"Test datastore '{TEST_DATASTORE_NAME}' not found; got {ds_names}"
    )
    print(f"\n  ✓ Test datastore '{TEST_DATASTORE_NAME}' found (total: {len(entries)})")
# ===================================================================
# Networks
# ===================================================================
@pytest.mark.order(63)
async def test_inventory_networks(mcp_session):
    """The network referenced by NETWORK_PAIRS must exist in the inventory."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get inventory network",
        "flags": {
            "provider": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    entries = result.get("data", [])
    assert isinstance(entries, list)
    assert len(entries) >= 1, "Expected at least 1 network"
    net_names = {item.get("name") for item in entries}
    assert TEST_NETWORK_NAME in net_names, (
        f"Test network '{TEST_NETWORK_NAME}' not found; got {net_names}"
    )
    print(f"\n  ✓ Test network '{TEST_NETWORK_NAME}' found (total: {len(entries)})")
# ===================================================================
# Hosts
# ===================================================================
@pytest.mark.order(64)
async def test_inventory_hosts(mcp_session):
    """The ESXi host used for testing must be present in the inventory."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get inventory host",
        "flags": {
            "provider": VSPHERE_PROVIDER_NAME,
            "namespace": TEST_NAMESPACE,
            "output": "json",
        },
    })
    entries = result.get("data", [])
    assert isinstance(entries, list)
    assert len(entries) >= 1, "Expected at least 1 inventory host"
    # ESXI_HOST_NAME may be an inventory ID (e.g. "host-8") or a display
    # name (e.g. "10.6.46.29"), so accept a match on either field.
    host_ids = {item.get("id") for item in entries}
    host_names = {item.get("name") for item in entries}
    assert ESXI_HOST_NAME in host_ids | host_names, (
        f"ESXi host '{ESXI_HOST_NAME}' not found in inventory; "
        f"ids={host_ids}, names={host_names}"
    )
    print(f"\n  ✓ ESXi host '{ESXI_HOST_NAME}' found (total: {len(entries)})")
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/mappings/test_read.py | Python | """Mappings · read -- verify auto-generated network and storage mappings."""
import pytest
from conftest import TEST_NAMESPACE, call_tool
@pytest.mark.order(50)
async def test_get_network_mappings(mcp_session):
    """At least one auto-generated network mapping should exist."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get mapping network",
        "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
    })
    payload = result.get("data", [])
    mappings = payload if isinstance(payload, list) else [payload]
    assert len(mappings) >= 1, "Expected at least 1 network mapping"
@pytest.mark.order(51)
async def test_get_storage_mappings(mcp_session):
    """At least one auto-generated storage mapping should exist."""
    result = await call_tool(mcp_session, "mtv_read", {
        "command": "get mapping storage",
        "flags": {"namespace": TEST_NAMESPACE, "output": "json"},
    })
    payload = result.get("data", [])
    mappings = payload if isinstance(payload, list) else [payload]
    assert len(mappings) >= 1, "Expected at least 1 storage mapping"
| yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
e2e/mcp/plans/test_create.py | Python | """Plans · write -- create cold and warm migration plans."""
import pytest
from conftest import (
COLD_PLAN_NAME,
COLD_VMS,
NETWORK_PAIRS,
OCP_PROVIDER_NAME,
STORAGE_PAIRS,
TEST_NAMESPACE,
VSPHERE_PROVIDER_NAME,
WARM_PLAN_NAME,
WARM_VMS,
call_tool,
)
@pytest.mark.order(15)
async def test_create_cold_plan(mcp_session):
    """Create a cold migration plan covering the configured cold VMs."""
    flags = {
        "name": COLD_PLAN_NAME,
        "source": VSPHERE_PROVIDER_NAME,
        "target": OCP_PROVIDER_NAME,
        "vms": COLD_VMS,
        "network-pairs": NETWORK_PAIRS,
        "storage-pairs": STORAGE_PAIRS,
        "namespace": TEST_NAMESPACE,
    }
    result = await call_tool(
        mcp_session, "mtv_write", {"command": "create plan", "flags": flags},
    )
    assert result.get("return_value") == 0, f"Unexpected result: {result}"
    print(f"\n  ✓ Created cold plan '{COLD_PLAN_NAME}' with VMs: {COLD_VMS}")
@pytest.mark.order(16)
async def test_create_warm_plan(mcp_session):
    """Create a warm migration plan with two VMs."""
    result = await call_tool(mcp_session, "mtv_write", {
        "command": "create plan",
        "flags": {
            "name": WARM_PLAN_NAME,
            "source": VSPHERE_PROVIDER_NAME,
            "target": OCP_PROVIDER_NAME,
            "vms": WARM_VMS,
            # "warm" selects MTV's warm migration type (the default,
            # used by the cold plan above, is cold).
            "migration-type": "warm",
            "network-pairs": NETWORK_PAIRS,
            "storage-pairs": STORAGE_PAIRS,
            "namespace": TEST_NAMESPACE,
        },
    })
    # return_value == 0 signals success in the MCP JSON envelope.
    assert result.get("return_value") == 0, f"Unexpected result: {result}"
print(f"\n ✓ Created warm plan '{WARM_PLAN_NAME}' with VMs: {WARM_VMS}") | yaacov/kubectl-mtv | 11 | A kubectl plugin that helps users of Forklift migrate virtualization workloads from oVirt, VMware, OpenStack, and OVA files to KubeVirt on Kubernetes. | Go | yaacov | Yaacov Zamir | Red Hat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.