# DevOps-slm / app.py
# Uploaded by lakhera2023 — commit 0501ab1 ("Create app.py", verified)
#!/usr/bin/env python3
"""
Deploy DevOps SLM to Hugging Face Space
Creates an interactive Gradio app for the DevOps-SLM model
"""
import os
import json
import tempfile
import shutil
from pathlib import Path
from huggingface_hub import HfApi, login, create_repo, upload_folder
from huggingface_hub.utils import RepositoryNotFoundError
import argparse
class DevOpsSLMSpaceDeployer:
    """Build and deploy an interactive Gradio Space for the DevOps-SLM model.

    Workflow (see run()): authenticate against the Hugging Face Hub, render
    the Space files (app.py, requirements.txt, README.md, .gitattributes)
    into a temporary directory, create the Space repository if needed, and
    upload the folder.
    """

    def __init__(self, hf_token: str = None, username: str = None, model_name: str = "devops-slm"):
        """Initialize the DevOps SLM Space Deployer.

        Args:
            hf_token: Hugging Face access token; if None, a cached login is used.
            username: Hub username; overwritten by whoami() during authentication.
            model_name: Name of the model repository the Space will serve.
        """
        self.hf_token = hf_token
        self.username = username
        self.model_name = model_name
        self.api = HfApi()
        self.temp_dir = None  # created lazily by create_space_config(), removed by cleanup()
        # Space configuration
        self.space_name = f"{model_name}-chat"
        self.space_title = "DevOps SLM - Specialized AI Assistant"
        self.space_description = "Interactive DevOps and Kubernetes AI Assistant powered by specialized language model"

    def setup_authentication(self):
        """Log in to the Hugging Face Hub and resolve the current username.

        Raises:
            Exception: if neither the provided token nor a cached login works,
                or if user info cannot be fetched.
        """
        print("πŸ” Setting up Hugging Face authentication...")
        if self.hf_token:
            login(token=self.hf_token)
            print("βœ… Authentication successful with provided token")
        else:
            try:
                login()
                print("βœ… Using existing Hugging Face token")
            except Exception:
                print("❌ Authentication failed. Please provide a Hugging Face token.")
                raise  # bare raise preserves the original traceback
        # Get current user info; this also fills in self.username when the
        # caller did not supply one (and normalizes it when they did).
        try:
            user_info = self.api.whoami()
            self.username = user_info['name']
            print(f"βœ… Authenticated as: {self.username}")
        except Exception as e:
            print(f"❌ Could not get user info: {e}")
            raise

    def create_gradio_app(self):
        """Render the source code of the Space's Gradio app as a string.

        The returned text is a complete, standalone app.py that loads
        ``<username>/<model_name>`` from the Hub and exposes chat plus
        Kubernetes/Docker/CI-CD generator tabs.
        """
        print("🎨 Creating Gradio app...")
        # Fully-qualified model id baked into the generated app.
        model_id = f"{self.username}/{self.model_name}"
        app_code = '''import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import re


class DevOpsSLM:
    def __init__(self):
        """Initialize the DevOps SLM."""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"πŸš€ Loading DevOps SLM on {self.device}...")
        # Load the model
        self.model = AutoModelForCausalLM.from_pretrained(
            "''' + model_id + '''",
            torch_dtype=torch.float16,
            device_map="auto"
        )
        self.tokenizer = AutoTokenizer.from_pretrained("''' + model_id + '''")
        print("βœ… DevOps SLM loaded successfully!")

    def generate_response(self, message, history, system_message, max_tokens, temperature):
        """Generate a response and append the (message, response) turn to history.

        Returns:
            (history, ""): the updated chat history and an empty string used
            to clear the input textbox.
        """
        if not message.strip():
            return history, ""
        # Prepare messages
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": message}
        ]
        # Apply chat template
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        # Tokenize
        inputs = self.tokenizer([text], return_tensors="pt").to(self.device)
        # Generate response
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.1
            )
        # Decode only the newly generated token ids. Slicing the decoded
        # string by len(text) is unreliable: skip_special_tokens changes the
        # decoded length, so the old slice could cut into the answer.
        generated = outputs[0][inputs["input_ids"].shape[1]:]
        response = self.tokenizer.decode(generated, skip_special_tokens=True).strip()
        # Add to history
        history.append([message, response])
        return history, ""

    def create_kubernetes_deployment(self, app_name, image, replicas, namespace):
        """Generate Kubernetes deployment YAML."""
        prompt = f"Create a Kubernetes deployment YAML for {app_name} using image {image} with {replicas} replicas in namespace {namespace}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 300, 0.7)

    def create_dockerfile(self, app_type, base_image, requirements):
        """Generate Dockerfile."""
        prompt = f"Create a Dockerfile for a {app_type} application using base image {base_image}"
        if requirements:
            prompt += f" with these requirements: {requirements}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 250, 0.7)

    def design_cicd_pipeline(self, project_type, deployment_target, tools):
        """Design CI/CD pipeline."""
        prompt = f"Design a CI/CD pipeline for a {project_type} project to deploy to {deployment_target}"
        if tools:
            prompt += f" using {tools}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 400, 0.7)


# Initialize the model
devops_slm = DevOpsSLM()


# Create Gradio interface
def create_interface():
    with gr.Blocks(
        title="DevOps SLM - AI Assistant",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
        }
        .chat-message {
            font-family: 'Courier New', monospace;
        }
        """
    ) as interface:
        gr.Markdown("""
        # πŸš€ DevOps SLM - Specialized AI Assistant
        Welcome to the DevOps Specialized Language Model! This AI assistant is trained specifically for:
        - **Kubernetes** operations and troubleshooting
        - **Docker** containerization and best practices
        - **CI/CD** pipeline design and implementation
        - **Infrastructure** automation and management
        - **DevOps** best practices and guidance
        Ask me anything about DevOps, and I'll provide expert guidance!
        """)
        with gr.Tabs():
            # Chat Tab
            with gr.Tab("πŸ’¬ Chat"):
                chatbot = gr.Chatbot(
                    label="DevOps Assistant",
                    height=500,
                    show_label=True,
                    container=True,
                    bubble_full_width=False
                )
                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Message",
                        placeholder="Ask me about Kubernetes, Docker, CI/CD, or any DevOps topic...",
                        lines=2,
                        scale=4
                    )
                    send_btn = gr.Button("Send", variant="primary", scale=1)
                with gr.Row():
                    clear_btn = gr.Button("Clear Chat", variant="secondary")
                with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                    system_msg = gr.Textbox(
                        label="System Message",
                        value="You are a specialized DevOps and Kubernetes assistant. You help with DevOps tasks, Kubernetes operations, Docker containerization, CI/CD pipelines, and infrastructure management only.",
                        lines=2
                    )
                    max_tokens = gr.Slider(
                        minimum=50,
                        maximum=500,
                        value=200,
                        step=10,
                        label="Max Tokens"
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
            # Kubernetes Tab
            with gr.Tab("☸️ Kubernetes"):
                gr.Markdown("### Generate Kubernetes Manifests")
                with gr.Row():
                    with gr.Column():
                        k8s_app_name = gr.Textbox(label="Application Name", value="nginx")
                        k8s_image = gr.Textbox(label="Docker Image", value="nginx:latest")
                        k8s_replicas = gr.Number(label="Replicas", value=3, minimum=1, maximum=10)
                        k8s_namespace = gr.Textbox(label="Namespace", value="default")
                        k8s_generate_btn = gr.Button("Generate Deployment", variant="primary")
                    with gr.Column():
                        k8s_output = gr.Code(
                            label="Generated YAML",
                            language="yaml",
                            lines=20
                        )
            # Docker Tab
            with gr.Tab("🐳 Docker"):
                gr.Markdown("### Generate Dockerfile")
                with gr.Row():
                    with gr.Column():
                        docker_app_type = gr.Dropdown(
                            choices=["Node.js", "Python", "Java", "Go", "React", "Vue.js", "Angular"],
                            label="Application Type",
                            value="Node.js"
                        )
                        docker_base_image = gr.Textbox(label="Base Image", value="node:18-alpine")
                        docker_requirements = gr.Textbox(
                            label="Requirements/Dependencies",
                            placeholder="package.json, requirements.txt, etc.",
                            lines=3
                        )
                        docker_generate_btn = gr.Button("Generate Dockerfile", variant="primary")
                    with gr.Column():
                        docker_output = gr.Code(
                            label="Generated Dockerfile",
                            language="dockerfile",
                            lines=20
                        )
            # CI/CD Tab
            with gr.Tab("πŸ”„ CI/CD"):
                gr.Markdown("### Design CI/CD Pipeline")
                with gr.Row():
                    with gr.Column():
                        cicd_project_type = gr.Dropdown(
                            choices=["Microservices", "Monolith", "Frontend", "Backend", "Full-stack"],
                            label="Project Type",
                            value="Microservices"
                        )
                        cicd_deployment_target = gr.Dropdown(
                            choices=["Kubernetes", "Docker Swarm", "AWS ECS", "Azure Container Instances", "Google Cloud Run"],
                            label="Deployment Target",
                            value="Kubernetes"
                        )
                        cicd_tools = gr.Textbox(
                            label="CI/CD Tools",
                            placeholder="GitHub Actions, Jenkins, GitLab CI, etc.",
                            value="GitHub Actions"
                        )
                        cicd_generate_btn = gr.Button("Design Pipeline", variant="primary")
                    with gr.Column():
                        cicd_output = gr.Code(
                            label="Pipeline Configuration",
                            language="yaml",
                            lines=25
                        )

        # Event handlers
        def respond(message, history, system_msg, max_tokens, temperature):
            if not message.strip():
                return history, ""
            history, _ = devops_slm.generate_response(message, history, system_msg, max_tokens, temperature)
            return history, ""

        def clear_chat():
            return []

        # generate_response() returns (history, ""), so the model's answer is
        # the assistant half of the first history turn -- NOT the second tuple
        # element, which is always the empty textbox-clearing string.
        def generate_k8s_deployment(app_name, image, replicas, namespace):
            history, _ = devops_slm.create_kubernetes_deployment(app_name, image, replicas, namespace)
            return history[0][1] if history else "Failed to generate deployment"

        def generate_dockerfile(app_type, base_image, requirements):
            history, _ = devops_slm.create_dockerfile(app_type, base_image, requirements)
            return history[0][1] if history else "Failed to generate Dockerfile"

        def generate_cicd_pipeline(project_type, deployment_target, tools):
            history, _ = devops_slm.design_cicd_pipeline(project_type, deployment_target, tools)
            return history[0][1] if history else "Failed to generate pipeline"

        # Connect events
        msg.submit(respond, [msg, chatbot, system_msg, max_tokens, temperature], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot, system_msg, max_tokens, temperature], [chatbot, msg])
        clear_btn.click(clear_chat, outputs=chatbot)
        k8s_generate_btn.click(
            generate_k8s_deployment,
            [k8s_app_name, k8s_image, k8s_replicas, k8s_namespace],
            k8s_output
        )
        docker_generate_btn.click(
            generate_dockerfile,
            [docker_app_type, docker_base_image, docker_requirements],
            docker_output
        )
        cicd_generate_btn.click(
            generate_cicd_pipeline,
            [cicd_project_type, cicd_deployment_target, cicd_tools],
            cicd_output
        )
    return interface


# Launch the interface
if __name__ == "__main__":
    interface = create_interface()
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
'''
        return app_code

    def create_requirements_file(self):
        """Return the requirements.txt content for the Space."""
        requirements = '''gradio>=4.0.0
torch>=2.0.0
transformers>=4.37.0
accelerate>=0.20.0
safetensors>=0.3.0
'''
        return requirements

    def create_readme(self):
        """Return the README.md content (with Space YAML front matter)."""
        readme_content = f'''---
title: DevOps SLM - AI Assistant
emoji: πŸš€
colorFrom: blue
colorTo: purple
sdk: gradio
sdk_version: 4.0.0
app_file: app.py
pinned: false
license: apache-2.0
short_description: Interactive DevOps and Kubernetes AI Assistant
---
# DevOps SLM - AI Assistant
An interactive AI assistant specialized in DevOps, Kubernetes, Docker, and CI/CD operations.
## Features
- **πŸ’¬ Chat Interface**: Ask questions about DevOps topics
- **☸️ Kubernetes Generator**: Create deployment YAMLs
- **🐳 Docker Generator**: Generate Dockerfiles
- **πŸ”„ CI/CD Designer**: Design pipeline configurations
## Model
This space uses the [DevOps-SLM model](https://huggingface.co/{self.username}/{self.model_name}) - a specialized language model trained for DevOps tasks.
## Usage
1. **Chat Tab**: Ask any DevOps-related questions
2. **Kubernetes Tab**: Generate deployment manifests
3. **Docker Tab**: Create Dockerfiles for different applications
4. **CI/CD Tab**: Design CI/CD pipeline configurations
## Examples
- "How do I create a Kubernetes deployment?"
- "Generate a Dockerfile for a Node.js application"
- "Design a CI/CD pipeline for microservices"
- "Troubleshoot a failing pod in Kubernetes"
## Model Information
- **Parameters**: 494M
- **Specialization**: DevOps, Kubernetes, Docker, CI/CD
- **Base Model**: Custom transformer architecture
- **License**: Apache 2.0
## Support
For questions or issues, please open an issue in the [model repository](https://huggingface.co/{self.username}/{self.model_name}).
'''
        return readme_content

    def create_space_config(self):
        """Write all Space files into a fresh temporary directory.

        Returns:
            Path of the directory containing app.py, requirements.txt,
            README.md and .gitattributes, ready for upload_space().
        """
        print("πŸ“ Creating space configuration...")
        # Create temporary directory (removed later by cleanup())
        self.temp_dir = tempfile.mkdtemp(prefix="devops_slm_space_")
        space_path = os.path.join(self.temp_dir, "space")
        os.makedirs(space_path, exist_ok=True)
        # Create app.py
        app_code = self.create_gradio_app()
        with open(os.path.join(space_path, "app.py"), 'w') as f:
            f.write(app_code)
        # Create requirements.txt
        requirements = self.create_requirements_file()
        with open(os.path.join(space_path, "requirements.txt"), 'w') as f:
            f.write(requirements)
        # Create README.md
        readme = self.create_readme()
        with open(os.path.join(space_path, "README.md"), 'w') as f:
            f.write(readme)
        # Create .gitattributes
        gitattributes = '''*.py linguist-language=Python
*.md linguist-language=Markdown
*.txt linguist-language=Text
'''
        with open(os.path.join(space_path, ".gitattributes"), 'w') as f:
            f.write(gitattributes)
        print(f"βœ… Space configuration created in {space_path}")
        return space_path

    def create_space_repository(self):
        """Create the Hugging Face Space repository if it does not exist.

        Returns:
            The space id "<username>/<space_name>".

        Raises:
            Exception: if repository creation fails for any reason other
                than the Space already existing.
        """
        space_id = f"{self.username}/{self.space_name}"
        print(f"πŸ“ Creating space repository: {space_id}")
        try:
            # Check if space exists; repo_info raises when it does not.
            try:
                self.api.repo_info(space_id, repo_type="space")
                print(f"βœ… Space {space_id} already exists")
                return space_id
            except RepositoryNotFoundError:
                pass
            # Create new space
            create_repo(
                repo_id=space_id,
                token=self.hf_token,
                private=False,
                repo_type="space",
                space_sdk="gradio"
            )
            print(f"βœ… Space {space_id} created successfully")
            return space_id
        except Exception as e:
            print(f"❌ Failed to create space: {e}")
            raise

    def upload_space(self, space_path: str):
        """Upload the rendered Space folder to the Hub.

        Args:
            space_path: Directory produced by create_space_config().

        Returns:
            The space id "<username>/<space_name>".
        """
        space_id = f"{self.username}/{self.space_name}"
        print(f"πŸ“€ Uploading space to {space_id}...")
        try:
            # Upload the entire folder in one commit
            upload_folder(
                folder_path=space_path,
                repo_id=space_id,
                token=self.hf_token,
                repo_type="space",
                commit_message="Initial deployment of DevOps SLM Space"
            )
            print(f"βœ… Space uploaded successfully to https://huggingface.co/spaces/{space_id}")
            return space_id
        except Exception as e:
            print(f"❌ Space upload failed: {e}")
            raise

    def cleanup(self):
        """Remove the temporary working directory, if one was created."""
        if self.temp_dir and os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)
            self.temp_dir = None  # make repeated cleanup() calls harmless
            print("🧹 Cleaned up temporary files")

    def run(self):
        """Run the complete space deployment process.

        Returns:
            The deployed space id "<username>/<space_name>".
        """
        try:
            print("πŸš€ Starting DevOps SLM Space Deployment")
            print("=" * 60)
            # Step 1: Setup authentication
            self.setup_authentication()
            # Step 2: Create space configuration
            space_path = self.create_space_config()
            # Step 3: Create space repository
            space_id = self.create_space_repository()
            # Step 4: Upload space
            uploaded_space = self.upload_space(space_path)
            print("\n" + "=" * 60)
            print("πŸŽ‰ DevOps SLM Space Deployment Complete!")
            print("=" * 60)
            print(f"βœ… Space: {uploaded_space}")
            print(f"βœ… Model: {self.username}/{self.model_name}")
            print(f"βœ… URL: https://huggingface.co/spaces/{uploaded_space}")
            print("=" * 60)
            return uploaded_space
        except Exception as e:
            print(f"❌ Space deployment failed: {e}")
            raise
        finally:
            self.cleanup()
def main():
    """Command-line entry point: parse arguments and deploy the Space.

    Returns:
        0 on success, 1 when the token is missing or deployment fails.
    """
    parser = argparse.ArgumentParser(description="Deploy DevOps SLM to Hugging Face Space")
    parser.add_argument("--token", type=str, help="Hugging Face token")
    parser.add_argument("--username", type=str, help="Hugging Face username")
    parser.add_argument("--model-name", type=str, default="devops-slm", help="Model name")
    parser.add_argument("--space-name", type=str, help="Custom space name")
    args = parser.parse_args()

    # Fall back to the environment when no token was passed on the CLI.
    if not args.token:
        args.token = os.getenv("HUGGINGFACE_TOKEN")

    # Guard clause: without a token there is nothing we can do.
    if not args.token:
        print("❌ Hugging Face token is required!")
        print("Set HUGGINGFACE_TOKEN environment variable or use --token argument")
        return 1

    # Build the deployer, honoring an optional custom space name.
    deployer = DevOpsSLMSpaceDeployer(
        hf_token=args.token,
        username=args.username,
        model_name=args.model_name
    )
    if args.space_name:
        deployer.space_name = args.space_name

    try:
        space_id = deployer.run()
        print(f"\n🎯 Your DevOps SLM Space is ready at: https://huggingface.co/spaces/{space_id}")
        print(f"πŸ”— Model: https://huggingface.co/{deployer.username}/{deployer.model_name}")
    except Exception as e:
        print(f"\n❌ Failed to deploy DevOps SLM Space: {e}")
        return 1
    return 0
if __name__ == "__main__":
    # Use SystemExit rather than the site-injected exit() helper, which is
    # meant for interactive sessions and is absent when Python runs with -S.
    raise SystemExit(main())