lakhera2023 committed on
Commit
6046cf4
·
verified ·
1 Parent(s): 257681f

Initial deployment of DevOps SLM Space

Browse files
Files changed (4) hide show
  1. .gitattributes +3 -35
  2. README.md +46 -6
  3. app.py +288 -0
  4. requirements.txt +5 -0
.gitattributes CHANGED
@@ -1,35 +1,3 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.py linguist-language=Python
2
+ *.md linguist-language=Markdown
3
+ *.txt linguist-language=Text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md CHANGED
@@ -1,12 +1,52 @@
1
  ---
2
- title: Devops Slm Chat
3
- emoji: 💻
4
- colorFrom: green
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.46.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: DevOps SLM - AI Assistant
3
+ emoji: 🚀
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 4.0.0
8
  app_file: app.py
9
  pinned: false
10
+ license: apache-2.0
11
+ short_description: Interactive DevOps and Kubernetes AI Assistant
12
  ---
13
 
14
+ # DevOps SLM - AI Assistant
15
+
16
+ An interactive AI assistant specialized in DevOps, Kubernetes, Docker, and CI/CD operations.
17
+
18
+ ## Features
19
+
20
+ - **💬 Chat Interface**: Ask questions about DevOps topics
21
+ - **☸️ Kubernetes Generator**: Create deployment YAMLs
22
+ - **🐳 Docker Generator**: Generate Dockerfiles
23
+ - **🔄 CI/CD Designer**: Design pipeline configurations
24
+
25
+ ## Model
26
+
27
+ This space uses the [DevOps-SLM model](https://huggingface.co/lakhera2023/devops-slm) - a specialized language model trained for DevOps tasks.
28
+
29
+ ## Usage
30
+
31
+ 1. **Chat Tab**: Ask any DevOps-related questions
32
+ 2. **Kubernetes Tab**: Generate deployment manifests
33
+ 3. **Docker Tab**: Create Dockerfiles for different applications
34
+ 4. **CI/CD Tab**: Design CI/CD pipeline configurations
35
+
36
+ ## Examples
37
+
38
+ - "How do I create a Kubernetes deployment?"
39
+ - "Generate a Dockerfile for a Node.js application"
40
+ - "Design a CI/CD pipeline for microservices"
41
+ - "Troubleshoot a failing pod in Kubernetes"
42
+
43
+ ## Model Information
44
+
45
+ - **Parameters**: 494M
46
+ - **Specialization**: DevOps, Kubernetes, Docker, CI/CD
47
+ - **Base Model**: Custom transformer architecture
48
+ - **License**: Apache 2.0
49
+
50
+ ## Support
51
+
52
+ For questions or issues, please open an issue in the [model repository](https://huggingface.co/lakhera2023/devops-slm).
app.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ import re
5
+
6
class DevOpsSLM:
    """Thin wrapper around the `lakhera2023/devops-slm` causal language model.

    Loads the model and tokenizer once at construction time and exposes a
    chat-style generation method plus convenience prompt builders for
    Kubernetes manifests, Dockerfiles, and CI/CD pipelines.
    """

    # Single source of truth for the Hub repo id.
    MODEL_ID = "lakhera2023/devops-slm"

    def __init__(self):
        """Load the model and tokenizer, preferring GPU when available."""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"🚀 Loading DevOps SLM on {self.device}...")

        # FIX: float16 is only safe/fast on GPU. Loading in fp16 on a
        # CPU-only Space is unsupported or extremely slow, so fall back
        # to float32 when no CUDA device is present.
        dtype = torch.float16 if self.device == "cuda" else torch.float32
        self.model = AutoModelForCausalLM.from_pretrained(
            self.MODEL_ID,
            torch_dtype=dtype,
            device_map="auto",
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.MODEL_ID)

        print("✅ DevOps SLM loaded successfully!")

    def generate_response(self, message, history, system_message, max_tokens, temperature):
        """Generate a reply and append the [message, reply] pair to history.

        Args:
            message: The user's prompt; blank input is a no-op.
            history: Gradio chat history (list of [user, assistant] pairs);
                mutated in place and also returned.
            system_message: System prompt injected before the user turn.
            max_tokens: Upper bound on newly generated tokens.
            temperature: Sampling temperature (``do_sample=True``).

        Returns:
            tuple: ``(history, "")`` — the empty string clears the Gradio
            input textbox in the chat callback.
        """
        if not message.strip():
            return history, ""

        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": message},
        ]

        # Render the conversation with the model's chat template, keeping
        # the generation prompt so the model continues as the assistant.
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        # FIX: with device_map="auto" the model may not live on
        # self.device, so place inputs on the model's actual device.
        inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.1,
            )

        # FIX: strip the prompt at the *token* level before decoding.
        # Slicing the decoded string by len(text) is wrong because
        # skip_special_tokens=True changes the decoded length relative
        # to the templated prompt text.
        prompt_len = inputs["input_ids"].shape[1]
        generated_tokens = outputs[0][prompt_len:]
        response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

        history.append([message, response])

        return history, ""

    def create_kubernetes_deployment(self, app_name, image, replicas, namespace):
        """Generate Kubernetes deployment YAML.

        Returns the ``(history, "")`` tuple from :meth:`generate_response`;
        the generated text is in ``history[0][1]``.
        """
        prompt = f"Create a Kubernetes deployment YAML for {app_name} using image {image} with {replicas} replicas in namespace {namespace}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 300, 0.7)

    def create_dockerfile(self, app_type, base_image, requirements):
        """Generate a Dockerfile; see :meth:`create_kubernetes_deployment` for the return shape."""
        prompt = f"Create a Dockerfile for a {app_type} application using base image {base_image}"
        if requirements:
            prompt += f" with these requirements: {requirements}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 250, 0.7)

    def design_cicd_pipeline(self, project_type, deployment_target, tools):
        """Design a CI/CD pipeline; see :meth:`create_kubernetes_deployment` for the return shape."""
        prompt = f"Design a CI/CD pipeline for a {project_type} project to deploy to {deployment_target}"
        if tools:
            prompt += f" using {tools}"
        return self.generate_response(prompt, [], "You are a specialized DevOps assistant.", 400, 0.7)
82
+
83
# Initialize the model once at import time so the (slow) weight download
# and load happen during Space startup rather than on the first request.
devops_slm = DevOpsSLM()
85
+
86
# Create Gradio interface
def create_interface():
    """Build the Gradio Blocks UI.

    Assembles four tabs — free-form chat, Kubernetes manifest generation,
    Dockerfile generation, and CI/CD pipeline design — all backed by the
    module-level ``devops_slm`` model wrapper.

    Returns:
        gr.Blocks: the assembled (not yet launched) interface.
    """
    with gr.Blocks(
        title="DevOps SLM - AI Assistant",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
        }
        .chat-message {
            font-family: 'Courier New', monospace;
        }
        """
    ) as interface:

        gr.Markdown("""
        # 🚀 DevOps SLM - Specialized AI Assistant

        Welcome to the DevOps Specialized Language Model! This AI assistant is trained specifically for:
        - **Kubernetes** operations and troubleshooting
        - **Docker** containerization and best practices
        - **CI/CD** pipeline design and implementation
        - **Infrastructure** automation and management
        - **DevOps** best practices and guidance

        Ask me anything about DevOps, and I'll provide expert guidance!
        """)

        with gr.Tabs():
            # Chat Tab
            with gr.Tab("💬 Chat"):
                chatbot = gr.Chatbot(
                    label="DevOps Assistant",
                    height=500,
                    show_label=True,
                    container=True,
                    bubble_full_width=False
                )

                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Message",
                        placeholder="Ask me about Kubernetes, Docker, CI/CD, or any DevOps topic...",
                        lines=2,
                        scale=4
                    )
                    send_btn = gr.Button("Send", variant="primary", scale=1)

                with gr.Row():
                    clear_btn = gr.Button("Clear Chat", variant="secondary")

                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    system_msg = gr.Textbox(
                        label="System Message",
                        value="You are a specialized DevOps and Kubernetes assistant. You help with DevOps tasks, Kubernetes operations, Docker containerization, CI/CD pipelines, and infrastructure management only.",
                        lines=2
                    )
                    max_tokens = gr.Slider(
                        minimum=50,
                        maximum=500,
                        value=200,
                        step=10,
                        label="Max Tokens"
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )

            # Kubernetes Tab
            with gr.Tab("☸️ Kubernetes"):
                gr.Markdown("### Generate Kubernetes Manifests")

                with gr.Row():
                    with gr.Column():
                        k8s_app_name = gr.Textbox(label="Application Name", value="nginx")
                        k8s_image = gr.Textbox(label="Docker Image", value="nginx:latest")
                        k8s_replicas = gr.Number(label="Replicas", value=3, minimum=1, maximum=10)
                        k8s_namespace = gr.Textbox(label="Namespace", value="default")
                        k8s_generate_btn = gr.Button("Generate Deployment", variant="primary")

                    with gr.Column():
                        k8s_output = gr.Code(
                            label="Generated YAML",
                            language="yaml",
                            lines=20
                        )

            # Docker Tab
            with gr.Tab("🐳 Docker"):
                gr.Markdown("### Generate Dockerfile")

                with gr.Row():
                    with gr.Column():
                        docker_app_type = gr.Dropdown(
                            choices=["Node.js", "Python", "Java", "Go", "React", "Vue.js", "Angular"],
                            label="Application Type",
                            value="Node.js"
                        )
                        docker_base_image = gr.Textbox(label="Base Image", value="node:18-alpine")
                        docker_requirements = gr.Textbox(
                            label="Requirements/Dependencies",
                            placeholder="package.json, requirements.txt, etc.",
                            lines=3
                        )
                        docker_generate_btn = gr.Button("Generate Dockerfile", variant="primary")

                    with gr.Column():
                        docker_output = gr.Code(
                            label="Generated Dockerfile",
                            language="dockerfile",
                            lines=20
                        )

            # CI/CD Tab
            with gr.Tab("🔄 CI/CD"):
                gr.Markdown("### Design CI/CD Pipeline")

                with gr.Row():
                    with gr.Column():
                        cicd_project_type = gr.Dropdown(
                            choices=["Microservices", "Monolith", "Frontend", "Backend", "Full-stack"],
                            label="Project Type",
                            value="Microservices"
                        )
                        cicd_deployment_target = gr.Dropdown(
                            choices=["Kubernetes", "Docker Swarm", "AWS ECS", "Azure Container Instances", "Google Cloud Run"],
                            label="Deployment Target",
                            value="Kubernetes"
                        )
                        cicd_tools = gr.Textbox(
                            label="CI/CD Tools",
                            placeholder="GitHub Actions, Jenkins, GitLab CI, etc.",
                            value="GitHub Actions"
                        )
                        cicd_generate_btn = gr.Button("Design Pipeline", variant="primary")

                    with gr.Column():
                        cicd_output = gr.Code(
                            label="Pipeline Configuration",
                            language="yaml",
                            lines=25
                        )

        # Event handlers
        def respond(message, history, system_msg, max_tokens, temperature):
            """Chat callback: returns (updated history, cleared textbox)."""
            if not message.strip():
                return history, ""

            history, _ = devops_slm.generate_response(message, history, system_msg, max_tokens, temperature)
            return history, ""

        def clear_chat():
            """Reset the chatbot history."""
            return []

        def generate_k8s_deployment(app_name, image, replicas, namespace):
            # BUG FIX: generate_response() returns (history, ""). The original
            # code unpacked this backwards (`_, response = ...`), so `response`
            # was always the empty string and the tab always showed the
            # failure message. The generated text lives in history[0][1].
            history, _ = devops_slm.create_kubernetes_deployment(app_name, image, replicas, namespace)
            return history[0][1] if history else "Failed to generate deployment"

        def generate_dockerfile(app_type, base_image, requirements):
            # Same tuple-unpacking fix as generate_k8s_deployment.
            history, _ = devops_slm.create_dockerfile(app_type, base_image, requirements)
            return history[0][1] if history else "Failed to generate Dockerfile"

        def generate_cicd_pipeline(project_type, deployment_target, tools):
            # Same tuple-unpacking fix as generate_k8s_deployment.
            history, _ = devops_slm.design_cicd_pipeline(project_type, deployment_target, tools)
            return history[0][1] if history else "Failed to generate pipeline"

        # Connect events
        msg.submit(respond, [msg, chatbot, system_msg, max_tokens, temperature], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot, system_msg, max_tokens, temperature], [chatbot, msg])
        clear_btn.click(clear_chat, outputs=chatbot)

        k8s_generate_btn.click(
            generate_k8s_deployment,
            [k8s_app_name, k8s_image, k8s_replicas, k8s_namespace],
            k8s_output
        )

        docker_generate_btn.click(
            generate_dockerfile,
            [docker_app_type, docker_base_image, docker_requirements],
            docker_output
        )

        cicd_generate_btn.click(
            generate_cicd_pipeline,
            [cicd_project_type, cicd_deployment_target, cicd_tools],
            cicd_output
        )

    return interface
280
+
281
# Launch the interface only when run as a script (not on import).
if __name__ == "__main__":
    interface = create_interface()
    interface.launch(
        server_name="0.0.0.0",  # bind all interfaces so the container is reachable
        server_port=7860,       # NOTE(review): 7860 is the port Hugging Face Spaces expects
        share=False             # no public Gradio tunnel; Spaces provides the URL
    )
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ torch>=2.0.0
3
+ transformers>=4.37.0
4
+ accelerate>=0.20.0
5
+ safetensors>=0.3.0