AIencoder committed on
Commit
446c488
·
verified ·
1 Parent(s): d14de0e

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +29 -8
  2. app.py +407 -0
  3. requirements.txt +2 -0
README.md CHANGED
@@ -1,12 +1,33 @@
1
  ---
2
- title: Axon ADJUSTABLE
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 6.4.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Axon-ADJUSTABLE
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.42.0
6
  ---
7
 
8
+ # 🔥 FREE GOD Coding Machine
9
+
10
+ A powerful AI coding assistant using **100% FREE** HuggingFace Inference API.
11
+
12
+ ## Features
13
+
14
+ - 💬 **Chat** - Have coding conversations
15
+ - ⚡ **Generate** - Describe → Get code
16
+ - 🔍 **Explain** - Understand any code
17
+ - 🔧 **Fix** - Debug and fix errors
18
+ - 📝 **Review** - Get code reviews
19
+
20
+ ## Models Available (All FREE!)
21
+
22
+ - **Qwen2.5-Coder-32B** - Best quality
23
+ - **Qwen2.5-Coder-7B** - Faster responses
24
+ - **CodeLlama-34B** - Great for code
25
+ - **Mistral-7B** - General purpose
26
+
27
+ ## Tips
28
+
29
+ - Add `HF_TOKEN` as a secret for higher rate limits
30
+ - Use lower temperature (0.2-0.3) for code generation
31
+ - Use higher temperature (0.7-0.8) for creative tasks
32
+
33
+ **Cost: $0** 🎉
app.py ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 🔥 GOD Coding Machine - Gradio App
3
+ Deploy with: gradio deploy
4
+
5
+ Uses FREE HuggingFace Inference API to access powerful coding models.
6
+ """
7
+
8
+ import gradio as gr
9
+ from huggingface_hub import InferenceClient
10
+ import os
11
+
12
# Initialize the client - uses HF Inference API (FREE!)
# You can set HF_TOKEN as environment variable or it uses anonymous access
# (per the README tip, adding HF_TOKEN as a secret raises the rate limits).
HF_TOKEN = os.getenv("HF_TOKEN", None)

# Best FREE models for coding via HF Inference API.
# Maps the display name shown in the UI dropdown to its HF Hub repo id;
# the display names here must match the dropdown `choices` in the UI below.
MODELS = {
    "Qwen2.5-Coder-32B (Best)": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen2.5-Coder-7B (Fast)": "Qwen/Qwen2.5-Coder-7B-Instruct",
    "CodeLlama-34B": "codellama/CodeLlama-34b-Instruct-hf",
    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.3",
    "Llama-3.1-8B": "meta-llama/Llama-3.1-8B-Instruct",
}

# Fallback repo id used whenever a display name is missing from MODELS.
DEFAULT_MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
26
+
27
def get_client(model_id: str) -> InferenceClient:
    """Build an InferenceClient for *model_id*, authenticated with HF_TOKEN when set."""
    client = InferenceClient(model_id, token=HF_TOKEN)
    return client
30
+
31
def chat_with_model(message: str, history: list, model_name: str, temperature: float, max_tokens: int):
    """Stream progressively longer partial replies from the selected model.

    Args:
        message: The user's new chat message.
        history: Prior turns as (user, assistant) pairs.
        model_name: Dropdown display name; resolved via MODELS with
            DEFAULT_MODEL as the fallback.
        temperature: Sampling temperature forwarded to the API.
        max_tokens: Completion length cap forwarded to the API.

    Yields:
        The accumulated assistant reply after each streamed chunk, or a single
        error string if the API call fails.
    """
    client = get_client(MODELS.get(model_name, DEFAULT_MODEL))

    system_prompt = """You are an expert coding assistant. You help with:
- Writing clean, efficient, well-documented code
- Debugging and fixing issues
- Explaining code and programming concepts
- Code reviews and best practices
- Answering programming questions

Always provide code examples in markdown code blocks with the language specified."""

    # Replay the conversation so far, then append the new user turn.
    convo = [{"role": "system", "content": system_prompt}]
    for past_user, past_assistant in history:
        convo.append({"role": "user", "content": past_user})
        if past_assistant:
            convo.append({"role": "assistant", "content": past_assistant})
    convo.append({"role": "user", "content": message})

    try:
        partial = ""
        stream = client.chat_completion(
            messages=convo,
            max_tokens=max_tokens,
            temperature=temperature,
            stream=True,
        )
        for event in stream:
            delta = event.choices[0].delta.content
            if delta:
                partial += delta
                yield partial
    except Exception as e:
        yield f"❌ Error: {str(e)}\n\nTip: Try a different model or check your connection."
75
+
76
def extract_fenced_code(text: str) -> str:
    """Return the contents of the first markdown code fence in *text*.

    Module-internal helper for generate_code. If the fence's first line looks
    like a bare language tag (e.g. ``python``, ``cpp``, ``c#`` — a short run
    of identifier-ish characters, possibly empty), it is dropped. When no
    fence is present, *text* is returned unchanged.
    """
    if "```" not in text:
        return text
    parts = text.split("```")
    if len(parts) < 2:
        return text
    block = parts[1]
    first_line, newline, rest = block.partition("\n")
    tag = first_line.strip().lower()
    # A short token (or an empty line) before the first newline is treated as
    # the fence's language tag; a first line containing spaces or other
    # punctuation is real code and is kept.
    if newline and len(tag) <= 20 and all(c.isalnum() or c in "+#._-" for c in tag):
        return rest.strip()
    return block.strip()


def generate_code(prompt: str, language: str, model_name: str):
    """Generate *language* source code from a natural-language description.

    Args:
        prompt: Description of what the user wants built.
        language: Target programming language (interpolated into the prompt).
        model_name: Dropdown display name; resolved via MODELS with
            DEFAULT_MODEL as the fallback.

    Returns:
        The generated code with its markdown fence stripped when possible, a
        usage hint for empty input, or an error string if the API call fails.
    """
    if not prompt.strip():
        return "Please describe what you want to build."

    model_id = MODELS.get(model_name, DEFAULT_MODEL)
    client = get_client(model_id)

    full_prompt = f"""Write {language} code for the following:

{prompt}

Requirements:
- Clean, readable, well-commented code
- Follow {language} best practices
- Include error handling where appropriate

Respond with ONLY the code in a markdown code block, no explanations."""

    try:
        response = client.chat_completion(
            messages=[{"role": "user", "content": full_prompt}],
            max_tokens=2048,
            temperature=0.3
        )
        result = response.choices[0].message.content
        # BUG FIX: the old extraction only removed a fence tag exactly equal
        # to language.lower(), so replies tagged "cpp" (for "C++"), "py",
        # "c#" etc. leaked the tag line into the gr.Code output. The helper
        # strips any plausible tag instead.
        return extract_fenced_code(result)
    except Exception as e:
        return f"❌ Error: {str(e)}"
120
+
121
def explain_code(code: str, model_name: str):
    """Ask the selected model for a structured, detailed explanation of *code*.

    Returns markdown text on success, a usage hint when the input is blank,
    or an error string if the API call fails.
    """
    if not code.strip():
        return "Please paste some code to explain."

    client = get_client(MODELS.get(model_name, DEFAULT_MODEL))

    request = f"""Analyze and explain this code in detail:

```
{code}
```

Please provide:
1. **Purpose**: What does this code do?
2. **How it works**: Step-by-step explanation
3. **Key concepts**: Important programming concepts used
4. **Complexity**: Time and space complexity if applicable
5. **Potential improvements**: Suggestions for better code"""

    try:
        reply = client.chat_completion(
            messages=[{"role": "user", "content": request}],
            max_tokens=2048,
            temperature=0.5,
        )
        return reply.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"
152
+
153
def fix_code(code: str, error_msg: str, model_name: str):
    """Ask the selected model to diagnose and repair *code*.

    Args:
        code: The buggy source to fix.
        error_msg: Optional error text; a generic description is substituted
            when blank.
        model_name: Dropdown display name; resolved via MODELS with
            DEFAULT_MODEL as the fallback.

    Returns markdown text on success, a usage hint when the input is blank,
    or an error string if the API call fails.
    """
    if not code.strip():
        return "Please paste the code you want to fix."

    client = get_client(MODELS.get(model_name, DEFAULT_MODEL))

    # Fall back to a generic problem statement when no error text was given.
    problem = error_msg if error_msg.strip() else "The code doesn't work correctly"

    request = f"""Fix the following buggy code:

**Code:**
```
{code}
```

**Error/Problem:**
{problem}

Please:
1. Identify what's wrong
2. Explain the bug
3. Provide the corrected code in a code block
4. Explain the fix"""

    try:
        reply = client.chat_completion(
            messages=[{"role": "user", "content": request}],
            max_tokens=2048,
            temperature=0.3,
        )
        return reply.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"
187
+
188
def review_code(code: str, model_name: str):
    """Ask the selected model for a constructive review of *code*.

    Returns markdown text on success, a usage hint when the input is blank,
    or an error string if the API call fails.
    """
    if not code.strip():
        return "Please paste code to review."

    client = get_client(MODELS.get(model_name, DEFAULT_MODEL))

    request = f"""Review this code and provide feedback:

```
{code}
```

Please evaluate:
1. **Code Quality**: Is it clean, readable, well-organized?
2. **Best Practices**: Does it follow language conventions?
3. **Bugs/Issues**: Any potential bugs or problems?
4. **Performance**: Any performance concerns?
5. **Security**: Any security issues?
6. **Suggestions**: Specific improvements with code examples

Be constructive and specific."""

    try:
        reply = client.chat_completion(
            messages=[{"role": "user", "content": request}],
            max_tokens=2048,
            temperature=0.5,
        )
        return reply.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"
222
+
223
+
224
# ============== BUILD THE UI ==============
# Builds the Gradio Blocks app: a shared settings row (model / temperature /
# max tokens) feeding five tabs (Chat, Generate, Explain, Fix, Review), then
# wires each tab's button to the matching handler defined above.

with gr.Blocks(
    title="🔥 GOD Coding Machine",
    theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue"),
) as demo:

    gr.Markdown("""
    # 🔥 FREE GOD Coding Machine
    ### AI Coding Assistant - Powered by Open Source Models via HuggingFace

    **100% FREE** • Uses HuggingFace Inference API • No API key required (or add your own for higher limits)
    """)

    # Settings row — these three components are shared inputs for every tab.
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(MODELS.keys()),
            value="Qwen2.5-Coder-32B (Best)",
            label="🤖 Model",
            scale=2
        )
        temperature = gr.Slider(
            minimum=0.0, maximum=1.0, value=0.7, step=0.1,
            label="🌡️ Temperature",
            scale=1
        )
        max_tokens = gr.Slider(
            minimum=256, maximum=4096, value=2048, step=256,
            label="📏 Max Tokens",
            scale=1
        )

    with gr.Tabs():

        # ===== CHAT TAB =====
        with gr.TabItem("💬 Chat"):
            # NOTE(review): `bubble_full_width` and tuple-style chat history
            # are deprecated/removed in recent Gradio releases — confirm
            # against the pinned sdk_version before upgrading.
            chatbot = gr.Chatbot(
                height=450,
                show_label=False,
                avatar_images=(None, "https://em-content.zobj.net/source/twitter/376/robot_1f916.png"),
                bubble_full_width=False
            )

            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Ask me anything about coding... (Press Enter to send)",
                    show_label=False,
                    scale=9,
                    container=False
                )
                send_btn = gr.Button("Send", variant="primary", scale=1)

            clear_btn = gr.Button("🗑️ Clear Chat")

            gr.Examples(
                examples=[
                    "Write a Python function to find the longest palindromic substring",
                    "Explain how async/await works in JavaScript",
                    "What's the difference between a list and a tuple in Python?",
                    "Write a REST API in FastAPI with CRUD operations for a todo app",
                    "How do I implement a binary search tree?",
                ],
                inputs=msg
            )

        # ===== GENERATE TAB =====
        with gr.TabItem("⚡ Generate Code"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(
                        label="📝 Describe what you want to build",
                        placeholder="A function that validates email addresses...",
                        lines=4
                    )
                    gen_lang = gr.Dropdown(
                        choices=["Python", "JavaScript", "TypeScript", "Rust", "Go", "Java", "C++", "C#", "Ruby", "PHP", "Swift", "Kotlin", "SQL", "Bash"],
                        value="Python",
                        label="💻 Language"
                    )
                    gen_btn = gr.Button("🚀 Generate Code", variant="primary", size="lg")

                with gr.Column():
                    # NOTE(review): the gr.Code syntax highlighting is fixed
                    # to "python" even when another language is selected.
                    gen_output = gr.Code(label="Generated Code", language="python", lines=20)

            gr.Examples(
                examples=[
                    ["A function to merge two sorted linked lists", "Python"],
                    ["A React hook for debouncing input", "TypeScript"],
                    ["A function to validate a credit card number using Luhn algorithm", "JavaScript"],
                    ["A thread-safe singleton pattern", "Java"],
                ],
                inputs=[gen_prompt, gen_lang]
            )

        # ===== EXPLAIN TAB =====
        with gr.TabItem("🔍 Explain Code"):
            with gr.Row():
                with gr.Column():
                    explain_input = gr.Code(
                        label="📋 Paste code to explain",
                        language="python",
                        lines=15
                    )
                    explain_btn = gr.Button("🔍 Explain", variant="primary", size="lg")

                with gr.Column():
                    explain_output = gr.Markdown(label="Explanation")

        # ===== FIX TAB =====
        with gr.TabItem("🔧 Fix Code"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(
                        label="🐛 Paste buggy code",
                        language="python",
                        lines=12
                    )
                    fix_error = gr.Textbox(
                        label="❌ Error message (optional)",
                        placeholder="Paste the error message...",
                        lines=3
                    )
                    fix_btn = gr.Button("🔧 Fix Code", variant="primary", size="lg")

                with gr.Column():
                    fix_output = gr.Markdown(label="Solution")

        # ===== REVIEW TAB =====
        with gr.TabItem("📝 Code Review"):
            with gr.Row():
                with gr.Column():
                    review_input = gr.Code(
                        label="📋 Paste code to review",
                        language="python",
                        lines=15
                    )
                    review_btn = gr.Button("📝 Review Code", variant="primary", size="lg")

                with gr.Column():
                    review_output = gr.Markdown(label="Review")

    # Footer
    gr.Markdown("""
    ---
    <center>

    **Models**: [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) (32B) • [CodeLlama](https://huggingface.co/codellama) (34B) • [Mistral](https://huggingface.co/mistralai) (7B)

    💡 **Tip**: Add your HuggingFace token as `HF_TOKEN` secret for higher rate limits

    </center>
    """)

    # ===== EVENT HANDLERS =====

    def respond(message, history, model, temp, max_tok):
        # Adapter between the streaming generator and the Chatbot component:
        # re-yields the growing reply as the last (user, assistant) pair and
        # clears the textbox on every update.
        history = history or []
        response = ""
        for chunk in chat_with_model(message, history, model, temp, max_tok):
            response = chunk
            yield history + [[message, response]], ""

    # Chat events — Enter key and Send button share the same handler.
    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send_btn.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    # Clearing replaces the chat history with an empty list.
    clear_btn.click(lambda: [], None, chatbot)

    # Generate events
    gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown], gen_output)

    # Explain events
    explain_btn.click(explain_code, [explain_input, model_dropdown], explain_output)

    # Fix events
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown], fix_output)

    # Review events
    review_btn.click(review_code, [review_input, model_dropdown], review_output)


# Launch
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio>=4.0.0
2
+ huggingface_hub>=0.20.0