import os
import random

import gradio as gr
from huggingface_hub import InferenceClient


def get_coding_samples():
    """Diverse coding challenges"""
    return [
        "Write a function to reverse a linked list",
        "Implement binary search in Python",
        "Create a REST API endpoint for user authentication",
        "Write a SQL query to find duplicate records",
        "Build a React component for a todo list",
        "Implement quicksort algorithm",
        "Parse JSON and handle errors gracefully",
        "Create a decorator for timing function execution",
        "Write regex to validate email addresses",
        "Implement an LRU cache in Python",
        "Build a simple web scraper with BeautifulSoup",
        "Create a custom hook in React",
        "Write unit tests for a calculator function",
        "Implement depth-first search for a graph",
        "Build a CLI tool with argparse",
        "Create a database migration script",
        "Write a function to detect palindromes",
        "Implement JWT token authentication",
        "Build a responsive navbar with Tailwind CSS",
        "Create a rate limiter middleware",
    ]


def create_coder_system_message():
    """System prompt for coding assistance"""
    return """You are CoderAI, an expert programming assistant.
**Your Approach:**
1. Understand the requirements clearly
2. Provide clean, working code with comments
3. Explain key concepts and design decisions
4. Suggest best practices and optimizations
5. Include error handling where appropriate
**Code Quality:**
- Write readable, maintainable code
- Follow language conventions and style guides
- Use meaningful variable names
- Add docstrings/comments for complex logic
- Consider edge cases and error handling
**Format:**
- Use markdown code blocks with language tags
- Explain code sections when helpful
- Provide usage examples
- Mention dependencies if needed
Be practical, efficient, and educational."""


def respond(message, history, system_message, max_tokens, temperature, top_p, model_choice):
    """Streaming response with error handling"""
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        yield "❌ **Error:** HF_TOKEN not found. Set it in your environment or Spaces secrets."
        return

    # Model selection
    models = {
        "DeepSeek Coder": "deepseek-ai/deepseek-coder-33b-instruct",
        "CodeLlama": "codellama/CodeLlama-34b-Instruct-hf",
        "Qwen Coder": "Qwen/Qwen2.5-Coder-32B-Instruct",
    }
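
    # Unrecognized dropdown values fall back to Qwen Coder.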
    client = InferenceClient(
        model=models.get(model_choice, models["Qwen Coder"]),
        token=hf_token
    )
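
    # Build an OpenAI-style message list: system prompt, prior turns, then the new user message.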
    messages = [{"role": "system", "content": system_message}]
    for msg in history:
        if isinstance(msg, dict):
            role = msg.get("role", "user")
            content = msg.get("content", "")
        else:
            role = "user"
            content = str(msg)
        if content:
            messages.append({"role": role, "content": content})
    messages.append({"role": "user", "content": message})
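
    # Stream the completion so partial output reaches the UI as tokens arrive.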
    try:
        response_text = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True
        ):
            if chunk.choices[0].delta.content:
                response_text += chunk.choices[0].delta.content
                yield response_text
    except Exception as e:
        error_msg = f"❌ **Error:** {str(e)}\n\nTry:\n- Checking your HF_TOKEN\n- Simplifying the request\n- Using a different model"
        yield error_msg


def get_random_sample():
    """Get random coding challenge"""
    return random.choice(get_coding_samples())


# Gradio Interface
with gr.Blocks(title="💻 CoderAI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 💻 **CoderAI**\n*Your AI Programming Assistant*")

    chatbot = gr.Chatbot(
        height=500,
        type='messages',
        label="💬 Conversation",
        show_copy_button=True
    )

    msg = gr.Textbox(
        placeholder="Ask a coding question or describe what you need...",
        show_label=False,
        scale=4
    )

    with gr.Row():
        submit = gr.Button("🚀 Code", variant="primary", scale=1)
        clear = gr.Button("🗑️ Clear", variant="secondary", scale=1)
        sample = gr.Button("🎲 Random", variant="secondary", scale=1)

    with gr.Accordion("⚙️ Advanced Settings", open=False):
        model_dropdown = gr.Dropdown(
            choices=["Qwen Coder", "DeepSeek Coder", "CodeLlama"],
            value="Qwen Coder",
            label="Model Selection"
        )
        temp_slider = gr.Slider(0.1, 1.0, value=0.2, step=0.1, label="Temperature")
        tokens_slider = gr.Slider(512, 4096, value=2048, step=256, label="Max Tokens")
        top_p_slider = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")

    with gr.Accordion("💡 Help & Examples", open=False):
        gr.Markdown("""
**Tips:**
- Be specific about language and requirements
- Mention if you need comments or tests
- Ask for explanations of complex parts
- Request specific design patterns or styles
**What I can help with:**
- Writing functions and classes
- Debugging code
- Code review and optimization
- Algorithm implementation
- API design and database queries
- Testing and documentation
""")

    gr.Examples(
        examples=[
            ["Write a Python function to check if a string is a palindrome"],
            ["Create a React component for a searchable dropdown"],
            ["Implement a binary tree traversal in JavaScript"],
            ["Write a SQL query to find the top 5 customers by revenue"],
            ["Build a simple Flask API with error handling"],
            ["Create a custom validation decorator in Python"],
        ],
        inputs=msg
    )

    system_msg = gr.State(create_coder_system_message())
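
    # chat_response is a generator: each yield pushes the partially streamed reply into the Chatbot.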
    def chat_response(message, history, sys_msg, max_tok, temp, top_p, model):
        """Handle chat with streaming"""
        if not message.strip():
            yield history, ""
            return
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": ""})
        # Pass only the earlier turns; respond() appends the new user message itself.
        for response in respond(message, history[:-2], sys_msg, max_tok, temp, top_p, model):
            history[-1]["content"] = response
            yield history, ""

    def clear_chat():
        return [], ""
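
    # Wire UI events: pressing Enter in the textbox and clicking the Code button run the same handler.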
    msg.submit(
        chat_response,
        [msg, chatbot, system_msg, tokens_slider, temp_slider, top_p_slider, model_dropdown],
        [chatbot, msg]
    )
    submit.click(
        chat_response,
        [msg, chatbot, system_msg, tokens_slider, temp_slider, top_p_slider, model_dropdown],
        [chatbot, msg]
    )
    clear.click(clear_chat, outputs=[chatbot, msg])
    sample.click(get_random_sample, outputs=msg)


demo.launch()