# Ai-genius / app.py
# Source: maria355's Hugging Face Space (commit ab3d7ba, "Update app.py")
import os
import gradio as gr
import requests
# System prompt for the code assistant.
# Sent as the first "system" message of every Groq chat-completion request
# (see format_chat_history); defines the assistant's persona and answer style.
SYSTEM_PROMPT: str = """
You are an advanced Code Assistant, designed to help users with programming tasks. Your capabilities include:
1. Answering coding questions and explaining programming concepts
2. Debugging user code and suggesting improvements
3. Optimizing code for better performance or readability
4. Generating code based on user requirements
5. Providing step-by-step explanations for complex algorithms
Always respond with well-structured, well-commented code that follows best practices. For debugging tasks, clearly identify the issues and explain your solutions. When explaining concepts, use clear language and relevant examples.
When generating code:
- Include comprehensive comments
- Follow style guides appropriate for the language
- Handle edge cases and potential errors
- Structure code for readability and maintainability
For code optimization:
- Identify inefficiencies in time or space complexity
- Suggest alternative algorithms or data structures
- Explain the trade-offs involved in your recommendations
- Provide before/after performance estimates when possible
Your goal is to not only solve the immediate problem but also help users improve their overall coding skills and understanding.
"""
# Function to interact with Groq API
def query_groq(messages, model="llama-3.3-70b-versatile"):
    """Send a chat-completion request to the Groq API.

    Args:
        messages: List of {"role": ..., "content": ...} dicts in OpenAI
            chat format (see format_chat_history).
        model: Groq model identifier to request.

    Returns:
        The assistant's reply text on success, or an error string that
        contains the word "Error" — callers (process_input) rely on that
        substring to decide whether to fall back to HuggingFace.
    """
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        return "Error: Groq API key not set. Please set the GROQ_API_KEY environment variable."

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    data = {
        "messages": messages,
        "model": model,
        "temperature": 0.5,
        "max_tokens": 4000,
        "top_p": 0.9,
        "stream": False,
    }
    try:
        # Explicit timeout: requests has NO default timeout, so without it
        # an unreachable API would hang this call (and the UI) forever.
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=60,
        )
        if response.status_code == 200:
            return response.json()["choices"][0]["message"]["content"]
        return f"Error: {response.status_code}, {response.text}"
    except Exception as e:
        # Bug fix: the previous message said "Request error", which does not
        # contain "Error", so process_input's fallback check never fired on
        # network failures. The message must include "Error".
        return f"Request Error: {str(e)}"
# Function to use HuggingFace model as a fallback
def query_huggingface(prompt, model_id="microsoft/DialoGPT-medium"):
    """Query the HuggingFace Inference API as a fallback text generator.

    Args:
        prompt: Plain-text prompt to send to the model.
        model_id: HuggingFace model repository id to call.

    Returns:
        Generated text on success, or an error string containing "Error" —
        process_input checks for that substring to decide whether to try
        the local transformers pipeline next.
    """
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        return "Error: HuggingFace token not set. Please set the HF_TOKEN environment variable."
    try:
        # Use HuggingFace Inference API directly
        api_url = f"https://api-inference.huggingface.co/models/{model_id}"
        headers = {"Authorization": f"Bearer {hf_token}"}
        # Format the payload for text generation
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 1024,
                "temperature": 0.7,
                "top_p": 0.95,
                "do_sample": True,
                "return_full_text": False,
            },
        }
        # Explicit timeout: requests would otherwise block indefinitely
        # if the inference endpoint is slow or unreachable.
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        if response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and len(result) > 0:
                return result[0].get("generated_text", "No response generated")
            return "No response generated"
        # Bug fix: these messages previously said "error" (lowercase), which
        # the '"Error" in response' fallback check in process_input missed,
        # so the local-pipeline fallback never triggered on HF failure.
        return f"HuggingFace API Error: {response.status_code}, {response.text}"
    except Exception as e:
        return f"HuggingFace API Error: {str(e)}"
# Alternative HuggingFace function using transformers pipeline (if available)
def query_huggingface_pipeline(prompt):
    """Last-resort fallback: generate a completion locally with GPT-2.

    Imports transformers lazily so the app still runs when the library is
    absent. Returns the generated continuation (prompt stripped off), or an
    error string describing what went wrong.
    """
    try:
        from transformers import pipeline

        # Small, widely-available model; device=-1 forces CPU execution.
        text_generator = pipeline('text-generation', model='gpt2', device=-1)
        outputs = text_generator(
            prompt,
            # Cap total length relative to the prompt so output stays short.
            max_length=len(prompt.split()) + 200,
            temperature=0.7,
            pad_token_id=50256,
            do_sample=True,
            truncation=True,
        )
        # The pipeline echoes the prompt; slice it off and trim whitespace.
        full_text = outputs[0]['generated_text']
        return full_text[len(prompt):].strip()
    except ImportError:
        return "Error: transformers library not installed. Please install it with: pip install transformers torch"
    except Exception as e:
        return f"Pipeline error: {str(e)}"
# Chat history handler
def format_chat_history(chat_history, system_prompt=None):
    """Convert Gradio-style chat history into OpenAI-format messages.

    Args:
        chat_history: List of [user_msg, assistant_msg] pairs; assistant_msg
            may be None for the turn that is still awaiting a reply.
        system_prompt: Optional system-prompt override; defaults to the
            module-level SYSTEM_PROMPT (backward-compatible addition).

    Returns:
        List of {"role": ..., "content": ...} dicts, starting with the
        system message, suitable for query_groq.
    """
    if system_prompt is None:
        system_prompt = SYSTEM_PROMPT
    formatted_messages = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in chat_history:
        formatted_messages.append({"role": "user", "content": user_msg})
        if assistant_msg:  # Skip None placeholders for pending turns
            formatted_messages.append({"role": "assistant", "content": assistant_msg})
    return formatted_messages
# Main function to process user input
def process_input(user_input, query_type, chat_history=None):
    """Handle one chat turn: wrap the input per query type, call the APIs
    (Groq first, HuggingFace fallbacks second), and update the history.

    Returns the chat history (list of [user, assistant] pairs) with the
    new turn's assistant slot filled in.
    """
    if chat_history is None:
        chat_history = []

    # Wrap the raw input in a task-specific instruction; "General query"
    # (or any unknown type) passes the input through unchanged.
    prompt_templates = {
        "Generate code": "Generate code for the following task: {}",
        "Debug code": "Debug the following code and explain the issues: {}",
        "Optimize code": "Optimize the following code and explain your improvements: {}",
        "Explain code": "Explain the following code in detail: {}",
    }
    template = prompt_templates.get(query_type)
    full_prompt = template.format(user_input) if template else user_input

    # Record the pending turn, then build the full message list for the API.
    chat_history.append([full_prompt, None])
    messages = format_chat_history(chat_history)

    try:
        response = query_groq(messages)

        # Fallback chain: Groq -> HuggingFace API -> local pipeline.
        # Each query_* function signals failure with "Error" in its output.
        if "Error" in response:
            print("Groq API failed, falling back to HuggingFace...")
            simple_prompt = f"Code Assistant: {full_prompt}\n\nResponse:"
            response = query_huggingface(simple_prompt)
            if "Error" in response:
                print("HuggingFace API failed, trying local pipeline...")
                response = query_huggingface_pipeline(simple_prompt)

        chat_history[-1][1] = response
        return chat_history
    except Exception as e:
        chat_history[-1][1] = f"An error occurred: {str(e)}"
        return chat_history
# Create Gradio interface
def create_demo():
    """Build and return the Gradio Blocks UI for the code assistant."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🤖 CodeCraft: AI Genius")
        gr.Markdown("Get help with coding questions, debugging, optimization, and code generation using AI.")

        with gr.Row():
            # Left column: query controls and input box.
            with gr.Column(scale=1):
                mode_selector = gr.Dropdown(
                    choices=["General query", "Generate code", "Debug code", "Optimize code", "Explain code"],
                    value="General query",
                    label="Query Type",
                )
                question_box = gr.Textbox(
                    placeholder="Enter your coding question or paste code here...",
                    label="Your Question or Code",
                    lines=8,
                )
                submit_button = gr.Button("Submit", variant="primary")
                clear_button = gr.Button("Clear Chat")

            # Right column: conversation display.
            with gr.Column(scale=2):
                chat_display = gr.Chatbot(
                    label="Assistant Response",
                    height=500,
                    bubble_full_width=False,
                )

        # Wire up the event handlers.
        submit_button.click(
            process_input,
            inputs=[question_box, mode_selector, chat_display],
            outputs=[chat_display],
        )
        clear_button.click(
            lambda: [],  # reset the chatbot to an empty history
            outputs=[chat_display],
        )

        gr.Markdown("""
### Usage Tips:
- For general coding questions, just type your question
- For code generation, select "Generate code" and describe what you need
- For debugging or optimization, paste your code directly in the input box
- Be specific in your questions for better results
### API Requirements:
- **GROQ_API_KEY**: Required for primary AI responses
- **HF_TOKEN**: Optional, for HuggingFace fallback
### Model Information:
- Primary: Groq's llama-3.3-70b-versatile
- Fallback: HuggingFace models or local transformers
""")
    return demo
# For local development and testing
if __name__ == "__main__":
    # Warn (but don't abort) when the primary API key is missing — the
    # fallbacks may still work, and the UI should explain the problem.
    if not os.getenv("GROQ_API_KEY"):
        print("⚠ Warning: GROQ_API_KEY environment variable not set")
        print("Please set your Groq API key before running the application")
        print("Example: export GROQ_API_KEY=your_api_key_here")
    print("⚙ Launching Gradio interface...")
    demo = create_demo()
    demo.launch()
else:
    # For Hugging Face Spaces deployment: Spaces imports this module and
    # auto-serves the module-level `demo` object, so no launch() call here.
    demo = create_demo()