Upload 4 files
- .gitattributes +35 -35
- app.py +98 -0
- llm_handler.py +56 -0
- requirements.txt +5 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
(all 35 rules were removed and re-added with identical content)
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,98 @@
import gradio as gr
from llm_handler import generate_response

# Predefined system prompts
SYSTEM_PROMPTS = [
    "You are a professional coding assistant",
    "You are a professional teaching assistant",
    "You are a professional email writer",
    "Other"
]

# Available models
MODELS = [
    "Claude Haiku",
    "DeepSeek",
    "Claude Premium",
    "GPT Pro"
]


def handle_system_prompt_change(choice):
    # Show the custom prompt box only when "Other" is selected
    if choice == "Other":
        return gr.Textbox(visible=True)
    return gr.Textbox(visible=False)


def process_input(user_prompt, model_choice, system_prompt_choice, custom_system_prompt):
    # Use the custom system prompt if "Other" is selected
    final_system_prompt = custom_system_prompt if system_prompt_choice == "Other" else system_prompt_choice

    if not user_prompt:
        return "Please enter a prompt"

    # Generate a response via llm_handler
    response = generate_response(user_prompt, model_choice, final_system_prompt)
    return response


# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# LLM Interface")

    with gr.Row():
        with gr.Column():
            user_prompt = gr.Textbox(
                label="Your Prompt",
                placeholder="Enter your prompt here...",
                lines=5
            )

            model_choice = gr.Dropdown(
                choices=MODELS,
                label="Select Model",
                value=MODELS[0]
            )

            system_prompt_choice = gr.Dropdown(
                choices=SYSTEM_PROMPTS,
                label="Select System Prompt",
                value=SYSTEM_PROMPTS[0]
            )

            custom_system_prompt = gr.Textbox(
                label="Custom System Prompt",
                placeholder="Enter your custom system prompt...",
                visible=False
            )

            submit_btn = gr.Button("Generate Response")

        with gr.Column():
            output = gr.Textbox(
                label="Response",
                lines=10,
                show_copy_button=True  # Add a copy button to the output
            )

    # Show or hide the custom system prompt box when the dropdown changes
    system_prompt_choice.change(
        fn=handle_system_prompt_change,
        inputs=[system_prompt_choice],
        outputs=[custom_system_prompt]
    )

    # Handle submit button click
    submit_btn.click(
        fn=process_input,
        inputs=[
            user_prompt,
            model_choice,
            system_prompt_choice,
            custom_system_prompt
        ],
        outputs=[output]
    )

if __name__ == "__main__":
    demo.launch()
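A note on the visibility toggle in app.py: returning a fresh gr.Textbox from the change handler is treated as a component update in recent Gradio releases. An equivalent, arguably more explicit sketch (assuming a Gradio 3.x/4.x version where gr.update is available) would be:

def handle_system_prompt_change(choice):
    # Hypothetical alternative: update only the `visible` property of the
    # existing textbox instead of returning a new component.
    return gr.update(visible=(choice == "Other"))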
llm_handler.py
ADDED
@@ -0,0 +1,56 @@
import os
from openai import OpenAI
from dotenv import load_dotenv
import logging

load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def generate_response(user_prompt, model_name, system_prompt):
    try:
        # Get the API key from the environment
        api_key = os.getenv("OPENROUTER_API_KEY")

        if not api_key:
            return "Error: API key not found in environment variables"

        # Initialize the OpenRouter client using the OpenAI SDK with a custom base URL
        client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=api_key,
            default_headers={
                "HTTP-Referer": "null",
                "X-Title": "LLMInterface",
            }
        )

        # Map display names to actual model names
        model_mapping = {
            "Claude Haiku": "anthropic/claude-3.5-haiku-20241022:beta",
            "DeepSeek": "deepseek/deepseek-r1:free",
            "Claude Premium": "anthropic/claude-3.5-sonnet",
            "GPT Pro": "openai/gpt-4-0125-preview"
        }

        # Get the actual model name
        actual_model = model_mapping.get(model_name)
        if not actual_model:
            return "Error: Invalid model selection"

        # Make the API call
        response = client.chat.completions.create(
            model=actual_model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        )

        return response.choices[0].message.content

    except Exception as e:
        logger.error(f"Error in generate_response: {str(e)}")
        return f"Error occurred: {str(e)}"
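For a quick check of the handler outside the Gradio UI, a minimal sketch follows (the file name smoke_test.py and the prompt text are hypothetical; it assumes OPENROUTER_API_KEY is set in the environment or in a local .env file):

# smoke_test.py (hypothetical) -- call the handler directly, bypassing the UI.
from llm_handler import generate_response

if __name__ == "__main__":
    reply = generate_response(
        user_prompt="Summarize what OpenRouter does in one sentence.",
        model_name="Claude Haiku",
        system_prompt="You are a professional coding assistant",
    )
    print(reply)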
requirements.txt
ADDED
@@ -0,0 +1,5 @@
streamlit
openai==1.58.1
python-dotenv==1.0.0
gradio>=3.0
openrouter