Spaces:
Build error
Build error
import streamlit as st
import requests
import json
import os
import re

# Hugging Face router endpoint (Novita provider, OpenAI-compatible chat API).
HF_API_URL = "https://router.huggingface.co/novita/v3/openai/chat/completions"

# Read the API token from the environment; a missing HF_PROJECT_TOKEN raises
# KeyError at startup, which is preferable to silently sending no credentials.
HF_TOKEN = os.environ["HF_PROJECT_TOKEN"]

# Shared auth header for every request to the router.
HEADERS = {
    "Authorization": f"Bearer {HF_TOKEN}",
}
# Evaluation prompt sent to the model. The {user_prompt} placeholder is filled
# in via str.format at call time; every literal brace in the JSON skeleton is
# doubled ({{ / }}) so that .format() leaves it intact.
PROMPT_TEMPLATE = """
You are a prompt evaluation assistant called ๐ฎ PromptPolice. Evaluate the following user prompt in JSON format using the structure provided below.
Prompt:
{user_prompt}
Evaluate based on the following criteria:
- Clarity (1-5)
- Context (1-5)
- Specificity (1-5)
- Intent (1-5)
Also include a suggestion for improving the prompt.
Respond ONLY in this JSON format:
{{
"prompt": "...",
"evaluation": {{
"Clarity": ...,
"Context": ...,
"Specificity": ...,
"Intent": ...,
"suggestion": "..."
}}
}}
"""
def evaluate_prompt(user_prompt):
    """Score a user's prompt with the DeepSeek model via the HF router.

    Args:
        user_prompt: Raw prompt text to evaluate.

    Returns:
        dict: The parsed JSON evaluation on success; the raw API response
        if it lacks a "choices" entry; or ``{"error": "..."}`` when the
        request or JSON extraction fails.
    """
    # Insert the user's prompt into the evaluation template.
    formatted_prompt = PROMPT_TEMPLATE.format(user_prompt=user_prompt)
    payload = {
        "messages": [
            {
                "role": "user",
                "content": formatted_prompt,
            }
        ],
        "model": "deepseek/deepseek-v3-0324",
    }
    try:
        # Fix: without a timeout, a stalled router would hang the Streamlit
        # worker indefinitely; a timeout raises RequestException instead,
        # which the handler below turns into an error dict.
        response = requests.post(HF_API_URL, headers=HEADERS, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()
        # Pull the model's text out of the OpenAI-style response envelope.
        if isinstance(result, dict) and "choices" in result:
            raw_text = result["choices"][0]["message"]["content"]
            try:
                # Grab the first '{' through the last '}' so surrounding
                # prose or code fences around the JSON block are tolerated.
                json_match = re.search(r'\{[\s\S]*\}', raw_text)
                if json_match:
                    return json.loads(json_match.group())
                return {"error": "No valid JSON object found in model output."}
            except ValueError as e:
                # json.JSONDecodeError subclasses ValueError, so one clause covers both.
                return {"error": f"Failed to parse model output: {str(e)}"}
        # Unexpected response shape: surface it to the caller unchanged.
        return result
    except requests.exceptions.RequestException as e:
        return {"error": f"API request failed: {str(e)}"}
# --- Streamlit UI ---
st.set_page_config(page_title="๐ฎ PromptPolice", layout="centered")
st.title("๐ฎ PromptPolice: Prompt Evaluator")

user_prompt = st.text_area("Paste your AI prompt here:", height=200)

# Run the evaluation only after the button is pressed and input is non-empty.
if st.button("Evaluate Prompt") and user_prompt:
    with st.spinner("Evaluating your prompt with DeepSeek..."):
        evaluation_result = evaluate_prompt(user_prompt)
    st.subheader("Evaluation Result (JSON):")
    st.code(json.dumps(evaluation_result, indent=2), language='json')