File size: 2,792 Bytes
b247a75
ab8d2ed
a95ec3a
444c7d3
0a8a30a
 
444c7d3
ab8d2ed
444c7d3
b247a75
ab8d2ed
 
 
 
444c7d3
ab8d2ed
444c7d3
b247a75
444c7d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
edd62bb
c77dbbe
ab8d2ed
444c7d3
8d7a9f0
 
ab8d2ed
8d7a9f0
 
 
 
 
 
 
ab8d2ed
953136d
8d7a9f0
444c7d3
b553816
8d7a9f0
444c7d3
8d7a9f0
 
444c7d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab8d2ed
b247a75
444c7d3
b247a75
444c7d3
 
 
b247a75
444c7d3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import streamlit as st
import requests
import json
import os
import re


# Hugging Face Inference Router endpoint (Novita provider) exposing an
# OpenAI-compatible chat-completions API.
HF_API_URL = "https://router.huggingface.co/novita/v3/openai/chat/completions"
# Read at import time: raises KeyError immediately if HF_PROJECT_TOKEN is
# unset, so a misconfigured deployment fails fast before the UI renders.
HF_TOKEN = os.environ["HF_PROJECT_TOKEN"]

# Shared auth header for every request to the router.
HEADERS = {
    "Authorization": f"Bearer {HF_TOKEN}",
}

# Instruction template sent to the model. `{user_prompt}` is the only
# str.format() placeholder; the doubled braces `{{` / `}}` escape the literal
# JSON braces so .format() does not treat them as fields.
PROMPT_TEMPLATE = """
You are a prompt evaluation assistant called 👮 PromptPolice. Evaluate the following user prompt in JSON format using the structure provided below.
Prompt:
{user_prompt}
Evaluate based on the following criteria:
- Clarity (1-5)
- Context (1-5)
- Specificity (1-5)
- Intent (1-5)
Also include a suggestion for improving the prompt.
Respond ONLY in this JSON format:
{{
  "prompt": "...",
  "evaluation": {{
    "Clarity": ..., 
    "Context": ..., 
    "Specificity": ..., 
    "Intent": ..., 
    "suggestion": "..."
  }}
}}
"""

def evaluate_prompt(user_prompt):
    """Evaluate *user_prompt* with the DeepSeek model via the HF router.

    Returns the parsed evaluation dict on success, or a dict with a single
    "error" key describing the failure (network error, non-JSON API body,
    unexpected response shape, or unparseable model output). Never raises,
    so the Streamlit caller can always render the result.
    """
    # Insert the user's prompt into the evaluation instructions.
    formatted_prompt = PROMPT_TEMPLATE.format(user_prompt=user_prompt)

    payload = {
        "messages": [
            {
                "role": "user",
                "content": formatted_prompt
            }
        ],
        "model": "deepseek/deepseek-v3-0324"
    }

    try:
        # Send request to DeepSeek API. The timeout keeps a stalled router
        # from hanging the Streamlit worker forever (original had none).
        response = requests.post(HF_API_URL, headers=HEADERS, json=payload,
                                 timeout=60)
        response.raise_for_status()
        result = response.json()
    except requests.exceptions.RequestException as e:
        return {"error": f"API request failed: {str(e)}"}
    except ValueError as e:
        # response body was 2xx but not valid JSON
        return {"error": f"Failed to parse model output: {str(e)}"}

    # OpenAI-compatible responses carry the text at choices[0].message.content.
    if isinstance(result, dict) and "choices" in result:
        try:
            raw_text = result["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as e:
            # "choices" present but not shaped as expected
            return {"error": f"Failed to parse model output: {str(e)}"}
        # The model is asked to reply with JSON only, but may wrap it in
        # prose/markdown; grab the first-to-last brace span greedily.
        json_match = re.search(r'\{[\s\S]*\}', raw_text)
        if not json_match:
            return {"error": "No valid JSON object found in model output."}
        try:
            return json.loads(json_match.group())
        except json.JSONDecodeError as e:
            return {"error": f"Failed to parse model output: {str(e)}"}
    # Anything else (e.g. an error payload) is returned verbatim for display.
    return result

# --- Streamlit UI ---
st.set_page_config(page_title="👮 PromptPolice", layout="centered")
st.title("👮 PromptPolice: Prompt Evaluator")

# Free-form input area for the prompt under review.
prompt_text = st.text_area("Paste your AI prompt here:", height=200)

# Render the button unconditionally; only evaluate when it was clicked AND
# the text area is non-empty.
clicked = st.button("Evaluate Prompt")
if clicked and prompt_text:
    with st.spinner("Evaluating your prompt with DeepSeek..."):
        verdict = evaluate_prompt(prompt_text)

    st.subheader("Evaluation Result (JSON):")
    st.code(json.dumps(verdict, indent=2), language='json')