mirnaaiman commited on
Commit
eef1afe
·
verified ·
1 Parent(s): f08df8c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +158 -0
app.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app.py
import gradio as gr
from huggingface_hub import InferenceClient
import os

# Model queried by default; stored in a gr.State so the query function
# receives it explicitly on every click.
DEFAULT_MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"

# Try both common environment variable names for Hugging Face tokens.
# May legitimately be None: the app still runs, but gated models can fail.
HF_TOKEN = os.getenv("API_TOKEN_2") or os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Lazily-created module-level InferenceClient, managed by get_inference_client().
client = None
12
+
13
def get_inference_client(model_name):
    """Return the shared InferenceClient targeting *model_name*.

    The module-level ``client`` is reused when it already points at the
    requested model; otherwise a new client is constructed (passing
    ``HF_TOKEN`` when available). Returns ``None`` if construction fails.
    """
    global client
    try:
        # Rebuild only when there is no client yet or the model changed.
        needs_new_client = client is None or getattr(client, "model", None) != model_name
        if needs_new_client:
            client = InferenceClient(model=model_name, token=HF_TOKEN if HF_TOKEN else None)
            print(f"InferenceClient initialized for {model_name}. Token {'provided' if HF_TOKEN else 'not provided'}.")
    except Exception as e:
        # Initialization can fail for gated models or bad tokens; report and
        # signal failure to the caller rather than raising.
        print(f"Failed to initialize InferenceClient for {model_name}: {e}")
        return None
    return client
23
+
24
def evaluate_understanding(prompt, response):
    """Heuristically judge whether *response* suggests the model understood *prompt*.

    Returns one of four status strings: a hard failure for empty output,
    warnings for refusal/apology phrasing, a suspiciously short reply, or
    prompt echoing, and a success marker otherwise.
    """
    # Empty or whitespace-only output is an outright failure.
    if not response or not response.strip():
        return "❌ Not Understood (Empty or whitespace response)"

    lowered = response.lower()

    # Phrases that typically signal refusal, apology, or confusion.
    refusal_phrases = (
        "i'm sorry", "i apologize", "i cannot", "i am unable", "unable to",
        "i don't understand", "could you please rephrase", "i'm not sure i follow",
        "that's not clear", "i do not have enough information", "as an ai language model, i don't",
        "i'm not programmed to", "i lack the ability to",
    )
    matched = next((phrase for phrase in refusal_phrases if phrase in lowered), None)
    if matched is not None:
        return f"⚠️ Potentially Not Understood (Contains: '{matched}')"

    # A long prompt answered with very few words looks like a non-answer.
    if len(prompt.split()) > 7 and len(response.split()) < 10:
        return "⚠️ Potentially Not Understood (Response seems too short for the prompt)"

    # A response that contains the prompt and is barely longer than it is
    # probably just echoing the input back.
    prompt_lowered = prompt.lower()
    if prompt_lowered in lowered and len(lowered) < len(prompt_lowered) * 1.5:
        if len(prompt.split()) > 5:
            return "⚠️ Potentially Not Understood (Response might be echoing the prompt)"

    return "✔️ Likely Understood"
43
+
44
def query_model_and_evaluate(user_prompt, model_name_to_use):
    """Send *user_prompt* to *model_name_to_use* and heuristically grade the reply.

    Returns a 3-tuple of (model response or error text, understanding
    evaluation string, model name) matching the Gradio output components.
    """
    # Guard: nothing to do for a blank prompt.
    if not user_prompt or not user_prompt.strip():
        return "Please enter a prompt.", "Evaluation N/A", model_name_to_use

    print(f"Querying model: {model_name_to_use}. HF_TOKEN {'is set' if HF_TOKEN else 'is NOT set/empty'}.")

    current_client = get_inference_client(model_name_to_use)
    if current_client is None:
        error_msg = f"Error: Could not initialize the model API client for {model_name_to_use}. Check logs. This might be due to the model requiring authentication (like a token or accepting terms on Hugging Face) which was not available or successful."
        return error_msg, "Evaluation N/A", model_name_to_use

    try:
        lowered_name = model_name_to_use.lower()
        stripped_prompt = user_prompt.strip()

        # Wrap the prompt in the instruction template the model family expects.
        if "mistral" in lowered_name and "instruct" in lowered_name:
            formatted_prompt = f"<s>[INST] {stripped_prompt} [/INST]"
        elif "llama-2" in lowered_name and "chat" in lowered_name:
            formatted_prompt = (
                f"[INST] <<SYS>>\nYou are a helpful assistant. Your goal is to understand the user's prompt and respond accurately and relevantly.\n"
                f"<</SYS>>\n\n{stripped_prompt} [/INST]"
            )
        else:
            formatted_prompt = stripped_prompt

        generation_params = {
            "max_new_tokens": 300,
            "temperature": 0.6,
            "top_p": 0.9,
            "repetition_penalty": 1.1,
            "do_sample": True,
            "return_full_text": False,
        }

        # Call the model; normalize a falsy result to the empty string so the
        # evaluator can treat it uniformly.
        model_response_text = current_client.text_generation(formatted_prompt, **generation_params)
        model_response_text = model_response_text or ""
    except Exception as e:
        error_message = f"Error calling model API for {model_name_to_use}: {str(e)}. This can happen if the model is gated, requires a Hugging Face token, or if you need to accept its terms of use on the Hugging Face website."
        print(error_message)
        return error_message, "Evaluation N/A", model_name_to_use

    understanding_evaluation = evaluate_understanding(user_prompt, model_response_text)
    return model_response_text, understanding_evaluation, model_name_to_use
80
+
81
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange")) as demo:
    # Intro banner describing what the app does and which model it targets.
    gr.Markdown(
        f"""
# 🎯 Model Prompt Understanding Test
Enter a prompt for the selected language model. The application will send this to the model via Hugging Face's Inference API.
The model's response will be analyzed to provide a **basic heuristic assessment** of its understanding.

**Selected Model:** <span id='current-model-display'>{DEFAULT_MODEL_NAME}</span>
"""
    )

    # Carries the model name into query_model_and_evaluate on each click.
    model_state = gr.State(DEFAULT_MODEL_NAME)

    with gr.Row():
        prompt_box = gr.Textbox(
            label="✏️ Enter your Prompt:",
            placeholder="e.g., Explain the concept of zero-shot learning in 3 sentences.",
            lines=4,
            scale=3,
        )

    run_button = gr.Button("🚀 Submit Prompt and Evaluate", variant="primary")

    gr.Markdown("---")
    gr.Markdown("### 🤖 Model Response & Evaluation")

    with gr.Row():
        with gr.Column(scale=2):
            response_box = gr.Textbox(
                label="📝 Model's Response:",
                lines=10,
                interactive=False,
                show_copy_button=True,
            )
        with gr.Column(scale=1):
            eval_box = gr.Textbox(
                label="🧐 Understanding Evaluation:",
                lines=2,
                interactive=False,
                show_copy_button=True,
            )
            # NOTE(review): original diff indentation is ambiguous; this box is
            # assumed to sit in the right-hand column under the evaluation.
            model_used_box = gr.Textbox(
                label="⚙️ Model Used for this Response:",
                interactive=False,
                lines=1,
            )

    # Wire the button to the query + evaluation pipeline.
    run_button.click(
        fn=query_model_and_evaluate,
        inputs=[prompt_box, model_state],
        outputs=[response_box, eval_box, model_used_box],
    )

    gr.Markdown(
        """
---
**Disclaimer:**
* The 'Understanding Evaluation' is a very basic automated heuristic.
* **Using Models:** This app will attempt to connect to the selected model. Some models (especially gated ones like Llama-2) may require you to have a Hugging Face account, accept their terms of use on the Hugging Face website, and might implicitly require a valid `HF_TOKEN` associated with your account (even if not explicitly set as a secret in this Space). If a model call fails, it could be due to these reasons.
* Response quality depends heavily on the chosen model and the clarity of your prompt.
"""
    )

    # Clickable example prompts; not cached, so each click hits the API live.
    gr.Examples(
        examples=[
            ["Explain the difference between supervised and unsupervised machine learning.", DEFAULT_MODEL_NAME],
            ["Write a short poem about a curious robot.", DEFAULT_MODEL_NAME],
            ["What are the main challenges in developing AGI?", DEFAULT_MODEL_NAME],
            ["Summarize the plot of 'War and Peace' in one paragraph.", DEFAULT_MODEL_NAME],
            ["asdfjkl; qwerpoiu", DEFAULT_MODEL_NAME],
        ],
        inputs=[prompt_box, model_state],
        outputs=[response_box, eval_box, model_used_box],
        fn=query_model_and_evaluate,
        cache_examples=False,
        label="💡 Example Prompts (click to try)",
    )
150
+
151
if __name__ == "__main__":
    # Startup diagnostics before handing control to Gradio.
    print("Attempting to launch Gradio demo...")
    print(f"Default model: {DEFAULT_MODEL_NAME}")
    token_message = (
        "HF_TOKEN is set."
        if HF_TOKEN
        else "HF_TOKEN is NOT set. Some models (especially gated ones like Llama) might require a token or prior agreement to terms on the Hugging Face website to function correctly. The app will attempt to run, but API calls may fail."
    )
    print(token_message)
    demo.launch()