mirnaaiman committed on
Commit
fc24520
·
verified ·
1 Parent(s): 09bb332

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -39
app.py CHANGED
@@ -1,44 +1,21 @@
1
  import gradio as gr
2
- import requests
3
 
4
- API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
 
5
 
6
def format_prompt(user_input):
    """Wrap *user_input* in Mistral's ``[INST] ... [/INST]`` instruction tags."""
    return "[INST] " + user_input + " [/INST]"
 
8
 
9
def query(payload):
    """POST *payload* as JSON to the inference endpoint and return the decoded JSON.

    A timeout is set because ``requests`` has no default timeout: without one,
    a stalled endpoint would hang the calling UI thread indefinitely.

    Raises whatever ``requests`` raises on network failure / timeout; callers
    (``process``) are expected to catch and report.
    """
    response = requests.post(API_URL, json=payload, timeout=60)
    return response.json()
 
 
 
 
 
12
 
13
def evaluate_response(prompt, response):
    """Heuristically judge whether *response* looks like an answer to *prompt*.

    Special-cases the "capital of France" prompt (the response must mention
    Paris); otherwise accepts any response containing a generic answer
    indicator (substring match, case-insensitive).
    """
    p = prompt.lower()
    r = response.lower()
    if "capital" in p and "france" in p:
        return "paris" in r
    for indicator in ("answer", "is", "are", "because", ":"):
        if indicator in r:
            return True
    return False
19
-
20
def process(prompt):
    """Send *prompt* to the model and append a heuristic understanding verdict.

    Any failure along the way (network error, unexpected payload shape, ...)
    is returned as an ``Error: ...`` string instead of being raised, so the
    Gradio UI never crashes.
    """
    try:
        output = query(
            {
                "inputs": format_prompt(prompt),
                "parameters": {
                    "max_new_tokens": 100,
                    "temperature": 0.3,
                    "do_sample": True,
                    "return_full_text": False,
                },
            }
        )
        response = output[0]["generated_text"].strip()
        if evaluate_response(prompt, response):
            evaluation = "✔ Understood"
        else:
            evaluation = "❌ Didn't understand"
        return f"{response}\n\nEvaluation: {evaluation}"
    except Exception as e:
        return f"Error: {str(e)}"
36
-
37
# Assemble the Gradio UI: a prompt box, a submit button, and a response box
# wired to process().
with gr.Blocks() as demo:
    gr.Markdown("## Model Understanding Tester")
    user_prompt = gr.Textbox(label="Enter your prompt", lines=3)
    send = gr.Button("Submit")
    model_reply = gr.Textbox(label="Response", lines=6)
    send.click(process, inputs=user_prompt, outputs=model_reply)

demo.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
 
4
# Load the chat model once at import time; the pipeline bundles the
# tokenizer and decoding logic.
_MODEL_ID = "meta-llama/Llama-2-7b-chat-hf"
model = pipeline("text-generation", model=_MODEL_ID)
6
 
7
def generate_response(prompt):
    """Generate a completion for *prompt* with the module-level pipeline.

    Uses ``max_new_tokens`` rather than ``max_length``: ``max_length`` caps
    the *total* token count including the prompt, so a long prompt would
    leave little or no room for the answer (or raise a transformers error).
    ``max_new_tokens`` caps only the generated continuation.
    """
    response = model(prompt, max_new_tokens=100, num_return_sequences=1)
    return response[0]['generated_text']
10
 
11
+ # Create the Gradio interface
12
+ iface = gr.Interface(
13
+ fn=generate_response,
14
+ inputs=gr.Textbox(label="Enter your prompt or question:"),
15
+ outputs=gr.Textbox(label="Model Response:"),
16
+ title="Test Understanding Prompt",
17
+ description="Enter a prompt to see how well the model understands it."
18
+ )
19
 
20
+ # Launch the app
21
+ iface.launch()