mirnaaiman committed on
Commit
4670e2e
·
verified ·
1 Parent(s): fc24520

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -14
app.py CHANGED
@@ -1,21 +1,27 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Load the model
5
- model = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")
6
 
7
- def generate_response(prompt):
8
- response = model(prompt, max_length=100, num_return_sequences=1)
9
- return response[0]['generated_text']
 
 
 
 
 
 
 
10
 
11
- # Create the Gradio interface
12
  iface = gr.Interface(
13
- fn=generate_response,
14
- inputs=gr.Textbox(label="Enter your prompt or question:"),
15
- outputs=gr.Textbox(label="Model Response:"),
16
- title="Test Understanding Prompt",
17
- description="Enter a prompt to see how well the model understands it."
18
  )
19
 
20
- # Launch the app
21
- iface.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
 
4
# Initialize the Hugging Face Inference client with the chosen model.
# NOTE: InferenceClient's constructor takes `model=`, not `repo_id=` —
# passing `repo_id` raises a TypeError before the app can start.
client = InferenceClient(model="meta-llama/Llama-2-7b-chat-hf")
6
 
7
def evaluate_response(prompt):
    """Send *prompt* to the model and evaluate the generated response.

    Args:
        prompt: The user's prompt or question as a string.

    Returns:
        A 2-tuple of (generated text, quality summary string), matching the
        two Gradio output components.
    """
    # InferenceClient.text_generation returns the generated text as a plain
    # string — NOT a list of dicts like a transformers pipeline. Indexing it
    # as response[0]['generated_text'] would raise a TypeError.
    generated_text = client.text_generation(
        prompt, max_new_tokens=200, temperature=0.7
    )

    # Simple quality heuristic: a response longer than 10 words counts as "Good".
    word_count = len(generated_text.split())
    quality = "Good" if word_count > 10 else "Poor"

    return generated_text, f"Response Quality: {quality}"
17
 
18
# Build the Gradio interface: one prompt box in, the model response plus a
# quality summary out.
prompt_box = gr.Textbox(lines=4, label="Enter your prompt")
response_box = gr.Textbox(label="Model Response")
quality_box = gr.Textbox(label="Quality Analysis")

iface = gr.Interface(
    fn=evaluate_response,
    inputs=prompt_box,
    outputs=[response_box, quality_box],
    title="Model Understanding Test",
    description="Enter instructions or a question, the app sends it to a language model and automatically evaluates the quality of the response.",
)

# Start the web app.
iface.launch()