mirnaaiman committed on
Commit
5a0561f
·
verified ·
1 Parent(s): 562a1db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -21
app.py CHANGED
@@ -1,26 +1,34 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- generator = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")
 
 
 
5
 
6
def evaluate_response(prompt):
    """Generate a continuation for *prompt* and attach a crude quality label.

    Returns a ``(generated_text, quality_message)`` pair; the label is
    "Good" when the response exceeds ten words, otherwise "Poor".
    """
    result = generator(prompt, max_new_tokens=150, do_sample=True, temperature=0.7)
    text = result[0]['generated_text']

    # Simple heuristic: anything longer than ten words counts as "Good".
    if len(text.split()) > 10:
        verdict = "Good"
    else:
        verdict = "Poor"

    return text, f"Response Quality: {verdict}"
16
-
17
# Wire the evaluator into a two-output Gradio UI: the raw model response and
# the automatic quality verdict are shown side by side.
iface = gr.Interface(
    fn=evaluate_response,
    title="Model Understanding Test",
    description="Enter a prompt, get a model response, and see an automatic quality evaluation.",
    inputs=gr.Textbox(lines=4, label="Enter your prompt"),
    outputs=[
        gr.Textbox(label="Model Response"),
        gr.Textbox(label="Quality Analysis"),
    ],
)

# Only start the server when run as a script, not on import.
if __name__ == "__main__":
    iface.launch()
 
import os

import gradio as gr
import requests

# Hosted inference endpoint for Mistral-7B-Instruct.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"

# SECURITY: never commit a live API token to source control. Read it from the
# environment instead (set HF_TOKEN as a secret in the Space settings). The
# token previously hard-coded here was leaked and should be revoked.
headers = {
    "Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"
}
9
def query(payload):
    """POST *payload* to the inference API and return the decoded JSON.

    On a non-200 status the failure is surfaced as ``{"error": ...}`` rather
    than raising, so the caller can display it in the UI.
    """
    # requests has no default timeout — without one a stalled API call would
    # hang the app indefinitely.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    if response.status_code != 200:
        return {"error": f"API error {response.status_code}: {response.text}"}
    return response.json()
14
 
15
def process(prompt):
    """Send *prompt* to the model and return just the generated continuation.

    Returns the error text on API failure, or a diagnostic string when the
    response has an unexpected shape.
    """
    output = query({
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 100,
            "temperature": 0.7
        }
    })
    # query() returns an error dict on HTTP failure; the API itself can also
    # return one (e.g. while the model is still loading). Check the type
    # first — `"error" in output` on a list would test element membership.
    if isinstance(output, dict) and "error" in output:
        return output["error"]
    # Successful responses are a non-empty list of dicts; guard against an
    # empty list (the old `output[0]` raised IndexError) and non-dict items.
    if (isinstance(output, list) and output
            and isinstance(output[0], dict) and "generated_text" in output[0]):
        # The API echoes the prompt back; strip it so only the completion shows.
        return output[0]["generated_text"].replace(prompt, "").strip()
    return f"Unexpected output: {output}"
28
 
29
# Build the UI once at module level. Keeping a named handle and guarding
# launch() makes the module importable without starting a server — the same
# entry-point pattern the previous revision of this file used.
demo = gr.Interface(
    fn=process,
    inputs=gr.Textbox(label="Prompt", lines=3),
    outputs=gr.Textbox(label="Response", lines=6),
    title="Hugging Face Mistral API Test"
)

if __name__ == "__main__":
    demo.launch()