# Spaces:
# No application file
# (Hugging Face Spaces page residue from extraction — kept as comments so the file stays valid Python.)
import os

import gradio as gr
import requests

# Hugging Face Inference API token, read from the environment (set as a
# Space secret); None if unset, which will cause 401 responses downstream.
API_TOKEN = os.getenv("HF_API_TOKEN")
# Model id queried via the hosted Inference API.
MODEL = "meta-llama/Llama-2-7b-chat-hf"
def query_model(prompt):
    """Send *prompt* to the Hugging Face Inference API and return the output.

    Returns the model's generated text on success, or a human-readable
    "Error: ..." string on HTTP failure or an unexpected response shape
    (this function never raises for those cases; callers display the string).
    """
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 100}}
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{MODEL}",
        headers=headers,
        json=payload,
        timeout=30,  # fix: without a timeout, a stalled API call hangs the UI forever
    )
    if response.status_code == 200:
        output = response.json()
        try:
            return output[0]['generated_text']
        # fix: was a bare `except:` — catch only the lookup failures an
        # unexpected JSON shape can produce, so real bugs still surface.
        except (KeyError, IndexError, TypeError):
            return "Error: Unexpected output format"
    else:
        return f"Error: {response.status_code} - {response.text}"
def evaluate_response(prompt, response):
    """Heuristic relevance check between a prompt and a model response.

    Lowercases both texts, splits on whitespace, and counts the distinct
    words they share; three or more shared words is treated as the model
    having "understood" the prompt.
    """
    shared = set(prompt.lower().split()) & set(response.lower().split())
    if len(shared) >= 3:
        return "ββ Understood"
    return "β Not Understood"
def process_prompt(prompt):
    """Gradio callback: query the model, then score the reply.

    Returns a (model response, evaluation label) pair matching the two
    output textboxes of the interface.
    """
    reply = query_model(prompt)
    return reply, evaluate_response(prompt, reply)
# Build the UI and launch immediately at import time, as Hugging Face
# Spaces expects for an app entry script.
demo = gr.Interface(
    fn=process_prompt,
    inputs=gr.Textbox(lines=3, placeholder="Enter your prompt here...", label="Prompt"),
    outputs=[
        gr.Textbox(label="Model Response"),
        gr.Textbox(label="Evaluation Result"),
    ],
    title="Prompt Understanding Tester",
    description="Test if the Hugging Face model understands your prompt.",
)
demo.launch()