import gradio as gr
import requests
import json
import os

# Set your API key as an environment variable.
# Important: in a Hugging Face Space, configure this as a repository secret.
HF_API_KEY = os.environ.get("HF_API_KEY", "")

# Hosted inference endpoint — the model runs remotely, nothing is loaded locally.
API_URL = "https://api-inference.huggingface.co/models/microsoft/phi-3-mini-4k-instruct"

# Seconds before a request is abandoned; without a timeout a stalled API call
# would hang the Gradio worker forever.
REQUEST_TIMEOUT = 60


def analyze_shampoo(prompt):
    """Send ``prompt`` to the Hugging Face Inference API and return the reply.

    Parameters
    ----------
    prompt : str
        The full analysis prompt entered by the user.

    Returns
    -------
    str
        The model's generated text, or a human-readable ``"Error: ..."`` /
        ``"Unexpected response format: ..."`` message on failure.
    """
    # Guard clause: no point building headers/payload without credentials.
    if not HF_API_KEY:
        return "Error: No API key provided. Please add the HF_API_KEY secret to your Space."

    headers = {
        "Authorization": f"Bearer {HF_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 2048,
            "temperature": 0.7,
            "top_p": 0.95,
            # Only return the completion, not the echoed prompt.
            "return_full_text": False,
        },
    }

    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json=payload,
            timeout=REQUEST_TIMEOUT,  # fail fast instead of hanging the UI
        )
        response.raise_for_status()  # Raise an exception for HTTP errors
        result = response.json()

        if isinstance(result, list) and len(result) > 0:
            # Successful generations arrive as [{"generated_text": "..."}].
            return result[0].get('generated_text', 'No response generated')
        if isinstance(result, dict) and "error" in result:
            # The Inference API reports problems (e.g. "model is loading")
            # as a JSON object with an "error" key — surface that message
            # instead of dumping the raw dict.
            return f"Error: {result['error']}"
        return f"Unexpected response format: {result}"
    except Exception as e:
        # Broad catch at the UI boundary: show the problem to the user
        # rather than crashing the Gradio handler.
        return f"Error: {str(e)}"


# Create the Gradio interface
interface = gr.Interface(
    fn=analyze_shampoo,
    inputs=gr.Textbox(lines=10, placeholder="Enter your analysis prompt here..."),
    outputs=gr.Textbox(lines=20),
    title="Phi-3 Shampoo Analyzer",
    description="Analyze shampoo ingredients based on user profiles using the Hugging Face Inference API",
)

if __name__ == "__main__":
    interface.launch()