Spaces:
Sleeping
import os

import gradio as gr
from huggingface_hub import InferenceClient
def analyze_project(project_data, question):
    """Ask a hosted LLM to analyze *project_data* and answer *question*.

    Args:
        project_data: Free-form text describing the project.
        question: The question to ask about the project.

    Returns:
        The model's generated answer as a string.

    Raises:
        huggingface_hub.errors.HfHubHTTPError: If the inference request fails
            (e.g. missing/invalid HF_API_KEY or model unavailable).
    """
    # Token is read from the environment so it never lives in source control.
    api_key = os.getenv("HF_API_KEY")
    client = InferenceClient(api_key=api_key)

    prompt = f"Analyze this project: {project_data}\n\nQuestion: {question}"

    # InferenceClient has no `encoding()` or `generate()` methods — the
    # serverless text-generation endpoint is `text_generation()`, which takes
    # the raw prompt string and returns the generated text directly, so no
    # dict/index post-processing is needed.
    return client.text_generation(
        prompt,
        model="Qwen/Qwen2.5-72B-Instruct",
        max_new_tokens=100,
    )
# Two free-text inputs (project description, question) mapped to one text output.
iface = gr.Interface(
    fn=analyze_project,
    inputs=["text", "text"],
    outputs="text",
)

# `launch()` has no `api` keyword — passing `api=True` raises TypeError.
# The flag that exposes the auto-generated API docs page is `show_api`.
iface.launch(share=True, show_api=True)