# Hugging Face Spaces app script.
# (The "Spaces: / Sleeping" lines were status chrome copied from the Space's
# web page, not part of the program.)
# Standard library
import os

# Third-party
import torch
import gradio as gr
from transformers import pipeline
import openai
from dotenv import load_dotenv

# Load variables from a local .env file into the process environment.
load_dotenv()

# NOTE(review): the OpenAI key is configured here but nothing in this file
# calls the OpenAI API — confirm whether this is still needed.
openai.api_key = os.getenv("OPENAI_API_KEY")
# A free, non-Microsoft chat model hosted on the Hugging Face Hub.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Text-generation pipeline; bfloat16 roughly halves memory vs. float32.
generator = pipeline(
    task="text-generation",
    model=model_name,
    torch_dtype=torch.bfloat16,
    # device_map="cpu"  # switch to "cuda" for GPU inference
)
def explain_topic(topic):
    """Generate a simple explanation of *topic* with the local chat model.

    Args:
        topic: Free-text topic supplied by the user.

    Returns:
        The model's explanation, with the prompt scaffolding stripped
        when the assistant marker is present in the output.
    """
    # TinyLlama chat template: system / user / assistant role markers.
    prompt = (
        "<|system|>Explain the following topic in a simple way.\n"
        f"<|user|>{topic}\n<|assistant|>"
    )
    outputs = generator(
        prompt,
        max_new_tokens=200,
        temperature=0.5,
        do_sample=True,
    )
    text = outputs[0]["generated_text"]
    # The pipeline echoes the prompt; keep only what follows the last
    # assistant marker.
    marker = "<|assistant|>"
    if marker in text:
        text = text.split(marker)[-1].strip()
    return text
# Shut down any Gradio servers left over from a previous run
# (useful when iterating in notebooks / Spaces).
gr.close_all()

# Build the Gradio UI: one text box in, one text box out.
app = gr.Interface(
    fn=explain_topic,
    inputs=[gr.Textbox(label="Enter topic to explain", lines=5)],
    outputs=[gr.Textbox(label="Explanation", lines=8)],
    title="AI Explainer (3mtt & Hackathon)",
    description="Uses Hugging Face Transformers pipeline to explain any topic.",
)

# Guard the launch so importing this module (e.g. from a test) does not
# start a web server; running the file as a script behaves exactly as before.
if __name__ == "__main__":
    app.launch(debug=True)