# Hugging Face Spaces page residue (Space status: "Sleeping") — kept as a comment
# so the file remains valid Python.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Hugging Face Hub repo id of the causal LM served by this Space.
model_id = "matteoangeloni/EduRaccoon"

# Load tokenizer and model.
# NOTE(review): trust_remote_code=True executes custom code shipped inside the
# model repo — acceptable only because the repo is the author's own; verify
# before pointing model_id at a third-party repo.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",   # let accelerate place layers on available devices
    torch_dtype="auto",  # use the dtype stored in the checkpoint
    trust_remote_code=True
)

# Text-generation pipeline reused by respond() for every chat turn.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
def respond(message, history):
    """Generate one chat reply for the Gradio ChatInterface.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[dict] | None
        Prior turns as ``{"role": ..., "content": ...}`` dicts — the format
        ``gr.ChatInterface(type="messages")`` supplies. May be empty/None on
        the first turn.

    Returns
    -------
    str
        The model's reply text (generated tokens only, whitespace-stripped).
    """
    # System prompt: pins the tutor role and the reply language.
    system_prompt = (
        "You are EduRaccoon, an AI tutor. "
        "Always reply in the same language used by the user. "
        "If the question is educational (school, university, science, history, math, literature, etc.), "
        "answer clearly and in detail as a teacher would. "
        "If the question is not educational, answer briefly and naturally."
    )

    # Fix: weave prior turns into the prompt so the model has conversation
    # memory — the original ignored `history`, so every turn started cold.
    # (Assumes "messages"-format history entries; content presumed str —
    # non-text entries, e.g. files, are stringified as-is.)
    turns = []
    for item in history or []:
        role = "User" if item.get("role") == "user" else "AI"
        turns.append(f"{role}: {item.get('content', '')}")
    turns.append("User: " + message)
    prompt = system_prompt + "\n" + "\n".join(turns) + "\nAI:"

    result = pipe(
        prompt,
        max_new_tokens=128,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
        # Fix: return only the newly generated text instead of echoing the
        # prompt — removes the fragile split("AI:")[-1] post-processing that
        # could truncate a reply containing the literal "AI:".
        return_full_text=False,
    )
    return result[0]["generated_text"].strip()
# Gradio chat UI: `type="messages"` delivers history to `respond` as a list
# of {"role", "content"} dicts (OpenAI-style message format).
chatbot = gr.ChatInterface(
    fn=respond,
    type="messages",
    title="EduRaccoon - Educational AI",
    description="Chatta con EduRaccoon: un tutor AI che risponde sempre nella tua lingua!"
)
# Launch the web server only when run as a script (not on import).
if __name__ == "__main__":
    chatbot.launch()