| from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
| import gradio as gr |
|
|
# Hugging Face model id for the chat model served by this app.
model_name = "microsoft/Phi-4-mini-instruct"

# Load the tokenizer and the model weights. `torch_dtype="auto"` picks the
# checkpoint's native precision; `device_map="auto"` places the weights on
# whatever accelerator is available (falls back to CPU).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)

# Text-generation pipeline used by the Gradio handler below; replies are
# capped at 200 newly generated tokens per call.
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
)
|
|
def chatbot_response(user_input):
    """Generate a model reply for a single user message.

    Parameters
    ----------
    user_input : str
        The raw text typed into the Gradio textbox.

    Returns
    -------
    str
        The model's generated continuation.
    """
    # `return_full_text=False` makes the pipeline return only the newly
    # generated tokens. Without it the default (True) prepends the prompt,
    # so every reply would echo the user's own message back at them.
    # NOTE(review): Phi-4-mini-instruct is a chat-tuned model; feeding it a
    # bare string skips the chat template — consider passing
    # [{"role": "user", "content": user_input}] instead. TODO confirm
    # against the installed transformers version.
    result = chatbot(user_input, return_full_text=False)
    return result[0]["generated_text"]
|
|
| |
# Single-turn web UI: one text box in, one text box out, wired to the
# generation handler above.
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="Ethical AI Chatbot",
    description="A chatbot for ethical AI guidance.",
)

# Start the local Gradio server (blocks until the app is stopped).
iface.launch()
|
|