# HuggingFace Spaces demo: Japanese GPT-2 chatbot (Gradio app).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the pretrained Japanese GPT-2 checkpoint.
# NOTE: this downloads the weights at import time, so the first start of the
# app can take a while; subsequent starts hit the local HF cache.
tokenizer = AutoTokenizer.from_pretrained("colorfulscoop/gpt2-small-ja")
model = AutoModelForCausalLM.from_pretrained("colorfulscoop/gpt2-small-ja")
| # Function to generate AI response | |
| def generate(text): | |
| inputs = tokenizer(text, return_tensors="pt") | |
| outputs = model.generate(**inputs, max_new_tokens=50) | |
| return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]) | |
| # Gradio interface | |
| iface = gr.Interface( | |
| fn=generate, | |
| inputs="text", | |
| outputs="text", | |
| title="Japanese GPT-2 Chatbot", | |
| description="Type a message in Japanese and get a response." | |
| ) | |
| iface.launch() | |