import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "t-tech/T-pro-it-2.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load weights in their native dtype and let Accelerate place them on
# available devices (requires `accelerate`); fp32-on-CPU is impractical
# for a model of this size.
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)
def chat(prompt):
    # T-pro-it-2.0 is instruction-tuned, so wrap the raw prompt in the
    # model's chat template instead of tokenizing it directly.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=256, do_sample=True)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="T-Pro IT 2.0 Chat")
iface.launch()
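
# A minimal client-side sketch (an assumption, not part of the original app:
# it presumes the app is running locally on Gradio's default port 7860, and
# since launch() blocks, it must run in a separate process):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("Hello!", api_name="/predict"))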