import os

import streamlit as st
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Log in to the Hugging Face Hub only when a token is available,
# so the app can still start for public models without credentials.
hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
if hf_token:
    login(token=hf_token)

MODEL_NAME = "pymmdrza/TPT_v1"

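# st.cache_resource caches the tokenizer and model across Streamlit reruns,
# so the weights are downloaded and loaded only once per process.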
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return tokenizer, model
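
# Note: the weights load on CPU by default. If a GPU and the accelerate
# package are available, passing device_map="auto" to from_pretrained above
# is one option (inputs would then need to be moved to the model's device).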

tokenizer, model = load_model()

st.title("TPT_v1 Model Test")

input_text = st.text_area("Enter the input text:")

if st.button("Generate Response"):
    if input_text:
        inputs = tokenizer(input_text, return_tensors="pt")
        # max_new_tokens bounds only the generated continuation, unlike
        # max_length, which also counts the prompt tokens.
        outputs = model.generate(**inputs, max_new_tokens=100)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Generated response:")
        st.success(response)
    else:
        st.warning("Enter some text to generate a response.")
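
# To run locally (assuming streamlit, transformers, torch, and
# huggingface_hub are installed; the filename app.py is illustrative):
#   streamlit run app.py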