# chat-sabia / app.py
# Author: valencar — commit 53d9c96 ("Update app.py", verified)
import time
import datetime
import os

import streamlit as st
import torch
import transformers
from huggingface_hub import login

# The Hugging Face access token is stored in the "modelo" environment
# variable (a Space secret). Login is required because the Llama 3.1
# checkpoint is gated.
token = os.getenv("modelo")
if token:
    login(token=token)

MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"


@st.cache_resource(show_spinner=False)
def load_generator():
    """Build the text-generation pipeline once per server process.

    `st.cache_resource` keeps the multi-GB model in memory across
    Streamlit reruns; without it the model would be reloaded on every
    user interaction. The name `generator` also avoids shadowing the
    imported `transformers.pipeline` factory function.
    """
    return transformers.pipeline(
        "text-generation",
        model=MODEL_ID,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device_map="auto",
    )


with st.container():
    st.write('\n\n')
    st.title('LLM-LANAChat\n\n')
    question = st.text_input("Chat", placeholder="E ai?")

    if question:
        generator = load_generator()
        messages = [
            # {"role": "system", "content": "You are a chatbot"},
            {"role": "user", "content": question},
        ]
        st.write('Gerando a saída...')
        # Time only the generation call, not module import / model load.
        before = datetime.datetime.now()
        outputs = generator(
            messages,
            max_new_tokens=256,
        )
        after = datetime.datetime.now()
        # The chat pipeline returns the full message history in
        # "generated_text"; the last entry is the assistant's reply.
        response = outputs[0]["generated_text"][-1]
        st.write('Saída gerada...')
        print('\n\n')
        st.write(response)
        current_time = after - before
        print("\nTime Elapsed: ", current_time)
        st.write("\nTime Elapsed: ", current_time)