import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

st.title('LLaMA2Glenda')


@st.cache_resource
def load_generator():
    """Load the fine-tuned LLaMA-2 model/tokenizer and build a text-generation
    pipeline exactly once per server process.

    Without caching, Streamlit re-executes the whole script on every widget
    interaction, which would re-load the 7B model from disk each time. The
    ``@st.cache_resource`` decorator keeps the heavy objects alive across reruns.

    Returns:
        transformers.Pipeline: a cached text-generation pipeline.
    """
    model = AutoModelForCausalLM.from_pretrained("tminh/llama-2-7b-glenda")
    # NOTE(review): tokenizer intentionally comes from the base checkpoint,
    # not the fine-tuned repo — preserved from the original; confirm the
    # fine-tuned repo ships no tokenizer of its own.
    tokenizer = AutoTokenizer.from_pretrained("TinyPixel/Llama-2-7B-bf16-sharded")
    return pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=200,
    )


prompt = st.text_input('Enter your prompt:')

if st.button('Generate Answer'):
    if not prompt:
        # Guard: avoid running the model on an empty prompt.
        st.warning('Please enter a prompt first.')
    else:
        pipe = load_generator()
        # LLaMA-2 chat prompt template: wrap the user text in [INST] tags.
        result = pipe(f"<s>[INST] {prompt} [/INST]")
        st.write(result[0]['generated_text'])
|