# Hugging Face Spaces status residue from extraction: "Spaces: Paused, Paused"
import streamlit as st

# Retrieve the model, tokenizer, and text-generation pipeline that an
# earlier setup step cached in Streamlit's session state.
# NOTE(review): this assumes the keys were populated before this module
# runs — if not, attribute access on session_state raises; verify the
# app's page/startup order.
model = st.session_state.model
tokenizer = st.session_state.tokenizer
pipeline_generator = st.session_state.pipeline_generator
def llama_prompt(prompt, pipeline=pipeline_generator):
    """Generate a text completion for ``prompt`` using the cached pipeline.

    Parameters
    ----------
    prompt : str
        The input prompt to complete.
    pipeline : callable, optional
        A transformers text-generation pipeline. Defaults to the
        module-level ``pipeline_generator`` (bound at function-definition
        time from Streamlit session state).

    Returns
    -------
    str
        The concatenation of every returned sequence's ``generated_text``.
    """
    sequences = pipeline(
        prompt,
        do_sample=True,        # sample instead of greedy decoding
        top_k=10,              # restrict sampling to the 10 most likely tokens
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,  # reads module-level `tokenizer`
        max_length=2048,       # max length of output, default=4096
        return_full_text=False,  # to not repeat the question, set to False
        # temperature=0.6,  # default=0.
    )
    # Join in one pass instead of repeated string += in a loop.
    return "".join(str(seq['generated_text']) for seq in sequences)
# Example usage:
# prompt = "What is the meaning of life?"
# result = llama_prompt(prompt)
# print(result)