File size: 761 Bytes
e4fe207
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31

import streamlit as st

# Bind the artifacts cached in Streamlit's session state (presumably loaded
# by an earlier page/script run — TODO confirm) to module-level names.
_state = st.session_state
model = _state.model
tokenizer = _state.tokenizer
pipeline_generator = _state.pipeline_generator


def llama_prompt(prompt, pipeline=pipeline_generator):
    """Generate a completion for *prompt* using a text-generation pipeline.

    Args:
        prompt: Input text fed to the pipeline.
        pipeline: Hugging Face text-generation pipeline; defaults to the one
            read from Streamlit session state at import time (the default is
            bound once, when this module is first imported).

    Returns:
        The generated text of all returned sequences concatenated into one
        string (a single sequence here, since num_return_sequences=1); the
        prompt itself is excluded because return_full_text=False.
    """
    sequences = pipeline(
        prompt,
        do_sample=True,           # sample rather than greedy-decode
        top_k=10,                 # restrict sampling to the 10 most likely tokens
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,  # stop generation at the model's EOS token
        max_length=2048,          # max length of output, default=4096
        return_full_text=False,   # to not repeat the question, set to False
        # temperature=0.6, # default=0.
    )

    # str.join is linear-time; repeated += concatenation in a loop is quadratic.
    return "".join(str(seq['generated_text']) for seq in sequences)

# # Example usage:
# prompt = "What is the meaning of life?"
# result = llama_prompt(prompt)
# print(result)