Update app.py
app.py CHANGED
@@ -7,41 +7,14 @@ llm = Llama.from_pretrained(
     verbose=False
 )
 
+basic_prompt = "Below is the context which is your conversation history and the last user question. Write a response according the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"
 input_text = st.text_input('text')
+model_input = basic_prompt.format(question=input_text)
 if input_text:
     output = llm(
-
+        model_input, # Prompt
         max_tokens=32, # Generate up to 32 tokens, set to None to generate up to the end of the context window
-        stop=["
+        stop=["<end_of_turn>"],
         echo=True # Echo the prompt back in the output
     ) # Generate a completion, can also call create_completion
-    st.write(output)
+    st.write(output["choices"][0]["text"])
-
-
-# from ctransformers import AutoModelForCausalLM, AutoTokenizer
-
-# model = AutoModelForCausalLM.from_pretrained("Mykes/med_gemma7b_gguf", model_file="unsloth.Q4_K_M.gguf")
-# tokenizer = AutoTokenizer.from_pretrained(model)
-# input_text = st.textarea('text')
-# if text:
-#     input_ids = tokenizer(input_text, return_tensors="pt")
-#     outputs = model.generate(**input_ids)
-#     st.write(outputs)
-
-
-
-# from transformers import AutoTokenizer, AutoModelForCausalLM
-
-# model_id = "Mykes/med_gemma7b_gguf"
-# filename = "unsloth.Q4_K_M.gguf"
-
-# tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
-# model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
-
-
-
-# input_text = st.textarea('text')
-# if text:
-#     input_ids = tokenizer(input_text, return_tensors="pt")
-#     outputs = model.generate(**input_ids)
-#     st.write(outputs)
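For orientation, here is a minimal sketch of the whole post-commit app.py. The imports and the Llama.from_pretrained() arguments are assumptions: the hunk header shows only llm = Llama.from_pretrained( and verbose=False, so the repo id and filename are borrowed from the commented-out code this commit deletes. The Russian seed dialogue in the template translates as "Answer my question about my health." / "Of course! What is your question?".

import streamlit as st
from llama_cpp import Llama

# Assumed: repo_id/filename taken from the removed ctransformers/transformers
# comments; only verbose=False is visible in the diff itself.
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="unsloth.Q4_K_M.gguf",
    verbose=False
)

# Template added by this commit: a fixed Russian seed dialogue as context,
# with the user's question substituted into {question}.
basic_prompt = "Below is the context which is your conversation history and the last user question. Write a response according the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"

input_text = st.text_input('text')
# Runs on every Streamlit rerun, even while the box is empty; harmless,
# but it could equally live inside the if block below.
model_input = basic_prompt.format(question=input_text)
if input_text:
    output = llm(
        model_input,             # Prompt
        max_tokens=32,           # Generate up to 32 tokens
        stop=["<end_of_turn>"],  # Gemma's turn delimiter
        echo=True                # Echo the prompt back in the output
    )  # Generate a completion, can also call create_completion
    st.write(output["choices"][0]["text"])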