Commit · 1598dba
Parent(s): efe3f8f
feat: run page
src/streamlit_app.py (+91 -38)
CHANGED
@@ -1,40 +1,93 @@
-import numpy as np
-import pandas as pd
+from time import sleep
 import streamlit as st
+# for GPU inference, uncomment the following line
+# from unsloth import FastLanguageModel, is_bfloat16_supported
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-""
-st.
-)
+AI_MODE = "ON"
+
+if AI_MODE == "ON":
+    model_id = "choco-conoz/TwinLlama-3.2-1B-DPO"
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(model_id)
+    # for GPU inference, uncomment the following line
+    # model = FastLanguageModel.for_inference(model)
+
+    processor = pipeline(
+        "text-generation",
+        model=model,
+        tokenizer=tokenizer,
+        max_new_tokens=10
+    )
+
+    terminators = [
+        tokenizer.eos_token_id,
+        tokenizer.convert_tokens_to_ids(""),
+    ]
+
+
+def main():
+    st.title('DEMO - DPO')
+    st.subheader('Instruction/Response')
+    st.markdown('<div style="text-align: right;">produced by Conoz (https://www.conoz.com)</div>',
+                unsafe_allow_html=True)
+    st.markdown(
+        '<div><br />basic space hardware에서 응답 시간은 3분 정도 소요됩니다. '
+        '영어, 한국어 등으로 질문할 수 있습니다.<br />'
+        '코노즈에서 Llama-3.2-1B model을 DPO로 학습한 모델을 사용합니다. '
+        '알파카 chat template을 사용합니다.<br />'
+        '코노즈에서 Llama-3.1-8B 모델을 DPO로 학습한 모델도 사용할 수 있지만 basic space hardware에서 동작하지 않습니다.</div>',
+        unsafe_allow_html=True
+    )
+    st.markdown(
+        '<div>Response time on basic space hardware takes about 3 minutes. '
+        'You can ask questions in English, Korean, etc. '
+        'It is a model fine-tuned on the Llama-3.2-1B model by Conoz. '
+        'It uses the Alpaca chat template.<br />'
+        'You can also use the model fine-tuned on the Llama-3.1-8B model by Conoz, but it does not work on the basic space hardware.<br /></div>',
+        unsafe_allow_html=True
+    )
+    st.markdown('<hr />', unsafe_allow_html=True)
+    query = st.text_input('Input your topic of interest. (10 ~ 1000 characters)',
+                          placeholder='e.g. What is the capital of South Korea?')
+
+    alpaca_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+{}
+### Response:
+"""
+
+    if st.button("Send"):
+        if not query:
+            st.error("Please enter a query.")
+            return
+        if len(query) < 10:
+            st.error("Please enter a query with at least 10 characters.")
+            return
+        if len(query) > 1000:
+            st.error("Please enter a query with less than 1000 characters.")
+            return
+        with st.spinner("Generating response..."):
+            user_prompt = alpaca_template.format(query, "")
+            if AI_MODE == "ON":
+                # for chat models
+                # user_prompt = tokenizer.apply_chat_template(
+                #     user_prompt, tokenize=False, add_generation_prompt=True)
+                outputs = processor(user_prompt,
+                                    max_new_tokens=4096,
+                                    use_cache=True,
+                                    do_sample=True,
+                                    temperature=0.6,
+                                    top_p=0.9,
+                                    )
+                # eos_token_id=terminators,
+                response = outputs[0]["generated_text"][len(user_prompt):]
+            else:
+                sleep(3)
+                response = "AI_MODE is OFF. Please turn it ON to get a response."
+        st.subheader('Response:')
+        st.write(response)
+
+
+if __name__ == "__main__":
+    main()
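For reference, a minimal sketch of the same generation path outside Streamlit, assuming the model id above is downloadable; the 128-token cap and the example instruction are illustrative, not part of the app:

# sketch: load the DPO model and generate one Alpaca-formatted completion
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "choco-conoz/TwinLlama-3.2-1B-DPO"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

alpaca_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{}
### Response:
"""

prompt = alpaca_template.format("What is the capital of South Korea?")
outputs = generator(prompt, max_new_tokens=128, do_sample=True,
                    temperature=0.6, top_p=0.9)
# the pipeline echoes the prompt, so slice it off to keep only the completion
print(outputs[0]["generated_text"][len(prompt):])

Note that the app builds a terminators list but leaves the eos_token_id=terminators argument commented out; re-enabling it would pass those token ids to the pipeline call so generation stops at either terminator.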