Update app.py
app.py CHANGED

@@ -6,62 +6,79 @@ import os
 from threading import Thread
 import random
 from datasets import load_dataset
+from sentence_transformers import SentenceTransformer
+from sklearn.metrics.pairwise import cosine_similarity
+import numpy as np
+
+# GPU memory management
+torch.cuda.empty_cache()
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 MODEL_ID = "CohereForAI/c4ai-command-r7b-12-2024"
 MODELS = os.environ.get("MODELS")
 MODEL_NAME = MODEL_ID.split("/")[-1]
 
-
-
-CSS = """
-.duplicate-button {
-    margin: auto !important;
-    color: white !important;
-    background: black !important;
-    border-radius: 100vh !important;
-}
-h3 {
-    text-align: center;
-}
-.chatbox .messages .message.user {
-    background-color: #e1f5fe;
-}
-.chatbox .messages .message.bot {
-    background-color: #eeeeee;
-}
-"""
+# Load the embedding model
+embedding_model = SentenceTransformer('sentence-transformers/xlm-r-100langs-bert-base-nli-stsb-mean-tokens')
 
-#
-model = AutoModelForCausalLM.from_pretrained(
-    MODEL_ID,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-)
-tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+# Load the Korean Wikipedia QnA dataset
+wiki_dataset = load_dataset("lcw99/wikipedia-korean-20240501-1million-qna")
+print("Wikipedia dataset loaded:", wiki_dataset)
 
-#
-
-
+# Embed the questions in the dataset
+questions = wiki_dataset['train']['question'][:10000]  # use only the first 10000
+question_embeddings = embedding_model.encode(questions, convert_to_tensor=True)
 
-
-
-
-
+def find_relevant_context(query, top_k=3):
+    # Embed the query
+    query_embedding = embedding_model.encode(query, convert_to_tensor=True)
+
+    # Compute cosine similarity
+    similarities = cosine_similarity(
+        query_embedding.cpu().numpy().reshape(1, -1),
+        question_embeddings.cpu().numpy()
+    )[0]
+
+    # Indices of the most similar questions
+    top_indices = np.argsort(similarities)[-top_k:][::-1]
+
+    # Extract the relevant contexts
+    relevant_contexts = []
+    for idx in top_indices:
+        relevant_contexts.append({
+            'question': questions[idx],
+            'answer': wiki_dataset['train']['answer'][idx]
+        })
+
+    return relevant_contexts
 
 @spaces.GPU
 def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
     print(f'message is - {message}')
     print(f'history is - {history}')
+
+    # RAG: find relevant context
+    relevant_contexts = find_relevant_context(message)
+    context_prompt = "\n\nRelevant reference information:\n"
+    for ctx in relevant_contexts:
+        context_prompt += f"Q: {ctx['question']}\nA: {ctx['answer']}\n\n"
+
+    # Build the conversation history
     conversation = []
     for prompt, answer in history:
-        conversation.extend([
-
+        conversation.extend([
+            {"role": "user", "content": prompt},
+            {"role": "assistant", "content": answer}
+        ])
+
+    # Build the final prompt, including the retrieved context
+    final_message = context_prompt + "\nCurrent question: " + message
+    conversation.append({"role": "user", "content": final_message})
 
     input_ids = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
     inputs = tokenizer(input_ids, return_tensors="pt").to(0)
 
-    streamer = TextIteratorStreamer(tokenizer, timeout
+    streamer = TextIteratorStreamer(tokenizer, timeout
 
     generate_kwargs = dict(
         inputs,