Update app.py
Browse files
app.py
CHANGED
|
@@ -85,7 +85,7 @@ def get_messages_formatter_type(model_name):
|
|
| 85 |
@spaces.GPU(duration=120)
|
| 86 |
def respond(
|
| 87 |
message,
|
| 88 |
-
history: list[
|
| 89 |
system_message,
|
| 90 |
max_tokens,
|
| 91 |
temperature,
|
|
@@ -99,17 +99,17 @@ def respond(
|
|
| 99 |
chat_template = get_messages_formatter_type(MISTRAL_MODEL_NAME)
|
| 100 |
|
| 101 |
# 모델 파일 경로 확인
|
| 102 |
-
|
| 103 |
|
| 104 |
-
print(f"Model path: {
|
| 105 |
|
| 106 |
-
if not os.path.exists(
|
| 107 |
-
print(f"Warning: Model file not found at {
|
| 108 |
print(f"Available files in ./models: {os.listdir('./models')}")
|
| 109 |
|
| 110 |
if llm is None or llm_model != MISTRAL_MODEL_NAME:
|
| 111 |
llm = Llama(
|
| 112 |
-
model_path=
|
| 113 |
flash_attn=True,
|
| 114 |
n_gpu_layers=81,
|
| 115 |
n_batch=1024,
|
|
@@ -136,17 +136,18 @@ def respond(
|
|
| 136 |
|
| 137 |
messages = BasicChatHistory()
|
| 138 |
|
|
|
|
| 139 |
for msn in history:
|
| 140 |
-
|
| 141 |
'role': Roles.user,
|
| 142 |
-
'content': msn
|
| 143 |
}
|
| 144 |
-
|
| 145 |
'role': Roles.assistant,
|
| 146 |
-
'content': msn
|
| 147 |
}
|
| 148 |
-
messages.add_message(
|
| 149 |
-
messages.add_message(
|
| 150 |
|
| 151 |
stream = agent.get_chat_response(
|
| 152 |
message,
|
|
@@ -192,7 +193,12 @@ demo = gr.ChatInterface(
|
|
| 192 |
fn=respond,
|
| 193 |
title="Ginigen Private AI",
|
| 194 |
description="6BIT 양자화로 모델 크기는 줄이고 성능은 유지하는 프라이버시 중심 AI 솔루션.",
|
| 195 |
-
theme=gr.themes.Soft(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
body_background_fill_dark="#16141c",
|
| 197 |
block_background_fill_dark="#16141c",
|
| 198 |
block_border_width="1px",
|
|
@@ -227,4 +233,4 @@ demo = gr.ChatInterface(
|
|
| 227 |
)
|
| 228 |
|
| 229 |
if __name__ == "__main__":
|
| 230 |
-
demo.launch()
|
|
|
|
| 85 |
@spaces.GPU(duration=120)
|
| 86 |
def respond(
|
| 87 |
message,
|
| 88 |
+
history: list[dict], # history 항목이 tuple이 아닌 dict 형식으로 전달됨
|
| 89 |
system_message,
|
| 90 |
max_tokens,
|
| 91 |
temperature,
|
|
|
|
| 99 |
chat_template = get_messages_formatter_type(MISTRAL_MODEL_NAME)
|
| 100 |
|
| 101 |
# 모델 파일 경로 확인
|
| 102 |
+
model_path_local = os.path.join("./models", MISTRAL_MODEL_NAME)
|
| 103 |
|
| 104 |
+
print(f"Model path: {model_path_local}")
|
| 105 |
|
| 106 |
+
if not os.path.exists(model_path_local):
|
| 107 |
+
print(f"Warning: Model file not found at {model_path_local}")
|
| 108 |
print(f"Available files in ./models: {os.listdir('./models')}")
|
| 109 |
|
| 110 |
if llm is None or llm_model != MISTRAL_MODEL_NAME:
|
| 111 |
llm = Llama(
|
| 112 |
+
model_path=model_path_local,
|
| 113 |
flash_attn=True,
|
| 114 |
n_gpu_layers=81,
|
| 115 |
n_batch=1024,
|
|
|
|
| 136 |
|
| 137 |
messages = BasicChatHistory()
|
| 138 |
|
| 139 |
+
# history의 각 항목이 dict 형식으로 {'user': <user_message>, 'assistant': <assistant_message>} 형태라고 가정
|
| 140 |
for msn in history:
|
| 141 |
+
user_message = {
|
| 142 |
'role': Roles.user,
|
| 143 |
+
'content': msn.get('user', '')
|
| 144 |
}
|
| 145 |
+
assistant_message = {
|
| 146 |
'role': Roles.assistant,
|
| 147 |
+
'content': msn.get('assistant', '')
|
| 148 |
}
|
| 149 |
+
messages.add_message(user_message)
|
| 150 |
+
messages.add_message(assistant_message)
|
| 151 |
|
| 152 |
stream = agent.get_chat_response(
|
| 153 |
message,
|
|
|
|
| 193 |
fn=respond,
|
| 194 |
title="Ginigen Private AI",
|
| 195 |
description="6BIT 양자화로 모델 크기는 줄이고 성능은 유지하는 프라이버시 중심 AI 솔루션.",
|
| 196 |
+
theme=gr.themes.Soft(
|
| 197 |
+
primary_hue="violet",
|
| 198 |
+
secondary_hue="violet",
|
| 199 |
+
neutral_hue="gray",
|
| 200 |
+
font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
|
| 201 |
+
).set(
|
| 202 |
body_background_fill_dark="#16141c",
|
| 203 |
block_background_fill_dark="#16141c",
|
| 204 |
block_border_width="1px",
|
|
|
|
| 233 |
)
|
| 234 |
|
| 235 |
if __name__ == "__main__":
|
| 236 |
+
demo.launch()
|