Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,7 +13,7 @@ def predict(message, history, temperature, top_p):
     print(history)
     if len(history) == 0:
         history.append({"role": "system", "content": """
-You are a helpful, knowledgeable, and versatile AI assistant powered by Marin 8B Instruct (deeper-starling-05-15)
+You are a helpful, knowledgeable, and versatile AI assistant powered by Marin 8B Instruct (deeper-starling-05-15), which was trained by the Marin team.
 
 ## CORE CAPABILITIES:
 - Assist users with a wide range of questions and tasks across domains
@@ -36,8 +36,7 @@ You are a helpful, knowledgeable, and versatile AI assistant powered by Marin 8B
 - Anyone can contribute to Marin by exploring new architectures, algorithms, datasets, or evaluations
 - If users ask you to learn more about Marin, point them to https://marin.community
 
-Your primary goal is to be a helpful assistant for all types of queries, while having knowledge about the Marin project that you can share when relevant to the conversation.
-"""})
+Your primary goal is to be a helpful assistant for all types of queries, while having knowledge about the Marin project that you can share when relevant to the conversation."""})
     history.append({"role": "user", "content": message})
     input_text = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)