Update app/app.py
app/app.py CHANGED (+29 -11)
@@ -13,17 +13,35 @@ import time
 
 from utils import MODELS_PROVIDERS_MAP, PROVIDERS_API_KEYS, REASONER_PROVIDERS_MAP, check_openai_api_key, trim_messages
 from settings import PROFILES_SETTINGS
+import os
+from aicore.config import LlmConfig
 
-DEFAULT_REASONER_CONFIG = …
-…
-…
-…
-…
-…
-…
+DEFAULT_REASONER_CONFIG = {
+    "openai": LlmConfig(
+        provider="openai",
+        api_key=os.getenv("OPENAI_API_KEY") or os.getenv("YOUR_SUPER_SECRET_OPENAI_API_KEY"),
+        model="deepseek-r1-distill-llama-70b",
+        temperature=0.5,
+        max_tokens=1024
+    ),
+    "groq": LlmConfig(
+        provider="groq",
+        api_key=os.getenv("YOUR_SUPER_SECRET_GROQ_API_KEY"),
+        model="deepseek-r1-distill-llama-70b",
+        temperature=0.5,
+        max_tokens=1024
+    ),
+    "gemini": LlmConfig(
+        provider="gemini",
+        api_key=os.getenv("GEMINI_API_KEY") or os.getenv("YOUR_SUPER_SECRET_GEMINI_API_KEY"),
+        model="deepseek-r1-distill-llama-70b",
+        temperature=0.5,
+        max_tokens=1024
+    ),
+}
 
 DEFAULT_LLM_CONFIG = {
-    "…
+    "ZeppFusion": LlmConfig(
         provider="groq",
         api_key=PROVIDERS_API_KEYS.get("groq"),
         model="meta-llama/llama-4-scout-17b-16e-instruct",
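A note on the shape of the change above: DEFAULT_REASONER_CONFIG is now a per-provider map of LlmConfig objects rather than a single config. A minimal sketch of how such a map might be consumed at startup, assuming the LlmConfig object keeps its api_key readable as an attribute (pick_reasoner_config is a hypothetical helper, not part of this commit):

# Hypothetical helper, not in this commit: pick a reasoner config from a
# per-provider map like DEFAULT_REASONER_CONFIG, preferring one provider and
# falling back to any entry whose API key is actually set.
def pick_reasoner_config(configs, preferred="groq"):
    ordered = [preferred] + sorted(k for k in configs if k != preferred)
    for provider in ordered:
        config = configs.get(provider)
        # getattr hedges the assumption that LlmConfig exposes api_key
        if config is not None and getattr(config, "api_key", None):
            return config
    return None  # no provider has a usable key; caller can disable reasoning

# e.g. reasoner_config = pick_reasoner_config(DEFAULT_REASONER_CONFIG)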
@@ -45,14 +63,12 @@ DEFAULT_LLM_CONFIG = {
 async def chat_profile():
     return [
         cl.ChatProfile(
-            name="…
+            name="ZeppFusion",
             markdown_description="Talk with the lastest Llm models! Powered by AiCore, check it on GitHub, link in Readme",
-            icon="https://picsum.photos/200",
         ),
         cl.ChatProfile(
             name="OpenAi",
             markdown_description="Talk with the lastest Llm models! Powered by AiCore, check it on GitHub, link in Readme",
-            icon="https://picsum.photos/200",
         )
     ]
 
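For context on where chat_profile plugs in: in Chainlit, a coroutine returning cl.ChatProfile entries is registered with the @cl.set_chat_profiles decorator, and the name the user picked can be read back from the session. A minimal sketch under those assumptions (the greeting handler is illustrative, not from this commit):

import chainlit as cl

@cl.set_chat_profiles
async def chat_profile():
    # Same shape as the profiles in the hunk above; icon is optional.
    return [
        cl.ChatProfile(
            name="ZeppFusion",
            markdown_description="Groq-backed default profile",
        ),
    ]

@cl.on_chat_start
async def greet():
    # Chainlit stores the selected profile name under "chat_profile".
    profile = cl.user_session.get("chat_profile")
    await cl.Message(content=f"Profile: {profile}").send()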
@@ -185,5 +201,7 @@ async def main(message: cl.Message):
 
     except Exception as e:
         await cl.ErrorMessage("Internal Server Error").send()
+        print(e)
+
 
 ### TODO add future todos, include support for images and pdf upload for conversation
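The last hunk answers with a generic error and then prints the exception. A sketch of the same pattern with the standard logging module, which keeps the full traceback instead of just the message (the try body is elided here, as it is in the diff):

import logging

import chainlit as cl

logger = logging.getLogger(__name__)

@cl.on_message
async def main(message: cl.Message):
    try:
        ...  # model call elided, as in the diff
    except Exception:
        # Reply generically, then record the full traceback server-side.
        await cl.ErrorMessage("Internal Server Error").send()
        logger.exception("unhandled error while answering a message")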