Xyro123 committed on
Commit 918ff6e · verified · 1 Parent(s): 69d8fc8

Update app.py

Files changed (1)
  1. app.py +14 -46
app.py CHANGED
@@ -13,43 +13,17 @@ import time
 
 from utils import MODELS_PROVIDERS_MAP, PROVIDERS_API_KEYS, REASONER_PROVIDERS_MAP, check_openai_api_key, trim_messages
 from settings import PROFILES_SETTINGS
-import os
-from aicore.config import LlmConfig
-import gradio as gr
 
-with gr.Blocks(css="""
-.chat-logo::before {
-    content: url("public/avatar.png");
-}
-""") as app:
-    ...
-
-DEFAULT_REASONER_CONFIG = {
-    "openai": LlmConfig(
-        provider="openai",
-        api_key=os.getenv("OPENAI_API_KEY") or os.getenv("YOUR_SUPER_SECRET_OPENAI_API_KEY"),
-        model="deepseek-r1-distill-llama-70b",
-        temperature=0.5,
-        max_tokens=1024
-    ),
-    "groq": LlmConfig(
-        provider="groq",
-        api_key=os.getenv("YOUR_SUPER_SECRET_GROQ_API_KEY"),
-        model="deepseek-r1-distill-llama-70b",
-        temperature=0.5,
-        max_tokens=1024
-    ),
-    "gemini": LlmConfig(
-        provider="gemini",
-        api_key=os.getenv("GEMINI_API_KEY") or os.getenv("YOUR_SUPER_SECRET_GEMINI_API_KEY"),
-        model="deepseek-r1-distill-llama-70b",
-        temperature=0.5,
-        max_tokens=1024
-    ),
-}
+DEFAULT_REASONER_CONFIG = LlmConfig(
+    provider="groq",
+    api_key=PROVIDERS_API_KEYS.get("groq"),
+    model="deepseek-r1-distill-llama-70b",
+    temperature=0.5,
+    max_tokens=1024
+)
 
 DEFAULT_LLM_CONFIG = {
-    "ZeppFusion": LlmConfig(
+    "Reasoner4All": LlmConfig(
         provider="groq",
         api_key=PROVIDERS_API_KEYS.get("groq"),
         model="meta-llama/llama-4-scout-17b-16e-instruct",
@@ -71,14 +45,14 @@ DEFAULT_LLM_CONFIG = {
 
 async def chat_profile():
     return [
         cl.ChatProfile(
-            name="ZeppFusion",
+            name="Reasoner4All",
             markdown_description="Talk with the lastest Llm models! Powered by AiCore, check it on GitHub, link in Readme",
-
-            settings=PROFILES_SETTINGS.get("ZeppFusion")
+            icon="https://picsum.photos/200",
         ),
         cl.ChatProfile(
             name="OpenAi",
             markdown_description="Talk with the lastest Llm models! Powered by AiCore, check it on GitHub, link in Readme",
+            icon="https://picsum.photos/200",
         )
     ]
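
Note: the unused settings= kwarg is dropped from the first profile (per-profile settings move to cl.ChatSettings at chat start, see the next hunk) and both profiles gain an icon URL; picsum.photos serves placeholder images. A registration sketch, assuming Chainlit's set_chat_profiles decorator is what wires chat_profile() up (the decorator sits outside this hunk):

import chainlit as cl

@cl.set_chat_profiles  # assumed registration point; not visible in the diff
async def chat_profile():
    return [
        cl.ChatProfile(
            name="Reasoner4All",
            markdown_description="Talk with the latest LLM models!",
            icon="https://picsum.photos/200",  # placeholder avatar in the profile picker
        ),
    ]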
 
@@ -123,11 +97,9 @@ async def start_chat():
     cl.user_session.set(
         "llm", llm
     )
-
-    profile_settings = PROFILES_SETTINGS.get(user_profile, {})
+
     settings = await cl.ChatSettings(
-        PROFILES_SETTINGS.get("messages",
-        [])
+        PROFILES_SETTINGS.get(user_profile)
     ).send()
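
Note: the old call always read the literal key "messages" (defaulting to []), so the settings panel ignored the active profile; the new call keys PROFILES_SETTINGS by user_profile. One caveat: dict.get returns None for an unknown profile, and cl.ChatSettings expects a list of input widgets, so a defensive variant (not in the commit, send_profile_settings is hypothetical) would be:

import chainlit as cl
from settings import PROFILES_SETTINGS  # the app's own settings map

async def send_profile_settings(user_profile: str):
    # Hypothetical guard: fall back to an empty widget list so an
    # unregistered profile name does not hand None to cl.ChatSettings.
    widgets = PROFILES_SETTINGS.get(user_profile) or []
    return await cl.ChatSettings(widgets).send()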
 
@@ -212,8 +184,4 @@ async def main(message: cl.Message):
         await msg.send()
 
     except Exception as e:
-        await cl.ErrorMessage("Internal Server Error").send()
-        print(e)
-
-
-### TODO add future todos, include support for images and pdf upload for conversation
+        await cl.ErrorMessage("Internal Server Error").send()
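
Note: the handler keeps the generic "Internal Server Error" reply but drops print(e), so exception details no longer reach the server console, and the trailing TODO about image/PDF upload is removed. One way to retain the diagnostics without printing, assuming a module-level logger (app.py defines none in this diff) and the on_message decorator implied by main's signature:

import logging

import chainlit as cl

logger = logging.getLogger(__name__)  # hypothetical; not present in app.py

@cl.on_message  # assumed decorator on main(), consistent with its cl.Message parameter
async def main(message: cl.Message):
    try:
        ...  # build and send msg as in the surrounding code
    except Exception:
        logger.exception("chat handler failed")  # keeps the traceback server-side
        await cl.ErrorMessage("Internal Server Error").send()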
 