Maga222006 committed on
Commit ·
6919e51
1
Parent(s): 7236cee
MultiagentPersonalAssistant
Browse files- agents/models.py +14 -5
- agents/utils/file_preprocessing.py +2 -2
agents/models.py
CHANGED
|
@@ -1,19 +1,28 @@
|
|
| 1 |
from langchain.chat_models import init_chat_model
|
| 2 |
from dotenv import load_dotenv
|
|
|
|
|
|
|
| 3 |
|
| 4 |
load_dotenv()
|
| 5 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
llm_supervisor = init_chat_model(
|
| 7 |
-
"groq:openai/gpt-oss-
|
| 8 |
max_tokens=1000
|
| 9 |
)
|
| 10 |
|
| 11 |
llm_peripheral = init_chat_model(
|
| 12 |
-
|
| 13 |
-
max_tokens=4000
|
| 14 |
)
|
| 15 |
|
| 16 |
llm_sub_agents = init_chat_model(
|
| 17 |
-
"groq:qwen/qwen3-32b"
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
| 19 |
)
|
|
|
|
"""Model setup for the multi-agent personal assistant.

Every chat model is served through Groq. Credentials are read from the
environment (via a local .env file) when this module is imported.
"""
from langchain.chat_models import init_chat_model
from dotenv import load_dotenv
from openai import AsyncOpenAI
import os

# Load GROQ_API_KEY (and any other settings) from .env into the environment.
load_dotenv()

# Async OpenAI-compatible client pointed at Groq's endpoint. Used directly
# for APIs outside the LangChain wrappers (e.g. audio transcription in
# agents/utils/file_preprocessing.py).
groq_client = AsyncOpenAI(
    base_url="https://api.groq.com/openai/v1",
    api_key=os.getenv("GROQ_API_KEY"),
)

# Supervisor model; output capped at 1000 tokens to keep its replies short.
llm_supervisor = init_chat_model(
    model="groq:openai/gpt-oss-120b",
    max_tokens=1000,
)

# Lightweight model for peripheral tasks.
llm_peripheral = init_chat_model(
    model="groq:gemma2-9b-it",
)

# Shared model for the sub-agents.
llm_sub_agents = init_chat_model(
    model="groq:qwen/qwen3-32b",
)

# Vision-capable model used for image preprocessing.
llm_image = init_chat_model(
    model="groq:meta-llama/llama-4-scout-17b-16e-instruct",
)
|
agents/utils/file_preprocessing.py
CHANGED
|
@@ -21,7 +21,7 @@ async def preprocess_file(file_name: str):
|
|
| 21 |
|
| 22 |
|
| 23 |
async def preprocess_audio(file_name):
|
| 24 |
-
from
|
| 25 |
transcription = await groq_client.audio.transcriptions.create(
|
| 26 |
model="whisper-large-v3-turbo",
|
| 27 |
file=open(file_name, "rb")
|
|
@@ -30,7 +30,7 @@ async def preprocess_audio(file_name):
|
|
| 30 |
|
| 31 |
|
| 32 |
async def preprocess_image(file_name: str):
|
| 33 |
-
from
|
| 34 |
with open(file_name, "rb") as f:
|
| 35 |
img_b64 = base64.b64encode(f.read()).decode("utf-8")
|
| 36 |
response = await llm_image.ainvoke([HumanMessage(
|
|
|
|
| 21 |
|
| 22 |
|
| 23 |
async def preprocess_audio(file_name):
|
| 24 |
+
from agents.models import groq_client
|
| 25 |
transcription = await groq_client.audio.transcriptions.create(
|
| 26 |
model="whisper-large-v3-turbo",
|
| 27 |
file=open(file_name, "rb")
|
|
|
|
| 30 |
|
| 31 |
|
| 32 |
async def preprocess_image(file_name: str):
|
| 33 |
+
from agents.models import llm_image
|
| 34 |
with open(file_name, "rb") as f:
|
| 35 |
img_b64 = base64.b64encode(f.read()).decode("utf-8")
|
| 36 |
response = await llm_image.ainvoke([HumanMessage(
|