Rauhan committed on
Commit
443f81f
·
1 Parent(s): 437ba68
analyticsHub/components/codeGeneratorAgent.py CHANGED
@@ -3,7 +3,7 @@ from langchain_core.prompts import PromptTemplate
3
  from langchain_core.runnables import RunnablePassthrough
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
- from langchain_cerebras import ChatCerebras
7
  from ..utils.logger import logger
8
  import os
9
 
@@ -18,7 +18,7 @@ class CodeGenerator:
18
  logger.info("Constructing code generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["codeGeneratorAgentPrompt"]
20
  codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
21
- llm = ChatCerebras(
22
  model=self.config.get("CODEGENERATOR", "model"),
23
  temperature=self.config.getfloat("CODEGENERATOR", "temperature")
24
  )
 
3
  from langchain_core.runnables import RunnablePassthrough
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
+ from langchain_groq import ChatGroq
7
  from ..utils.logger import logger
8
  import os
9
 
 
18
  logger.info("Constructing code generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["codeGeneratorAgentPrompt"]
20
  codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
21
+ llm = ChatGroq(
22
  model=self.config.get("CODEGENERATOR", "model"),
23
  temperature=self.config.getfloat("CODEGENERATOR", "temperature")
24
  )
analyticsHub/components/failsafeAgent.py CHANGED
@@ -3,7 +3,7 @@ from langchain_core.prompts import PromptTemplate
3
  from langchain_core.runnables import RunnablePassthrough
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
- from langchain_cerebras import ChatCerebras
7
  from ..utils.logger import logger
8
  import os
9
 
@@ -18,7 +18,7 @@ class FailsafeCodeGenerator:
18
  logger.info("Constructing failsafe code generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["codeDebuggerAgentPrompt"]
20
  codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
21
- llm = ChatCerebras(
22
  model=self.config.get("FAILSAFECODEGENERATOR", "model"),
23
  temperature=self.config.getfloat("FAILSAFECODEGENERATOR", "temperature")
24
  )
 
3
  from langchain_core.runnables import RunnablePassthrough
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
+ from langchain_groq import ChatGroq
7
  from ..utils.logger import logger
8
  import os
9
 
 
18
  logger.info("Constructing failsafe code generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["codeDebuggerAgentPrompt"]
20
  codeGeneratorPrompt = PromptTemplate.from_template(promptTemplate)
21
+ llm = ChatGroq(
22
  model=self.config.get("FAILSAFECODEGENERATOR", "model"),
23
  temperature=self.config.getfloat("FAILSAFECODEGENERATOR", "temperature")
24
  )
analyticsHub/components/metadataGenerator.py CHANGED
@@ -3,7 +3,7 @@ from langchain_core.prompts import ChatPromptTemplate
3
  from langchain_core.runnables import RunnableLambda
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
- from langchain_cerebras import ChatCerebras
7
  from ..utils.logger import logger
8
  import os
9
 
@@ -18,7 +18,7 @@ class MetadataGenerator:
18
  logger.info("Constructing metadata generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["metadataGeneratorPrompt"]
20
  prompt = ChatPromptTemplate.from_template(promptTemplate)
21
- llm = ChatCerebras(
22
  model=self.config.get("METADATAGENERATOR", "model"),
23
  temperature=self.config.getfloat("METADATAGENERATOR", "temperature")
24
  )
 
3
  from langchain_core.runnables import RunnableLambda
4
  from ..utils.functions import readYaml, getConfig
5
  from ..utils.exceptions import CustomException
6
+ from langchain_groq import ChatGroq
7
  from ..utils.logger import logger
8
  import os
9
 
 
18
  logger.info("Constructing metadata generation chain.")
19
  promptTemplate = readYaml(self.yamlPath)["metadataGeneratorPrompt"]
20
  prompt = ChatPromptTemplate.from_template(promptTemplate)
21
+ llm = ChatGroq(
22
  model=self.config.get("METADATAGENERATOR", "model"),
23
  temperature=self.config.getfloat("METADATAGENERATOR", "temperature")
24
  )
analyticsHub/components/queryRephraserAgent.py CHANGED
@@ -3,7 +3,7 @@ from langchain_core.prompts import PromptTemplate
3
  from ..utils.functions import readYaml, getConfig
4
  from ..utils.exceptions import CustomException
5
  from pydantic import Field, BaseModel
6
- from langchain_cerebras import ChatCerebras
7
  from ..utils.logger import logger
8
  import os
9
 
@@ -30,7 +30,7 @@ class QueryRephaser:
30
  input_variables = ["metadata", "query"],
31
  partial_variables = {"format_instructions": queryRephraseParser.get_format_instructions()}
32
  )
33
- llm = ChatCerebras(
34
  model=self.config.get("QUERYREPHRASER", "model"),
35
  temperature=self.config.getfloat("QUERYREPHRASER", "temperature"),
36
  max_tokens=self.config.getint("QUERYREPHRASER", "maxTokens")
 
3
  from ..utils.functions import readYaml, getConfig
4
  from ..utils.exceptions import CustomException
5
  from pydantic import Field, BaseModel
6
+ from langchain_groq import ChatGroq
7
  from ..utils.logger import logger
8
  import os
9
 
 
30
  input_variables = ["metadata", "query"],
31
  partial_variables = {"format_instructions": queryRephraseParser.get_format_instructions()}
32
  )
33
+ llm = ChatGroq(
34
  model=self.config.get("QUERYREPHRASER", "model"),
35
  temperature=self.config.getfloat("QUERYREPHRASER", "temperature"),
36
  max_tokens=self.config.getint("QUERYREPHRASER", "maxTokens")
config.ini CHANGED
@@ -1,18 +1,18 @@
1
  [QUERYREPHRASER]
2
- model = llama-3.3-70b
3
  temperature = 1
4
  maxTokens = 512
5
 
6
  [METADATAGENERATOR]
7
- model = llama-3.3-70b
8
  temperature = 1
9
 
10
  [CODEGENERATOR]
11
- model = llama-3.3-70b
12
  temperature = 1
13
 
14
  [FAILSAFECODEGENERATOR]
15
- model = llama-3.3-70b
16
  temperature = 1
17
 
18
  [SPEECHTOTEXT]
 
1
  [QUERYREPHRASER]
2
+ model = meta-llama/llama-4-maverick-17b-128e-instruct
3
  temperature = 1
4
  maxTokens = 512
5
 
6
  [METADATAGENERATOR]
7
+ model = meta-llama/llama-4-maverick-17b-128e-instruct
8
  temperature = 1
9
 
10
  [CODEGENERATOR]
11
+ model = meta-llama/llama-4-maverick-17b-128e-instruct
12
  temperature = 1
13
 
14
  [FAILSAFECODEGENERATOR]
15
+ model = meta-llama/llama-4-maverick-17b-128e-instruct
16
  temperature = 1
17
 
18
  [SPEECHTOTEXT]