Class for learning objectives (LOs) updated with typing
Browse files
chains/learning_objectives_generator/learning_objectives_chain.py
CHANGED
|
@@ -7,6 +7,14 @@ from langchain_core.prompts.chat import ChatPromptTemplate
|
|
| 7 |
class LearningObjectivesChain(BaseModel):
    """Configuration holder for the learning-objectives generation chain.

    NOTE(review): the original docstring was empty; this summary is inferred
    from the class name and module path — confirm against the chain's runner.
    """

    class Config:
        # Allow non-pydantic field types (e.g. LangChain prompt/LLM objects)
        # to be stored on this model without pydantic validation.
        arbitrary_types_allowed = True
|
|
|
|
| 7 |
class LearningObjectivesChain(BaseModel):
    """Configuration bundle for the learning-objectives generation chain.

    Groups the prompt templates and LLM handles the pipeline needs.
    Judging by the field names, the flow appears to be:
    standardize -> generate (two prompt variants, two default models)
    -> sanitize — TODO(review): confirm against the runner.

    The LLM fields are typed ``Any`` because the concrete client objects are
    not pydantic models; see ``Config.arbitrary_types_allowed`` below.
    """

    # Standardization step: prompt plus the model that runs it.
    template_standardize: ChatPromptTemplate
    llm_standardize: Any

    # Generation step: two prompt variants (a/b), each with a default model
    # used when the caller does not override the choice.
    template_gen_prompt_a: ChatPromptTemplate
    template_gen_prompt_b: ChatPromptTemplate
    default_llm_a: Any
    default_llm_b: Any

    # Sanitization step: prompt plus the model that cleans up the output.
    template_sanitize: ChatPromptTemplate
    llm_sanitize: Any

    class Config:
        # Permit non-pydantic types (ChatPromptTemplate, LLM clients) as
        # field values without pydantic trying to validate them.
        arbitrary_types_allowed = True
|
chains/learning_objectives_generator/runner.py
CHANGED
|
@@ -34,7 +34,7 @@ async def run_learning_objectives_generator(
|
|
| 34 |
llm_a = llms.get(model_choice_1, config["default_llm_a"])
|
| 35 |
llm_b = llms.get(model_choice_2, config["default_llm_b"])
|
| 36 |
|
| 37 |
-
llm_sanitize=llms.get(config["llm_sanitize"])
|
| 38 |
|
| 39 |
# We will store the final sanitized results in an array of 4 strings
|
| 40 |
# (2 prompts × 2 LLMs)
|
|
|
|
| 34 |
llm_a = llms.get(model_choice_1, config["default_llm_a"])
|
| 35 |
llm_b = llms.get(model_choice_2, config["default_llm_b"])
|
| 36 |
|
| 37 |
+
llm_sanitize = llms.get(config["llm_sanitize"])
|
| 38 |
|
| 39 |
# We will store the final sanitized results in an array of 4 strings
|
| 40 |
# (2 prompts × 2 LLMs)
|