from .suggest_expectations import Expectation
from langchain import hub, callbacks
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, UUID4, Field
from typing import List
import os


class Inputs(BaseModel):
    """Expected payload for ``SuggestCheckQuestion.kickoff``."""
    course: str
    module: str
    concepts: List[str]
    expectation: str


class Response(BaseModel):
    """Shape of the dict returned by ``SuggestCheckQuestion.kickoff``."""
    run_id: UUID4
    expectation: str
    check_question: str


class CheckQuestion(BaseModel):
    """Structured output the LLM is instructed to produce."""
    check_question: str = Field(
        description=(
            "Targeted question that the course designer has developed to "
            "assess the learner's understanding of the learning outcomes."
        )
    )


class SuggestCheckQuestion:
    """Generate a comprehension-check question for a module learning outcome.

    Builds a LangChain runnable (hub prompt | ChatOpenAI | JSON parser),
    invokes it with the course/module/concepts context, and records the
    LangSmith run id of the traced invocation.
    """

    def kickoff(self, inputs=None):
        """Run the chain and return the run id, outcome, and question.

        Args:
            inputs: mapping with keys "course", "module", "expectation",
                and "concepts" (see the ``Inputs`` model).

        Returns:
            dict matching the ``Response`` model fields.

        Raises:
            KeyError: if a required key is missing from ``inputs``.
        """
        # `inputs=None` replaces a mutable default dict (shared across calls).
        inputs = inputs or {}
        self.course = inputs["course"]
        self.module = inputs["module"]
        self.learning_outcome = inputs["expectation"]
        self.concepts = inputs["concepts"]

        llm_response = self._get_check_question()

        return {
            "run_id": self.run_id,
            "expectation": self.learning_outcome,
            "check_question": llm_response["check_question"],
        }

    def _get_check_question(self):
        """Invoke the chain, capturing the traced run id as ``self.run_id``."""
        parser = JsonOutputParser(pydantic_object=CheckQuestion)
        # Reuse this parser in the chain instead of building a second one.
        chain = self._build_chain(parser)

        with callbacks.collect_runs() as cb:
            llm_response = chain.invoke({
                "course": self.course,
                "module": self.module,
                # Render concepts as a markdown bullet list for the prompt.
                "concepts": "* " + ("\n* ".join(self.concepts)),
                "format_instructions": parser.get_format_instructions(),
                "learning_outcome": self.learning_outcome,
            })
            self.run_id = cb.traced_runs[0].id

        return llm_response

    # Backward-compatible alias for the previous (misspelled) method name.
    _get_check_quesiton = _get_check_question

    def _build_chain(self, parser=None):
        """Build the prompt | llm | parser runnable with tracing config.

        Args:
            parser: optional ``JsonOutputParser`` to reuse; a new one is
                created when omitted (keeps the old zero-arg call working).
        """
        if parser is None:
            parser = JsonOutputParser(pydantic_object=CheckQuestion)
        prompt = hub.pull("course_learn_suggest_check_question")
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)

        chain = (prompt | llm | parser).with_config({
            "tags": ["course_learn", "suggest_check_question"],
            # Fixed copy-paste: this flow suggests a check question,
            # not module expectations (matches the tags above).
            "run_name": "Suggest Check Question",
            "metadata": {
                "version": "v1.0.0",
                "growth_activity": "course_learn",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            },
        })
        return chain