File size: 2,341 Bytes
81e622d
 
 
 
 
 
 
 
 
 
 
 
b49d827
81e622d
 
 
 
 
 
 
 
d9efaab
81e622d
 
 
 
 
 
 
 
 
 
b49d827
81e622d
 
 
 
 
 
 
 
d9efaab
 
 
 
 
b49d827
d9efaab
 
 
 
 
 
 
 
81e622d
 
 
 
 
 
5dc54c6
81e622d
 
 
 
 
 
d9efaab
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
from .suggest_expectations import Expectation
from langchain import hub, callbacks
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, UUID4, Field
from typing import List
import os


class Inputs(BaseModel):
    # Expected input payload for SuggestCheckQuestion.kickoff: course and
    # module names, the list of concept strings, and the learning-outcome
    # text (keyed "expectation").
    # NOTE(review): this model is not referenced by the visible code --
    # presumably callers use it to validate the kickoff dict; confirm.
    course: str
    module: str
    concepts: List[str]
    expectation: str


class Response(BaseModel):
    # Shape of the dict returned by SuggestCheckQuestion.kickoff: the
    # LangSmith traced-run id, the echoed learning outcome ("expectation"),
    # and the generated check question.
    # NOTE(review): not referenced by the visible code -- presumably used
    # by callers to validate/serialize the kickoff result; confirm.
    run_id: UUID4
    expectation: str
    check_question: str


class CheckQuestion(BaseModel):
    """Schema for the LLM's JSON output: one targeted check question.

    Used as the ``pydantic_object`` of ``JsonOutputParser`` so its field
    description is embedded in the format instructions sent to the model.
    """

    # Fix: subject-verb agreement in the description ("designer have
    # developed" -> "designer has developed"); this text is injected into
    # the prompt's format instructions.
    check_question: str = Field(
        description="Targeted question that the course designer has developed to assess the learner's understanding of the learning outcomes.")


class SuggestCheckQuestion:
    """Generates a comprehension-check question for a module learning outcome.

    Pulls a prompt from the LangChain hub, runs it through an OpenAI chat
    model, and parses the JSON reply into a check question. Requires the
    ``OPENAI_MODEL`` and ``ENV`` environment variables.
    """

    def kickoff(self, inputs=None):
        """Entry point: generate a check question from *inputs*.

        Args:
            inputs: Mapping with keys ``"course"``, ``"module"``,
                ``"expectation"`` (the learning-outcome text) and
                ``"concepts"`` (list of concept strings). A missing key
                raises ``KeyError``, as before.

        Returns:
            dict with ``"run_id"`` (LangSmith traced-run id),
            ``"expectation"`` and ``"check_question"``.
        """
        # Fix: avoid a mutable default argument (was `inputs={}`).
        # `None` -> `{}` preserves the original KeyError on missing keys.
        inputs = {} if inputs is None else inputs
        self.course = inputs["course"]
        self.module = inputs["module"]
        self.learning_outcome = inputs["expectation"]
        self.concepts = inputs["concepts"]
        llm_response = self._get_check_question()
        return {
            "run_id": self.run_id,
            "expectation": self.learning_outcome,
            "check_question": llm_response["check_question"]
        }

    def _get_check_question(self):
        """Invoke the chain, record the traced run id, return the parsed reply."""
        # Fix: method renamed from the misspelled `_get_check_quesiton`
        # (private, so no external callers are affected).
        parser = JsonOutputParser(pydantic_object=CheckQuestion)
        # Reuse the same parser for the chain instead of building a second
        # identical one inside _build_chain.
        chain = self._build_chain(parser)

        with callbacks.collect_runs() as cb:
            llm_response = chain.invoke({
                "course": self.course, "module": self.module,
                # Render the concepts as a markdown bullet list for the prompt.
                "concepts": "* " + ("\n* ".join(self.concepts)),
                "format_instructions": parser.get_format_instructions(),
                "learning_outcome": self.learning_outcome
            })
            # The first traced run is the chain invocation above.
            self.run_id = cb.traced_runs[0].id

        return llm_response

    def _build_chain(self, parser=None):
        """Build the prompt | llm | parser chain with tracing config.

        Args:
            parser: Optional pre-built ``JsonOutputParser``. One is created
                when omitted, keeping the original zero-argument call
                signature working.
        """
        if parser is None:
            parser = JsonOutputParser(pydantic_object=CheckQuestion)
        prompt = hub.pull("course_learn_suggest_check_question")
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        chain = (prompt | llm | parser).with_config({
            "tags": ["course_learn", "suggest_check_question"],
            # Fix: run_name previously read "Suggest Module Expectations",
            # a copy-paste from the sibling suggest_expectations module.
            "run_name": "Suggest Check Question",
            "metadata": {
                "version": "v1.0.0",
                "growth_activity": "course_learn",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            }
        })

        return chain