File size: 2,644 Bytes
0413dd6
3d8743c
c22b03b
0413dd6
b43ad80
cb8ca6d
c22b03b
 
3d8743c
cb8ca6d
 
b49d827
3d8743c
 
 
 
0413dd6
 
 
 
 
c22b03b
 
 
 
 
cb8ca6d
 
b49d827
0413dd6
 
 
 
 
 
c22b03b
 
 
 
 
 
 
 
5dc54c6
c22b03b
 
 
 
 
 
0413dd6
 
 
b49d827
0413dd6
 
 
c22b03b
 
 
 
3d8743c
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
from langchain import hub, callbacks
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, UUID4
from .suggest_expectations import Expectation
from typing import List
import os

class Inputs(BaseModel):
    """Payload schema for an expectation-revision request.

    Mirrors the keys that `ExpectationRevision.kickoff` reads from its
    `inputs` dict. Not referenced elsewhere in this chunk — presumably
    used by the caller for validation; confirm.
    """
    course: str            # forwarded to the prompt as "course"
    module: str            # forwarded to the prompt as "module"
    concepts: List[str]    # joined into a "* "-bulleted list for the prompt
    expectation: str       # current expectation text (prompt key "learning_outcome")
    check_question: str    # current comprehension-check question
    request: str           # the user's revision request

class Response(BaseModel):
    """Shape of the dict returned by `ExpectationRevision.kickoff`.

    Not referenced elsewhere in this chunk — presumably used by the
    caller to validate/serialize the result; confirm.
    """
    run_id: UUID4          # id of the LangSmith traced run (cb.traced_runs[0].id)
    expectation: str       # revised expectation text from the LLM
    check_question: str    # revised check question from the LLM

class ExpectationRevision:
    """Revise a module expectation and its check question via an LLM chain.

    `kickoff` unpacks the request fields, runs the
    "course_learn_expectation_revision" hub prompt through ChatOpenAI with
    a JSON output parser, and returns the revised expectation/check
    question together with the traced run id (see the `Response` model).
    """

    def kickoff(self, inputs=None):
        """Run the revision chain for one request.

        Args:
            inputs: dict with required keys "expectation", "check_question",
                "request", "course", "module", and "concepts" (list of str).
                A missing key raises KeyError.

        Returns:
            dict with keys "run_id", "expectation", "check_question".
        """
        # `inputs=None` avoids the shared-mutable-default pitfall of the
        # previous `inputs={}`; behavior is unchanged (an omitted argument
        # still fails with KeyError, since every key is required).
        inputs = inputs if inputs is not None else {}
        self.learning_outcome = inputs["expectation"]
        self.check_question = inputs["check_question"]
        self.request = inputs["request"]
        self.course = inputs["course"]
        self.module = inputs["module"]
        self.concepts = inputs["concepts"]
        llm_response = self._get_suggestion()
        return {
            "run_id": self.run_id,
            "expectation": llm_response["expectation"],
            "check_question": llm_response["check_question"],
        }

    def _get_suggestion(self):
        """Invoke the prompt | llm | parser chain and capture the run id.

        Side effect: stores the LangSmith traced-run id on `self.run_id`.

        Returns:
            Parsed JSON response (dict with "expectation" and
            "check_question", per the `Expectation` schema).
        """
        parser = JsonOutputParser(pydantic_object=Expectation)
        prompt = hub.pull("course_learn_expectation_revision")
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        # NOTE(review): tags/run_name still say "suggest_expectations" /
        # "Suggest Module Expectations" — likely copied from the
        # suggest_expectations module. Left unchanged because tracing
        # dashboards may filter on these exact strings; confirm before
        # renaming.
        chain = (prompt | llm | parser).with_config({
            "tags": ["course_learn", "suggest_expectations"],
            "run_name": "Suggest Module Expectations",
            "metadata": {
                "version": "v1.0.0",
                "growth_activity": "course_learn",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            },
        })

        # collect_runs exposes the traced run so we can return its id to
        # the caller (used for feedback/telemetry on the revision).
        with callbacks.collect_runs() as cb:
            response = chain.invoke({
                "learning_outcome": self.learning_outcome,
                "check_question": self.check_question,
                "request": self.request,
                "course": self.course,
                "module": self.module,
                # Render concepts as a markdown bullet list for the prompt.
                "concepts": "* " + ("\n* ".join(self.concepts)),
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id

        return response


# Example usage
# rework = ExpectationRevision()
# response = rework.kickoff(inputs={
#     "course": "Advanced SQL",
#     "module": "Query Optimization",
#     "concepts": ["query rewriting", "execution plans"],
#     "expectation": "Recognize the importance of query rewriting and how to transform inefficient queries into more efficient ones.",
#     "check_question": "Can you provide an example of a poorly written SQL query and demonstrate how you would rewrite it to optimize its performance?",
#     "request": "Can you provide the poorly written SQL query that the user has to rewrite?"
# })
# print(response)