File size: 2,653 Bytes
6b88892
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
040b0a9
6b88892
 
040b0a9
6b88892
040b0a9
6b88892
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
from langchain import callbacks, hub
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List
import os

class Takeaway(BaseModel):
    """One TIL takeaway after the readability pass, plus the verdict on it."""

    # The TIL text: the original when it was already readable, otherwise
    # the LLM-rewritten version (see RewriteTilV2._handle_response).
    takeaway: str
    # "ok" when the original TIL scored High readability, else "not_ok".
    feedback: str

class Response(BaseModel):
    """Result of RewriteTilV2.kickoff: the traced run id and revised takeaways."""

    # LangChain traced-run id captured via callbacks.collect_runs().
    run_id : UUID4
    # One Takeaway per TIL entry in the LLM output.
    new_til: List[Takeaway]

class ReadableTilResult(BaseModel):
    """Per-TIL readability assessment produced by the LLM.

    Used as the JSON schema for JsonOutputParser, so the Field descriptions
    below double as instructions to the model.
    """

    original_til: str = Field(
        description="The original TIL without any modifications.",
    )
    readability_score: str = Field(
        description="The readability score as High/Medium/Low based on the readability of the TIL.",
    )
    reason: str = Field(
        description="The reason for the assessment of the TIL readability score in one sentence.",
    )
    readable_til: str = Field(
        description="Rewrite the TIL in a readable manner only if the readability score is not High",
    )

class ReadableTilResults(BaseModel):
    """Top-level LLM output schema: one ReadableTilResult per input TIL."""

    tils: List[ReadableTilResult]

class RewriteTilV2:
    """Rewrite TIL ("Today I Learned") entries for readability via an LLM chain.

    Pulls the ``til_understandability_revision`` prompt from the LangChain hub,
    runs it through ChatOpenAI, parses the JSON output against the
    ReadableTilResults schema, and converts the result into a Response.

    Environment variables read: ``OPENAI_MODEL``, ``ENV``.
    """

    def kickoff(self, inputs=None) -> Response:
        """Entry point.

        Args:
            inputs: Mapping with a ``"content"`` key holding the TIL text
                to assess/rewrite.

        Returns:
            Response with the traced run id and one Takeaway per TIL.

        Raises:
            KeyError: If ``inputs`` lacks the ``"content"`` key.
        """
        # BUG FIX: the previous default was a mutable `[]`, which is shared
        # across calls and the wrong type anyway — the next line indexes it
        # like a dict. Use None as the sentinel and default to an empty dict.
        if inputs is None:
            inputs = {}
        self.content = inputs["content"]
        return self._get_understandable_til()

    def _get_understandable_til(self) -> Response:
        """Build and invoke the prompt | llm | parser chain, capturing the run id."""
        prompt = hub.pull("til_understandability_revision")
        # Low temperature: rewriting should stay close to the source text.
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        parser = JsonOutputParser(pydantic_object=ReadableTilResults)

        # Tag/metadata config feeds LangSmith tracing and filtering.
        chain = (prompt | llm | parser).with_config({
            "tags": ["til"], "run_name": "Rewrite Understandable TIL",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })

        # collect_runs lets us recover the traced run id for the Response.
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id

        return self._handle_response()

    def _handle_response(self) -> Response:
        """Map the parsed LLM dict into a Response.

        A TIL that scored "High" keeps its original text with feedback "ok";
        anything else gets the LLM's rewrite with feedback "not_ok".
        """
        takeaways = []
        for til in self.llm_response["tils"]:
            is_readable = til["readability_score"] == "High"
            takeaways.append(Takeaway(
                feedback="ok" if is_readable else "not_ok",
                takeaway=til["original_til"] if is_readable else til["readable_til"],
            ))

        return Response(run_id=self.run_id, new_til=takeaways)