File size: 2,590 Bytes
6b88892
 
 
 
7adfc31
6b88892
7adfc31
6b88892
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7adfc31
 
 
6b88892
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1fe1487
6b88892
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7adfc31
 
6b88892
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
from langchain import callbacks, hub
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List
import os
import random

class HeadlineInfo(BaseModel):
    """One headline suggestion as returned to the caller (subset of the LLM output)."""
    headline: str  # the suggested headline text
    tone: str  # tone label carried over from the LLM result
    reason: str  # one-sentence rationale carried over from the LLM result

class Response(BaseModel):
    """Final result of a headline-suggestion run: traced run id plus selected headlines."""
    run_id : UUID4  # LangSmith run id captured via callbacks.collect_runs()
    headlines_details: List[HeadlineInfo]  # up to three randomly chosen suggestions

class HeadlineInfoLLM(BaseModel):
    """Schema for a single headline as the LLM must emit it.

    The ``Field`` descriptions double as instructions embedded in the JSON
    format spec produced by ``JsonOutputParser.get_format_instructions()``.
    """
    headline: str = Field(
        description="The suggested headline for the given TIL.",
    )
    tone: str = Field(
        description="The tone of the suggested headline.",
    )
    clickability_score: int = Field(
        description="Evaluate the headline's quality on a scale of 1 to 10 w.r.t the tone.",
    )
    reason: str = Field(
        description="Reason for the clickability_score in one sentence",
    )

class HeadlineResults(BaseModel):
    """Top-level schema the LLM must emit: a list of headline suggestions."""
    headlines_details: List[HeadlineInfoLLM]  # all candidates; a random subset is surfaced later

class SuggestHeadlinesV2:
    """Suggest TIL headlines via a LangChain prompt | LLM | JSON-parser chain.

    ``kickoff`` is the public entry point: it stores the TIL content, invokes
    the chain (capturing the LangSmith run id), and returns a ``Response``
    holding the run id plus up to three randomly selected headline
    suggestions.
    """

    def kickoff(self, inputs=None) -> Response:
        """Run the headline-suggestion flow.

        Args:
            inputs: Mapping with a ``"content"`` key holding the TIL text.

        Returns:
            ``Response`` with the traced run id and up to three headlines.

        Raises:
            KeyError: If ``"content"`` is missing from ``inputs``.
        """
        # NOTE: the previous default was a mutable list (`inputs=[]`) —
        # shared across calls and the wrong type for the dict lookup below,
        # so calling with no argument always crashed. `None` + guard fixes both.
        inputs = inputs or {}
        self.content = inputs["content"]
        return self._get_til_headline()

    def _get_til_headline(self) -> Response:
        """Build and invoke the chain, capturing the LangSmith run id."""
        prompt = hub.pull("til_suggest_headline")
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.8)
        parser = JsonOutputParser(pydantic_object=HeadlineResults)

        chain = (prompt | llm | parser).with_config({
            "tags": ["til"], "run_name": "Suggest TIL Headlines",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })

        # collect_runs exposes the traced run so its id can be returned to
        # the caller (e.g. for later feedback submission).
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id

        return self._handle_response()

    def _handle_response(self) -> Response:
        """Pick up to three random headlines from the parsed LLM output.

        Uses ``random.sample`` instead of ``random.shuffle`` so the raw
        ``self.llm_response`` payload is not mutated as a side effect, and
        clamps ``k`` so fewer than three candidates (including zero) is
        handled cleanly.
        """
        headlines_data = self.llm_response["headlines_details"]
        picked = random.sample(headlines_data, k=min(3, len(headlines_data)))

        return Response(
            run_id=self.run_id,
            headlines_details=[
                HeadlineInfo(
                    headline=datum["headline"],
                    tone=datum["tone"],
                    reason=datum["reason"],
                )
                for datum in picked
            ],
        )