theRealNG committed on
Commit
52b64c6
·
unverified ·
2 Parent(s): 0a983736b88892

Merge pull request #32 from beautiful-code/til_v2_2

Browse files

(til_v2): update til to include readability and headlines endpoints

endpoints.py CHANGED
@@ -6,14 +6,17 @@ from typing import List
6
  from tempenv import TemporaryEnvironment
7
  from pydantic import UUID4
8
  from fastapi.middleware.cors import CORSMiddleware
9
- from fastapi import FastAPI, Query
 
 
10
  from .workflows.utils.feedback import Feedback, post_feedback
11
  from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse
12
  from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse
13
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
14
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
15
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
16
-
 
17
 
18
  LANGSMITH_STAGING_PROJECT = "customer_agent"
19
  LANGSMITH_PROD_PROJECT = "growthy-agents"
@@ -52,11 +55,10 @@ app.add_middleware(
52
  )
53
 
54
 
 
55
  @app.post("/til_feedback", tags=["til_feedback"])
56
  async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
57
- separator = "\n* "
58
- content[0] = "* " + content[0]
59
- inputs = {"content": separator.join(content)}
60
  result = TilCrew().kickoff(inputs)
61
  return result
62
 
@@ -88,10 +90,8 @@ async def staging_capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
88
  return "ok"
89
 
90
 
91
- def til_v2_logic(content) -> TilV2FeedbackResponse:
92
- separator = "\n* "
93
- content[0] = "* " + content[0]
94
- inputs = {"content": separator.join(content)}
95
  result = AnalyseTilV2().kickoff(inputs)
96
  return result
97
 
@@ -99,13 +99,13 @@ def til_v2_logic(content) -> TilV2FeedbackResponse:
99
  @app.post("/v2/til_feedback", tags=["til_feedback"])
100
  async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
101
  with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
102
- return til_v2_logic(content)
103
 
104
 
105
  @app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
106
  async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
107
  with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
108
- return til_v2_logic(content)
109
 
110
 
111
  @app.post("/v2/til_feedback/{run_id}/feedback", tags=["til_feedback"])
@@ -124,6 +124,36 @@ async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
124
  return "ok"
125
 
126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
  def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
128
  print("Inputs: ", inputs)
129
  result = SuggestExpectations().kickoff(inputs={
 
6
  from tempenv import TemporaryEnvironment
7
  from pydantic import UUID4
8
  from fastapi.middleware.cors import CORSMiddleware
9
+ from fastapi import FastAPI
10
+
11
+ from .utils.endpoints_utils import CreateTilInputs
12
  from .workflows.utils.feedback import Feedback, post_feedback
13
  from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse
14
  from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse
15
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
16
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
17
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
18
+ from .workflows.til.rewrite_til_v2 import RewriteTilV2, Response as RewriteTilResponse
19
+ from .workflows.til.suggest_headlines_v2 import SuggestHeadlinesV2, Response as SuggestHeadlinesResponse
20
 
21
  LANGSMITH_STAGING_PROJECT = "customer_agent"
22
  LANGSMITH_PROD_PROJECT = "growthy-agents"
 
55
  )
56
 
57
 
58
# TIL
@app.post("/til_feedback", tags=["til_feedback"])
async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Run the v1 TIL feedback crew over the submitted takeaways.

    The takeaways are joined into a single markdown bullet list by
    CreateTilInputs before being handed to the crew.
    """
    return TilCrew().kickoff(CreateTilInputs(content))
 
 
90
  return "ok"
91
 
92
 
93
def til_v2_analyze_logic(content) -> TilV2FeedbackResponse:
    """Shared v2 analysis path used by the prod and staging endpoints."""
    return AnalyseTilV2().kickoff(CreateTilInputs(content))
97
 
 
99
  @app.post("/v2/til_feedback", tags=["til_feedback"])
100
  async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
101
  with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
102
+ return til_v2_analyze_logic(content)
103
 
104
 
105
  @app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
106
  async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
107
  with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
108
+ return til_v2_analyze_logic(content)
109
 
110
 
111
  @app.post("/v2/til_feedback/{run_id}/feedback", tags=["til_feedback"])
 
124
  return "ok"
125
 
126
 
127
+ @app.post("/v2/til_rewrite", tags=["til_readability"])
128
+ async def til_v2_rewrite_kickoff(content: List[str]) -> RewriteTilResponse:
129
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
130
+ inputs = CreateTilInputs(content)
131
+ result = RewriteTilV2().kickoff(inputs)
132
+ return result
133
+
134
+ @app.post("/staging/v2/til_rewrite", tags=["til_readability", "staging"])
135
+ async def staging_til_v2_rewrite_kickoff(content: List[str]) -> RewriteTilResponse:
136
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
137
+ inputs = CreateTilInputs(content)
138
+ result = RewriteTilV2().kickoff(inputs)
139
+ return result
140
+
141
+ @app.post("/v2/til_headlines", tags=["til_headlines"])
142
+ async def til_v2_suggest_headlines(content: List[str]) -> SuggestHeadlinesResponse:
143
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
144
+ inputs = CreateTilInputs(content)
145
+ result = SuggestHeadlinesV2().kickoff(inputs)
146
+ return result
147
+
148
+ @app.post("/staging/v2/til_headlines", tags=["til_headlines", "staging"])
149
+ async def staging_til_v2_suggest_headlines(content: List[str]) -> SuggestHeadlinesResponse:
150
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
151
+ inputs = CreateTilInputs(content)
152
+ result = SuggestHeadlinesV2().kickoff(inputs)
153
+ return result
154
+
155
+
156
+ # Course Learn
157
  def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
158
  print("Inputs: ", inputs)
159
  result = SuggestExpectations().kickoff(inputs={
utils/endpoints_utils.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
from typing import List


def CreateTilInputs(content: List[str]) -> dict:
    """Format a list of TIL takeaways as a single markdown bullet list.

    Each entry becomes a "* "-prefixed bullet; bullets are joined with
    newlines into the single "content" string the TIL workflows expect.

    Args:
        content: Takeaway strings, one per bullet. May be empty.

    Returns:
        A dict with a single "content" key holding the joined bullet list
        (empty string when ``content`` is empty).
    """
    # Build the result without mutating the caller's list (the previous
    # version rewrote content[0] in place) and without raising IndexError
    # on an empty list.
    if not content:
        return {"content": ""}
    return {"content": "* " + "\n* ".join(content)}
workflows/til/rewrite_til_v2.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain import callbacks, hub
2
+ from langchain_core.output_parsers import JsonOutputParser
3
+ from langchain_openai import ChatOpenAI
4
+ from pydantic import BaseModel, Field, UUID4
5
+ from typing import List
6
+ import os
7
+
8
# One takeaway returned to the API caller: either the original text
# (when it was already readable) or the LLM's rewrite.
class Takeaway(BaseModel):
    # The takeaway text — original, or the readable rewrite.
    takeaway: str
    # NOTE(review): set to "ok" when a rewrite was applied (score below
    # "High") and "not_ok" otherwise — see RewriteTilV2._handle_response;
    # confirm this polarity is intentional.
    feedback: str

# API response: the LangSmith run id plus one Takeaway per input TIL.
class Response(BaseModel):
    run_id : UUID4
    new_til: List[Takeaway]

# Schema for a single TIL entry in the LLM's JSON output; the Field
# descriptions are surfaced to the model via the JsonOutputParser
# format instructions.
class ReadableTilResult(BaseModel):
    original_til: str = Field(
        description="The original TIL without any modifications.",
    )
    readability_score: str = Field(
        description="The readability score as High/Medium/Low based on the readability of the TIL.",
    )
    reason: str = Field(
        description="The reason for the assessment of the TIL readability score in one sentence.",
    )
    readable_til: str = Field(
        description="Rewrite the TIL in a readable manner only if the readability score is not High",
    )

# Top-level shape of the LLM's JSON output: one result per TIL.
class ReadableTilResults(BaseModel):
    tils: List[ReadableTilResult]
32
+
33
class RewriteTilV2():
    """Score each TIL takeaway's readability via an LLM chain and rewrite
    the ones whose score is below "High"."""

    def kickoff(self, inputs=None) -> Response:
        """Entry point.

        Args:
            inputs: Mapping with a "content" key holding the bullet-formatted
                TIL text (see CreateTilInputs). Required — the previous
                mutable default ``inputs=[]`` could never work because the
                body indexes it with a string key.

        Returns:
            Response with the LangSmith run id and per-takeaway results.
        """
        self.content = inputs["content"]
        return self._get_understandable_til()

    def _get_understandable_til(self) -> Response:
        """Build and invoke the prompt | llm | parser chain, capturing the run id."""
        # Prompt lives in the LangChain hub; the model name comes from the
        # environment (set per-endpoint via TemporaryEnvironment).
        prompt = hub.pull("til_understandability_revision")
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        parser = JsonOutputParser(pydantic_object=ReadableTilResults)

        chain = (prompt | llm | parser).with_config({
            "tags": ["til"], "run_name": "Rewrite Understandable TIL",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })

        # collect_runs captures the traced run so its id can be returned to
        # the caller for later feedback posting.
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id

        return self._handle_response()

    def _handle_response(self) -> Response:
        """Map the parsed LLM output onto the API Response model."""
        response = Response(
            run_id=self.run_id,
            new_til=[]
        )

        recommended_tils = self.llm_response["tils"]

        for til in recommended_tils:
            # Default: keep the original text.
            new_takeaway = Takeaway(
                feedback="not_ok",
                takeaway=til["original_til"]
            )

            # NOTE(review): feedback becomes "ok" only when a rewrite was
            # applied (score below "High") and stays "not_ok" when the
            # original was already readable — confirm this labeling is
            # intentional and not inverted.
            if til["readability_score"] != "High":
                new_takeaway.feedback = "ok"
                new_takeaway.takeaway = til["readable_til"]

            response.new_til.append(new_takeaway)

        return response
workflows/til/suggest_headlines_v2.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain import callbacks, hub
2
+ from langchain_core.messages import SystemMessage
3
+ from langchain_core.output_parsers import JsonOutputParser
4
+ from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
5
+ from langchain_openai import ChatOpenAI
6
+ from pydantic import BaseModel, Field, UUID4
7
+ from typing import List, Optional
8
+ import os
9
+ import pprint
10
+
11
# One suggested headline returned to the API caller.
class HeadlineInfo(BaseModel):
    headline: str
    tone: str
    reason: str

# API response: the LangSmith run id plus the suggested headlines.
class Response(BaseModel):
    run_id : UUID4
    headlines_details: List[HeadlineInfo]

# Schema for one headline in the LLM's JSON output; Field descriptions
# are surfaced to the model via the JsonOutputParser format instructions.
class HeadlineInfoLLM(BaseModel):
    headline: str = Field(
        description="The suggested headline for the given TIL.",
    )
    tone: str = Field(
        description="The tone of the suggested headline.",
    )
    reason: str = Field(
        description="Reason for the clickability_score in one sentence",
    )
    # NOTE(review): clickability_score is requested from the LLM but is
    # dropped by SuggestHeadlinesV2._handle_response (only headline/tone/
    # reason are copied into the Response) — confirm whether that is
    # intentional.
    clickability_score: int = Field(
        description="A score out of 10 on how likely the user is going to click on the headline and read the TIL.",
    )

# Top-level shape of the LLM's JSON output.
class HeadlineResults(BaseModel):
    headlines_details: List[HeadlineInfoLLM]
36
+
37
class SuggestHeadlinesV2():
    """Suggest alternative headlines for a TIL via an LLM chain and map the
    parsed output onto the API Response model."""

    def kickoff(self, inputs=None) -> Response:
        """Entry point.

        Args:
            inputs: Mapping with a "content" key holding the bullet-formatted
                TIL text (see CreateTilInputs). Required — the previous
                mutable default ``inputs=[]`` could never work because the
                body indexes it with a string key.

        Returns:
            Response with the LangSmith run id and the suggested headlines.
        """
        self.content = inputs["content"]
        return self._get_til_headline()

    def _get_til_headline(self) -> Response:
        """Build and invoke the prompt | llm | parser chain, capturing the run id."""
        # Prompt comes from the LangChain hub; the model name is chosen
        # per-endpoint through the environment (TemporaryEnvironment).
        prompt = hub.pull("til_suggest_headline")
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        parser = JsonOutputParser(pydantic_object=HeadlineResults)

        chain = (prompt | llm | parser).with_config({
            "tags": ["til"], "run_name": "Suggest TIL Headlines",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })

        # collect_runs captures the traced run so its id can be returned to
        # the caller for later feedback posting.
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id

        return self._handle_response()

    def _handle_response(self) -> Response:
        """Copy headline/tone/reason from the parsed LLM output.

        NOTE(review): the LLM also returns a clickability_score (see
        HeadlineInfoLLM) which is dropped here — confirm that is intended.
        """
        response = Response(
            run_id=self.run_id,
            headlines_details=[]
        )

        headlines_data = self.llm_response["headlines_details"]
        for headline_datum in headlines_data:
            response.headlines_details.append(
                HeadlineInfo(
                    headline=headline_datum["headline"],
                    tone=headline_datum["tone"],
                    reason=headline_datum["reason"],
                )
            )

        return response