theRealNG committed on
Commit
f551bf5
·
unverified ·
2 Parent(s): 4f0e86635d4946

Merge pull request #28 from beautiful-code/til_v2

Browse files
endpoints.py CHANGED
@@ -4,15 +4,20 @@ load_dotenv()
4
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
5
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
6
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
7
- from .workflows.til import TilCrew, TilFeedbackResponse
 
8
  from .workflows.utils.feedback import Feedback, post_feedback
9
  from fastapi import FastAPI, Query
10
  from fastapi.middleware.cors import CORSMiddleware
11
- from pydantic import UUID4, BaseModel
12
- from typing import List, Optional
 
13
  import uvicorn
14
 
15
 
 
 
 
16
  description = """
17
  API helps you do awesome stuff. 🚀
18
 
@@ -64,8 +69,46 @@ async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
64
  return "ok"
65
 
66
 
67
- @app.post("/course_learn/suggest_expectations", tags=["course_learn"])
68
- async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  print("Inputs: ", inputs)
70
  result = SuggestExpectations().kickoff(inputs={
71
  "course": inputs.course,
@@ -76,16 +119,38 @@ async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -
76
  return result
77
 
78
 
79
- @app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
80
- async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
81
  print("Helful Score: ", feedback.metric_type)
82
  print("Feedback On: ", feedback.feedback_on)
83
  post_feedback(run_id=run_id, feedback=feedback)
84
  return "ok"
85
 
86
 
87
- @app.post("/course_learn/expectation_revision", tags=["course_learn"])
88
- async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  print("Inputs: ", inputs)
90
  result = ExpectationRevision().kickoff(inputs={
91
  "expectation": inputs.expectation,
@@ -95,16 +160,38 @@ async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -
95
  return result
96
 
97
 
98
- @app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
99
- async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
 
 
 
 
 
 
 
 
 
 
 
100
  print("Helful Score: ", feedback.metric_type)
101
  print("Feedback On: ", feedback.feedback_on)
102
  post_feedback(run_id=run_id, feedback=feedback)
103
  return "ok"
104
 
105
 
106
- @app.post("/course_learn/suggest_check_question", tags=["course_learn"])
107
- async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
 
 
 
 
 
 
 
 
 
 
 
108
  print("Inputs: ", inputs)
109
  result = SuggestCheckQuestion().kickoff(inputs={
110
  "course": inputs.course,
@@ -115,14 +202,37 @@ async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs
115
  return result
116
 
117
 
118
- @app.post("/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn"])
119
- async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
 
 
 
 
 
 
 
 
 
 
 
120
  print("Helful Score: ", feedback.metric_type)
121
  print("Feedback On: ", feedback.feedback_on)
122
  post_feedback(run_id=run_id, feedback=feedback)
123
  return "ok"
124
 
125
 
 
 
 
 
 
 
 
 
 
 
 
 
126
  @app.get("/healthcheck")
127
  async def read_root():
128
  return {"status": "ok"}
 
4
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
5
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
6
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
7
+ from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse
8
+ from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse
9
  from .workflows.utils.feedback import Feedback, post_feedback
10
  from fastapi import FastAPI, Query
11
  from fastapi.middleware.cors import CORSMiddleware
12
+ from pydantic import UUID4
13
+ from tempenv import TemporaryEnvironment
14
+ from typing import List
15
  import uvicorn
16
 
17
 
18
+ LANGSMITH_STAGING_PROJECT = "customer_agent"
19
+ LANGSMITH_PROD_PROJECT = "growthy-agents"
20
+
21
  description = """
22
  API helps you do awesome stuff. 🚀
23
 
 
69
  return "ok"
70
 
71
 
72
@app.post("/staging/til_feedback", tags=["til_feedback", "staging"])
async def staging_til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Run the v1 TIL analysis (TilCrew) against the staging LangSmith project.

    Each entry in `content` is rendered as a markdown bullet before being
    passed to the crew. Unlike the original, this does not mutate the
    caller's list and tolerates an empty list (the original indexed
    content[0] directly, raising IndexError on []).
    """
    with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
        # "* a\n* b" — identical output to the original prefix-then-join logic.
        inputs = {"content": "\n".join(f"* {til}" for til in content)}
        return TilCrew().kickoff(inputs)
80
+
81
+
82
@app.post("/staging/til_feedback/{run_id}/feedback", tags=["til_feedback", "staging"])
async def staging_capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record reviewer feedback for a staging v1 TIL run in LangSmith."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        print("Metric Type: ", feedback.metric_type)
        print("Feedback On: ", feedback.feedback_on)
        post_feedback(run_id=run_id, feedback=feedback)
        return "ok"
89
+
90
+
91
def til_v2_logic(content) -> TilV2FeedbackResponse:
    """Shared prod/staging implementation of the v2 TIL feedback endpoints.

    Formats the list of TILs as a markdown bullet list and runs AnalyseTilV2.
    Unlike the original, this does not mutate the caller's list and handles
    an empty list (the original assigned to content[0] unconditionally).
    """
    # "* a\n* b" — identical output to the original prefix-then-join logic.
    inputs = {"content": "\n".join(f"* {til}" for til in content)}
    return AnalyseTilV2().kickoff(inputs)
97
+
98
+
99
@app.post("/v2/til_feedback", tags=["til_feedback"])
async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Production v2 TIL feedback: traces to the prod LangSmith project on gpt-4o."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return til_v2_logic(content)
103
+
104
+
105
@app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Staging v2 TIL feedback: traces to the staging LangSmith project on gpt-4o-mini."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return til_v2_logic(content)
109
+
110
+
111
+ def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
112
  print("Inputs: ", inputs)
113
  result = SuggestExpectations().kickoff(inputs={
114
  "course": inputs.course,
 
119
  return result
120
 
121
 
122
def course_learn_suggest_expectations_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log suggest_expectations reviewer feedback and forward it to LangSmith.

    Returns "ok" on success; post_feedback presumably raises on failure —
    confirm against workflows.utils.feedback.
    """
    # Fixed log-label typo ("Helful" -> "Helpful"); value printed is the metric type.
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
127
 
128
 
129
@app.post("/course_learn/suggest_expectations", tags=["course_learn"])
async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Production suggest_expectations endpoint (prod LangSmith project, gpt-4o)."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_logic(inputs)
133
+
134
+
135
@app.post("/staging/course_learn/suggest_expectations", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Staging suggest_expectations endpoint (staging LangSmith project, gpt-4o-mini)."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_logic(inputs)
139
+
140
+
141
@app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Production feedback capture for suggest_expectations runs."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
145
+
146
+
147
@app.post("/staging/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Staging feedback capture for suggest_expectations runs."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
151
+
152
+
153
+ def course_learn_expectation_revision_logic(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
154
  print("Inputs: ", inputs)
155
  result = ExpectationRevision().kickoff(inputs={
156
  "expectation": inputs.expectation,
 
160
  return result
161
 
162
 
163
@app.post("/course_learn/expectation_revision", tags=["course_learn"])
async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Production expectation_revision endpoint (prod LangSmith project, gpt-4o)."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return course_learn_expectation_revision_logic(inputs)
167
+
168
+
169
@app.post("/staging/course_learn/expectation_revision", tags=["course_learn", "staging"])
async def staging_course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Staging expectation_revision endpoint (staging LangSmith project, gpt-4o-mini)."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return course_learn_expectation_revision_logic(inputs)
173
+
174
+
175
def capture_expectation_revision_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log expectation_revision reviewer feedback and forward it to LangSmith.

    Returns "ok" on success; post_feedback presumably raises on failure —
    confirm against workflows.utils.feedback.
    """
    # Fixed log-label typo ("Helful" -> "Helpful"); value printed is the metric type.
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
180
 
181
 
182
@app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Production feedback capture for expectation_revision runs."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
186
+
187
+
188
@app.post("/staging/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Staging feedback capture for expectation_revision runs."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
192
+
193
+
194
+ def course_learn_suggest_check_question_logic(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
195
  print("Inputs: ", inputs)
196
  result = SuggestCheckQuestion().kickoff(inputs={
197
  "course": inputs.course,
 
202
  return result
203
 
204
 
205
@app.post("/course_learn/suggest_check_question", tags=["course_learn"])
async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Production suggest_check_question endpoint (prod LangSmith project, gpt-4o)."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_logic(inputs)
209
+
210
+
211
@app.post("/staging/course_learn/suggest_check_question", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Staging suggest_check_question endpoint (staging LangSmith project, gpt-4o-mini)."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_logic(inputs)
215
+
216
+
217
def course_learn_suggest_check_question_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log suggest_check_question reviewer feedback and forward it to LangSmith.

    Returns "ok" on success; post_feedback presumably raises on failure —
    confirm against workflows.utils.feedback.
    """
    # Fixed log-label typo ("Helful" -> "Helpful"); value printed is the metric type.
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
222
 
223
 
224
@app.post("/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn"])
async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Production feedback capture for suggest_check_question runs."""
    prod_env = {"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
228
+
229
+
230
@app.post("/staging/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Staging feedback capture for suggest_check_question runs."""
    staging_env = {"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
234
+
235
+
236
  @app.get("/healthcheck")
237
  async def read_root():
238
  return {"status": "ok"}
requirements.txt CHANGED
@@ -18,6 +18,8 @@ python-dotenv
18
  semanticscholar
19
  streamlit
20
  streamlit-extras
 
21
  tavily-python
 
22
  unstructured
23
  uvicorn
 
18
  semanticscholar
19
  streamlit
20
  streamlit-extras
21
+ streamlit_router
22
  tavily-python
23
+ tempenv
24
  unstructured
25
  uvicorn
ui/til_feedback.py CHANGED
@@ -1,8 +1,7 @@
1
  import streamlit as st
2
  from dotenv import load_dotenv
3
- from workflows.til import TilCrew
4
  from streamlit_extras.capture import stdout
5
- from langsmith import Client
6
  from workflows.utils.feedback import Feedback
7
 
8
  load_dotenv()
@@ -21,7 +20,7 @@ def feedback_main():
21
  [data-testid="stToolbar"]{
22
  right: 2rem;
23
  }
24
-
25
 
26
  </style>
27
  '''
@@ -86,7 +85,7 @@ def feedback_main():
86
  st.markdown(f"**Reason:** {result['reason']}")
87
  if result.get('suggestion') is not None:
88
  st.markdown(f"**Suggestion:** {result['suggestion']}")
89
-
90
  feedback_key = result['til'].replace(' ', '_')
91
  feedback_given_key = f"{feedback_key}_feedback_given"
92
 
@@ -111,14 +110,14 @@ def give_feedback(feedback_key, is_helpful):
111
  feedback_data = Feedback(
112
  metric_type=metric_type,
113
  metric_score=metric_score,
114
- feedback_on=feedback_key.replace('_', ' ').title()
115
  )
116
  try:
117
  TilCrew.post_feedback(run_id, feedback_data)
118
  st.success("Feedback submitted successfully!")
119
  except Exception as e:
120
  st.error(f"Failed to submit feedback: {e}")
121
-
122
  st.session_state[f"{feedback_key}_feedback_given"] = True
123
 
124
  def clear_feedback_state(results):
@@ -128,4 +127,4 @@ def clear_feedback_state(results):
128
 
129
 
130
  if __name__ == "__main__":
131
- feedback_main()
 
1
  import streamlit as st
2
  from dotenv import load_dotenv
3
+ from workflows.til.analyse_til import TilCrew
4
  from streamlit_extras.capture import stdout
 
5
  from workflows.utils.feedback import Feedback
6
 
7
  load_dotenv()
 
20
  [data-testid="stToolbar"]{
21
  right: 2rem;
22
  }
23
+
24
 
25
  </style>
26
  '''
 
85
  st.markdown(f"**Reason:** {result['reason']}")
86
  if result.get('suggestion') is not None:
87
  st.markdown(f"**Suggestion:** {result['suggestion']}")
88
+
89
  feedback_key = result['til'].replace(' ', '_')
90
  feedback_given_key = f"{feedback_key}_feedback_given"
91
 
 
110
  feedback_data = Feedback(
111
  metric_type=metric_type,
112
  metric_score=metric_score,
113
+ feedback_on=feedback_key.replace('_', ' ').title()
114
  )
115
  try:
116
  TilCrew.post_feedback(run_id, feedback_data)
117
  st.success("Feedback submitted successfully!")
118
  except Exception as e:
119
  st.error(f"Failed to submit feedback: {e}")
120
+
121
  st.session_state[f"{feedback_key}_feedback_given"] = True
122
 
123
  def clear_feedback_state(results):
 
127
 
128
 
129
  if __name__ == "__main__":
130
+ feedback_main()
workflows/{til.py → til/analyse_til.py} RENAMED
@@ -5,10 +5,8 @@ from langchain_core.messages import SystemMessage
5
  from langchain_core.output_parsers import JsonOutputParser
6
  from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
7
  from langchain_openai import ChatOpenAI
8
- from langsmith import Client
9
  from pydantic import BaseModel, Field, UUID4
10
  from typing import List, Optional
11
- from .utils.feedback import Feedback
12
  import os
13
  import pprint
14
 
 
5
  from langchain_core.output_parsers import JsonOutputParser
6
  from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
7
  from langchain_openai import ChatOpenAI
 
8
  from pydantic import BaseModel, Field, UUID4
9
  from typing import List, Optional
 
10
  import os
11
  import pprint
12
 
workflows/til/analyse_til_v2.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain import callbacks
2
+ from langchain_core.messages import SystemMessage
3
+ from langchain_core.output_parsers import JsonOutputParser
4
+ from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
5
+ from langchain_openai import ChatOpenAI
6
+ from pydantic import BaseModel, Field, UUID4
7
+ from typing import List, Optional
8
+ import os
9
+ import pprint
10
+
11
+
12
class AnalyseTilV2:
    """v2 TIL reviewer: sends the user's TILs through one LLM chain that
    categorizes each TIL on factuality and insightfulness, then collapses
    those categorizations into a simple ok / not_ok verdict per TIL."""

    def kickoff(self, inputs=None):
        """Run the v2 TIL analysis.

        Args:
            inputs: dict with a "content" key holding the joined TIL text
                (markdown bullet list). Raises KeyError if absent.

        Returns:
            dict with "feedback" (list of per-TIL verdict dicts) and
            "run_id" (the traced LangSmith run id).
        """
        # Fixed: the original used a mutable default argument (inputs={}).
        inputs = {} if inputs is None else inputs
        print("Human Message:")
        pprint.pp(inputs)
        self.content = inputs["content"]
        self._gather_feedback()
        return self._final_call_on_feedback()

    def _final_call_on_feedback(self):
        """Collapse per-metric categorizations into one verdict per TIL.

        Factuality is checked before insightfulness, so a TIL failing both
        metrics is reported for factuality only. Anything other than a
        'High' categorization counts as a failure.
        """
        final_results = []
        for feedback in self.feedback_results:
            print("Final analysis of:")
            pprint.pp(feedback)
            result = {
                "til": feedback.get('til', ""),
                "feedback": "not_ok",
            }
            if feedback["factuality_categorization"] != 'High':
                result["feedback_criteria"] = "factuality_feedback"
                result["reason"] = feedback["factuality_reason"]
            elif feedback["insightful_categorization"] != 'High':
                result["feedback_criteria"] = "insightful_feedback"
                result["reason"] = feedback["insightful_reason"]
            else:
                result["feedback"] = "ok"
            # Fixed: original rebuilt the list each iteration
            # (final_results = final_results + [result]) — O(n^2).
            final_results.append(result)

        response = {"feedback": final_results, "run_id": self.run_id}
        print("Final Results:")
        pprint.pp(response)
        return response

    def _gather_feedback(self):
        """Invoke the feedback chain on self.content and record the run id.

        Sets self.feedback_results (list of per-TIL dicts parsed from the
        LLM's JSON) and self.run_id (first traced run collected).
        """
        feedback_chain = self._build_feedback_chain()
        pprint.pp("Analysing the TIL.....")
        with callbacks.collect_runs() as cb:
            self.feedback_results = feedback_chain.invoke(
                {"til_content": self.content})['tils']
            self.run_id = cb.traced_runs[0].id
            print("Run ID: ", self.run_id)

        print("Feedback: ")
        pprint.pp(self.feedback_results)

    def _build_feedback_chain(self):
        """Build prompt | llm | parser chain for the per-TIL review.

        Model name comes from OPENAI_MODEL; reads ENV for trace metadata
        (both env vars must be set — KeyError otherwise).
        """
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        # NOTE(review): several concatenated prompt fragments below lack a
        # trailing space; left byte-identical to avoid changing LLM behavior.
        feedback_prompt = ChatPromptTemplate.from_messages([
            SystemMessage(
                "You are a 'Personal TIL Reviewer' who works in a Product Engineering Services company. "
                "You are an expert in writing TILs which are Insightful, Factually correct, Easy to read and grammatically correct."
                "Your goal is to review user's TILs and categorize their correctness as High, Medium, or Low based on the following metrics:"
                "1. Is the TIL insightful?"
                "2. Is the TIL factually correct and accurate?"

                "The criteria to use for assessing if they are insightful or not are:\n"
                "* They TIL shouldn't just be a outright statement, it should contain even the reason on why the statement is true."
                "* It should showcase the understanding of the user on the subject.\n\n"

                "The criteria to use for assessing if they are factual or not are:\n"
                "* They are related to facts."
                "* You are able to find a source which agrees to the fact from reputable websites.\n\n"

                "Give reason for your assessment in one or two sentences for each metric. "
                "Evaluate each TIL in the context of all the user's TILs."
                f"Formatting Instructions: {feedback_parser.get_format_instructions()}"
            ),
            HumanMessagePromptTemplate.from_template("{til_content}")
        ])
        print("Prompt: ")
        pprint.pp(feedback_prompt, width=80)
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
            "tags": ["til"], "run_name": "Analysing TIL v2",
            "metadata": {
                # Fixed metadata key typo: was "versoin".
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })

        return analysis_chain
100
+
101
+
102
class TilV2FeedbackResult(BaseModel):
    """Per-TIL assessment parsed from the LLM: a High/Medium/Low
    categorization plus a short reason for each of the two review metrics."""

    til: str = Field(description="TIL as exactly captured by the user without any modifications.")
    insightful_categorization: str = Field(description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
    insightful_reason: str = Field(description="Reason for your assessment in one or two sentences on insightful metric for the user.")
    factuality_categorization: str = Field(description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
    factuality_reason: str = Field(description="Reason for your assessment in one or two sentences on factuality metric for the user.")
114
+
115
class TilV2FeedbackResults(BaseModel):
    """Top-level shape targeted by the JsonOutputParser: the list of
    per-TIL assessments under the "tils" key."""

    tils: List[TilV2FeedbackResult]
117
+
118
+
119
class TilV2FinalFeedback(BaseModel):
    """Final verdict for a single TIL: "ok" or "not_ok", with the failing
    criteria and reason populated only for "not_ok" results."""

    til: str
    feedback: str
    feedback_criteria: Optional[str] = None
    reason: Optional[str] = None
    suggestion: Optional[str] = None
125
+
126
+
127
class TilV2FeedbackResponse(BaseModel):
    """Response body of the v2 TIL endpoints: the traced LangSmith run id
    plus the list of per-TIL verdicts."""

    run_id: UUID4
    feedback: List[TilV2FinalFeedback]