ai_workflows / workflows /til /analyse_til_v2.py
theRealNG's picture
workflows(tilv2): migrate tilv2 prompt to langsmith
a78cc33
raw
history blame
3.85 kB
from langchain import callbacks, hub
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List, Optional
import os
import pprint
class AnalyseTilV2:
    """Analyse "Today I Learned" (TIL) content with an LLM chain and return
    per-TIL feedback: "ok" when BOTH factuality and insightfulness are rated
    'High', otherwise "not_ok" with the model's assessment reason attached.
    """

    def kickoff(self, inputs=None):
        """Entry point: analyse ``inputs["content"]`` and return feedback.

        Args:
            inputs: mapping with a required "content" key holding the raw
                TIL text submitted by the user.

        Returns:
            dict with "feedback" (list of per-TIL verdict dicts) and
            "run_id" (id of the traced chain run).
        """
        # Fix: `inputs={}` was a shared mutable default argument.
        inputs = {} if inputs is None else inputs
        print("Human Message:")
        pprint.pp(inputs)
        self.content = inputs["content"]
        self._gather_feedback()
        return self._final_call_on_feedback()

    def _final_call_on_feedback(self):
        """Turn raw LLM categorizations into final ok/not_ok verdicts."""
        final_results = []
        for feedback in self.feedback_results:
            print("Final analysis of:")
            pprint.pp(feedback)
            result = {
                "til": feedback.get('til', ""),
                "feedback": "not_ok",
            }
            # A TIL passes only when BOTH metrics are rated 'High'; the two
            # original branches assigned the same reason, so they are merged.
            if (feedback["factuality_categorization"] != 'High'
                    or feedback["insightful_categorization"] != 'High'):
                result["reason"] = feedback["assessment_reason"]
            else:
                result["feedback"] = "ok"
            # Fix: O(1) append instead of quadratic list concatenation.
            final_results.append(result)
        response = {"feedback": final_results, "run_id": self.run_id}
        print("Final Results:")
        pprint.pp(response)
        return response

    def _gather_feedback(self):
        """Invoke the feedback chain; store its results and the run id."""
        feedback_chain = self._build_feedback_chain()
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        pprint.pp("Analysing the TIL.....")
        # collect_runs captures the traced run so its id can be returned to
        # the caller (e.g. for LangSmith feedback submission).
        with callbacks.collect_runs() as cb:
            self.feedback_results = feedback_chain.invoke({
                "til_content": self.content,
                "format_instructions": feedback_parser.get_format_instructions(),
            })['tils']
            self.run_id = cb.traced_runs[0].id
        print("Run ID: ", self.run_id)
        print("Feedback: ")
        pprint.pp(self.feedback_results)

    def _build_feedback_chain(self):
        """Build the prompt | llm | parser chain with tracing metadata.

        The prompt is pulled from the LangSmith/LangChain hub under the
        name "til_analysis".
        """
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        feedback_prompt = hub.pull("til_analysis")
        print("Prompt: ")
        pprint.pp(feedback_prompt, width=80)
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
            "tags": ["til"], "run_name": "Analysing TIL v2",
            "metadata": {
                # Fix: metadata key was misspelled "versoin".
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })
        return analysis_chain
class TilV2FeedbackResult(BaseModel):
    """Per-TIL assessment parsed from the LLM's JSON output.

    The Field descriptions are emitted verbatim in the parser's format
    instructions, so they form part of the prompt sent to the model.
    """

    til: str = Field(
        description="TIL as exactly captured by the user without any modifications.")
    insightful_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
    factuality_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
    # Fix: "assesment" typo corrected in the description below — this text is
    # sent to the LLM as format instructions, so the misspelling was live.
    assessment_reason: str = Field(
        description="Reason for your assessment in one or two sentences on factuality metric and insightful metric for the user if they are not High. The assessment should be from FPV")
class TilV2FeedbackResults(BaseModel):
    """Top-level LLM output schema: a list of per-TIL assessments.

    Used as the ``pydantic_object`` of the JsonOutputParser; the chain's
    invoke result is accessed via its ``tils`` key.
    """

    tils: List[TilV2FeedbackResult]
class TilV2FinalFeedback(BaseModel):
    """Final per-TIL verdict built in ``_final_call_on_feedback``."""

    til: str
    # "ok" or "not_ok" (see AnalyseTilV2._final_call_on_feedback).
    feedback: str
    # Present only for "not_ok" verdicts: the model's assessment reason.
    reason: Optional[str] = None
class TilV2FeedbackResponse(BaseModel):
    """Overall response shape returned by ``AnalyseTilV2.kickoff``."""

    # Traced run id collected via callbacks.collect_runs().
    run_id: UUID4
    feedback: List[TilV2FinalFeedback]