# crew/til.py — renamed from til.py to crew/til.py by AnanthulaShravya (commit d0b9fb3, verified)
from langchain import callbacks
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools.tavily_search import TavilyAnswer
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List, Optional
import os
import pprint
class TilCrew:
    """Reviews a user's TILs ("Today I Learned" notes) with an LLM chain.

    The flow (see ``kickoff``) sends the raw TIL content through a
    prompt | ChatOpenAI | JsonOutputParser chain that scores each TIL on four
    metrics (insightfulness, factuality, simplicity, grammar), then maps the
    per-TIL scores to a final ok / not_ok verdict with a reason and, for the
    wording-related metrics, a suggested rewrite.
    """

    def kickoff(self, inputs=None):
        """Run the full review and return the final feedback payload.

        Args:
            inputs: dict expected to contain a "content" key holding the
                user's TIL text. Raises KeyError if "content" is missing.

        Returns:
            dict with "feedback" (list of per-TIL verdicts) and "run_id"
            (the LangSmith run id collected during the chain invocation).
        """
        # NOTE: the original signature used a mutable default (`inputs={}`),
        # which is shared across calls; `None` + local default is safe.
        inputs = {} if inputs is None else inputs
        print("Human Message:")
        pprint.pp(inputs)
        self.content = inputs["content"]
        # self._gather_facts()  # deprecated — gave results similar to not using it
        self._gather_feedback()
        return self._final_call_on_feedback()

    def _final_call_on_feedback(self):
        """Convert the raw LLM feedback dicts into the response payload.

        Reads ``self.feedback_results`` (list of per-TIL score dicts) and
        ``self.run_id`` (set by ``_gather_feedback``).
        """
        final_results = []
        for feedback in self.feedback_results:
            print("Final analysis of:")
            pprint.pp(feedback)
            final_results.append(self._evaluate_feedback(feedback))
        response = {"feedback": final_results, "run_id": self.run_id}
        print("Final Results:")
        pprint.pp(response)
        return response

    def _evaluate_feedback(self, feedback):
        """Map one per-TIL score dict to a single verdict dict.

        Checks run in priority order and the first failing metric wins.
        Factuality and insightfulness must be 'High' (so 'Medium' also
        fails them); simplicity and grammar only fail on 'Low' and attach
        the LLM's rewritten TIL as a suggestion.
        """
        result = {
            "til": feedback.get('til', ""),
            "feedback": "not_ok",
        }
        if feedback["factuality_categorization"] != 'High':
            result["feedback_criteria"] = "factuality_feedback"
            result["reason"] = feedback["factuality_reason"]
            return result
        if feedback["insightful_categorization"] != 'High':
            result["feedback_criteria"] = "insightful_feedback"
            result["reason"] = feedback["insightful_reason"]
            return result
        if feedback["simplicity_categorization"] == 'Low':
            result["feedback_criteria"] = "simplicity_feedback"
            result["reason"] = feedback["simplicity_reason"]
            result["suggestion"] = feedback["final_suggestion"]
            return result
        if feedback["grammatical_categorization"] == 'Low':
            result["feedback_criteria"] = "grammatical_feedback"
            result["reason"] = feedback["grammatical_reason"]
            result["suggestion"] = feedback["final_suggestion"]
            return result
        # All four metrics passed.
        result["feedback"] = "ok"
        return result

    def _gather_feedback(self):
        """Invoke the review chain on ``self.content`` and store the results.

        Side effects: sets ``self.feedback_results`` (the parsed 'tils' list)
        and ``self.run_id`` (id of the first traced run, used by the caller
        for LangSmith feedback association).
        """
        feedback_chain = self._build_feedback_chain()
        pprint.pp("Analysing the TIL.....")
        with callbacks.collect_runs() as cb:
            self.feedback_results = feedback_chain.invoke({"til_content": self.content})['tils']
            self.run_id = cb.traced_runs[0].id
            print("Run ID: ", self.run_id)
        print("Feedback: ")
        pprint.pp(self.feedback_results)

    # Deprecated: Not using this as we are getting similar results by using or without using this
    def _gather_facts(self):
        """Use a ReAct agent with Tavily search to collect facts on the TIL topics.

        Side effect: sets ``self.facts`` to the agent's final answer string.
        """
        facts_prompt = PromptTemplate.from_template("What are the facts on the topics mentioned the following user's TILs: {content}")
        tools = [TavilyAnswer()]
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        prompt = hub.pull("hwchase17/react")
        agent = create_react_agent(llm, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
        self.facts = agent_executor.invoke({"input": facts_prompt.format(content=self.content)})['output']
        print("Gathered Facts: ")
        pprint.pp(self.facts)

    def _build_feedback_chain(self):
        """Build the prompt | llm | parser chain that reviews the TILs.

        The parser's format instructions (derived from TilFeedbackResults)
        are embedded in the system message, so the model returns JSON that
        parses into the score dicts consumed by ``_final_call_on_feedback``.
        """
        feedback_parser = JsonOutputParser(pydantic_object=TilFeedbackResults)
        feedback_prompt = ChatPromptTemplate.from_messages([
            SystemMessage(
                "You are a 'Personal TIL Reviewer' who works in a Product Engineering Services company. "
                "You are an expert in writing TILs which are Insightful, Factually correct, Easy to read and grammatically correct."
                "Your goal is to review user's TILs and categorize their correctness as High, Medium, or Low based on the following metrics:"
                "1. Is the TIL insightful?"
                "2. Is the TIL factually correct and accurate?"
                "3. Is the TIL written in simple english?"
                "4. Is the TIL grammatically correct?\n\n"
                "The criteria to use for assessing if they are insightful or not are:\n"
                "* They TIL shouldn't just be a outright statement, it should contain even the reason on why the statement is true."
                "* It should showcase the understanding of the user on the subject.\n\n"
                "The criteria to use for assessing if they are factual or not are:\n"
                "* They are related to facts."
                "* You are able to find a source which agrees to the fact from reputable websites.\n\n"
                "Give reason for your assessment in one or two sentences for each metric and And also rewrite the TIL if you were given the option to write it. "
                "Evaluate each TIL in the context of all the user's TILs."
                f"Formatting Instructions: {feedback_parser.get_format_instructions()}"
            ),
            HumanMessagePromptTemplate.from_template("{til_content}")
        ])
        print("Prompt: ")
        pprint.pp(feedback_prompt, width=80)
        llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
        analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
            "tags": ["til"], "run_name": "Analysing TIL",
            "metadata": {
                # was "versoin" — typo fixed so tracing metadata is queryable
                "version": "v1.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            }
        })
        return analysis_chain
# Per-TIL review record produced by the LLM via JsonOutputParser.
# Each *_categorization is requested as "High"/"Medium"/"Low" through the
# Field descriptions (which are embedded in the prompt's format
# instructions) — the values are not enforced by a validator, so downstream
# code compares them as plain strings.
class TilFeedbackResult(BaseModel):
    # Echo of the user's TIL text, unmodified.
    til: str = Field(description="TIL as exactly captured by the user without any modifications.")
    insightful_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
    insightful_reason: str = Field(description="Reason for your assessment in one or two sentences on insightful metric for the user.")
    factuality_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
    factuality_reason: str = Field(description="Reason for your assessment in one or two sentences on factuality metric for the user.")
    simplicity_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the simplicity metric.")
    simplicity_reason: str = Field(description="Reason for your assessment in one or two sentences on simplicity metric for the user.")
    grammatical_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the grammatical metric.")
    grammatical_reason: str = Field(description="Reason for your assessment in one or two sentences on grammatical metric for the user.")
    # The model's improved rewrite; surfaced as "suggestion" when simplicity
    # or grammar scores Low.
    final_suggestion: str = Field(
        description="Rewrite the TIL if you were given the option to write it which should score High on all the metrics.")
# Top-level schema handed to JsonOutputParser: the LLM returns a "tils" list
# holding one TilFeedbackResult per TIL in the user's content.
class TilFeedbackResults(BaseModel):
    tils: List[TilFeedbackResult]
# Shape of one entry in the "feedback" list returned by TilCrew.kickoff.
# When feedback == "ok" the optional fields stay None; when "not_ok",
# feedback_criteria/reason are set, and suggestion only for the
# simplicity/grammatical criteria.
class TilFinalFeedback(BaseModel):
    til: str
    feedback: str
    feedback_criteria: Optional[str] = None
    reason: Optional[str] = None
    suggestion: Optional[str] = None
# Shape of the full response built by TilCrew._final_call_on_feedback:
# the collected LangSmith run id plus the per-TIL verdicts.
class TilFeedbackResponse(BaseModel):
    run_id: UUID4
    feedback: List[TilFinalFeedback]