from langchain import callbacks, hub
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List, Optional
import os
import pprint
class AnalyseTilV2:
    """Analyse a batch of TIL ("today I learned") entries with an LLM chain.

    ``kickoff`` is the public entry point: it runs a LangChain prompt pulled
    from the hub over the submitted content, collects per-TIL feedback from
    the model, and reduces that feedback to an ok/not_ok verdict per TIL.
    """

    def kickoff(self, inputs=None):
        """Run the full analysis for one submission.

        Args:
            inputs: dict that must contain a ``content`` key with the TIL
                text to analyse.

        Returns:
            dict with ``til`` (list of per-TIL verdict dicts) and
            ``run_id`` (the traced LangSmith run id).
        """
        # Avoid the mutable-default-argument pitfall of ``inputs={}``.
        inputs = {} if inputs is None else inputs
        print("Human Message:")
        pprint.pp(inputs)
        self.content = inputs["content"]
        self._gather_feedback()
        return self._final_call_on_feedback()

    def _final_call_on_feedback(self):
        """Reduce the raw per-TIL feedback into ok/not_ok verdicts."""
        final_results = []
        for feedback in self.feedback_results:
            print("Final analysis of:")
            pprint.pp(feedback)
            result = {
                "takeaway": feedback.get('til', ""),
                "feedback": "not_ok",
            }
            # A TIL passes only when BOTH metrics are rated High; otherwise
            # surface the model's reason to the user.
            if (feedback["factuality_categorization"] != 'High'
                    or feedback["insightful_categorization"] != 'High'):
                result["reason"] = feedback["assessment_reason"]
            else:
                result["feedback"] = "ok"
            final_results.append(result)
        response = {"til": final_results, "run_id": self.run_id}
        print("Final Results:")
        pprint.pp(response)
        return response

    def _gather_feedback(self):
        """Invoke the feedback chain; store results and the traced run id."""
        feedback_chain = self._build_feedback_chain()
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        pprint.pp("Analysing the TIL.....")
        # collect_runs captures the LangSmith run id so it can be returned
        # to the caller alongside the verdicts.
        with callbacks.collect_runs() as cb:
            self.feedback_results = feedback_chain.invoke(
                {"til_content": self.content,
                 "format_instructions": feedback_parser.get_format_instructions()})['tils']
            self.run_id = cb.traced_runs[0].id
        print("Run ID: ", self.run_id)
        print("Feedback: ")
        pprint.pp(self.feedback_results)

    def _build_feedback_chain(self):
        """Build the ``prompt | llm | parser`` chain with tracing config.

        Reads ``OPENAI_MODEL`` and ``ENV`` from the environment; raises
        ``KeyError`` if either is unset.
        """
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        feedback_prompt = hub.pull("til_analysis")
        print("Prompt: ")
        pprint.pp(feedback_prompt, width=80)
        # Low temperature: assessments should be consistent, not creative.
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
            "tags": ["til"], "run_name": "Analysing TIL v2",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })
        return analysis_chain
class TilV2FeedbackResult(BaseModel):
    """Per-TIL assessment emitted by the LLM feedback chain.

    The ``description`` strings double as instructions to the model via
    ``JsonOutputParser.get_format_instructions()`` — do not edit casually.
    """

    # The TIL text echoed back verbatim by the model.
    til: str = Field(
        description="TIL as exactly captured by the user without any modifications.")
    # Expected values: "High" / "Medium" / "Low".
    insightful_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
    # Expected values: "High" / "Medium" / "Low".
    factuality_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
    # Only surfaced to the user when either metric is rated below High.
    assessment_reason: str = Field(
        description="Explain your assessment in one or two sentences about the factuality and insightful metrics directly to the user, but only if they are not rated as High. Use the second-person point of view.")
class TilV2FeedbackResults(BaseModel):
    """Envelope for the chain's JSON output: a list of per-TIL assessments."""

    tils: List[TilV2FeedbackResult]
class TilV2FinalFeedback(BaseModel):
    """Final user-facing verdict for a single TIL."""

    # The TIL text as captured from the model's feedback.
    takeaway: str
    # Either "ok" or "not_ok".
    feedback: str
    # Present only when feedback is "not_ok".
    reason: Optional[str] = None
class TilV2FeedbackResponse(BaseModel):
    """Overall response shape: traced run id plus per-TIL verdicts."""

    # LangSmith run id collected via callbacks.collect_runs().
    run_id: UUID4
    til: List[TilV2FinalFeedback]