theRealNG committed on
Commit
ec4a7eb
·
1 Parent(s): e2ff255

pretty print json objects

Browse files
Files changed (2) hide show
  1. crew/til.py +6 -9
  2. test.py +2 -1
crew/til.py CHANGED
@@ -3,6 +3,7 @@ from langchain_core.messages import SystemMessage
3
  from pydantic import BaseModel, Field
4
  from langchain_core.output_parsers import JsonOutputParser
5
  from langchain_openai import ChatOpenAI
 
6
 
7
  HIGH_IMPACT_THRESHOLD = 8
8
  LOW_IMPACT_THRESHOLD = 7
@@ -20,7 +21,6 @@ class TilCrew:
20
  "feedback": "not_ok",
21
  "feedback_criteria": "factuality_feedback",
22
  "reason": self.feedback["factuality_reason"],
23
- "suggestion": self.feedback["final_suggestion"],
24
  }
25
 
26
  if self.feedback["insightful_score"] < HIGH_IMPACT_THRESHOLD:
@@ -28,7 +28,6 @@ class TilCrew:
28
  "feedback": "not_ok",
29
  "feedback_criteria": "insightful_feedback",
30
  "reason": self.feedback["insightful_reason"],
31
- "suggestion": self.feedback["final_suggestion"],
32
  }
33
 
34
  if self.feedback["simplicity_score"] < LOW_IMPACT_THRESHOLD:
@@ -53,9 +52,10 @@ class TilCrew:
53
 
54
  def _gather_feedback(self):
55
  feedback_chain = self._build_feedback_chain()
56
- print("Analysing the TIL.....")
57
  self.feedback = feedback_chain.invoke({"til_content": self.content})
58
- print("Feedback: ", self.feedback)
 
59
 
60
  def _build_feedback_chain(self):
61
  feedback_parser = JsonOutputParser(pydantic_object=TilFeedbackResult)
@@ -73,7 +73,8 @@ class TilCrew:
73
  ),
74
  HumanMessagePromptTemplate.from_template("{til_content}")
75
  ])
76
- print("Prompt: ", feedback_prompt)
 
77
  llm = ChatOpenAI(model=OPENAI_MODEL, temperature=0.2)
78
  analysis_chain = feedback_prompt | llm | feedback_parser
79
 
@@ -84,18 +85,14 @@ class TilFeedbackResult(BaseModel):
84
  insightful_score: int = Field(
85
  description="TIL score on insightful criteria")
86
  insightful_reason: str = Field(description="Reason for insightful_score")
87
- # insightful_pass: bool = Field(description="True if it passes the insightful criteria, else Fales")
88
  factuality_score: int = Field(
89
  description="TIL score on factuality criteria")
90
  factuality_reason: str = Field(description="Reason for factuality_score")
91
- # factuality_pass: bool = Field(description="True if it passes the factuality criteria, else Fales")
92
  simplicity_score: int = Field(
93
  description="TIL score on simplicity criteria")
94
  simplicity_reason: str = Field(description="Reason for simplicity_score")
95
- # simplicity_pass: bool = Field(description="True if it passes the simplicity criteria, else Fales")
96
  grammatical_score: int = Field(
97
  description="TIL score on grammatical criteria")
98
  grammatical_reason: str = Field(description="Reason for grammatical_score")
99
- # grammatical_pass: bool = Field(description="True if it passes the grammatical criteria, else Fales")
100
  final_suggestion: str = Field(
101
  description="Final suggested version of the TIL")
 
3
  from pydantic import BaseModel, Field
4
  from langchain_core.output_parsers import JsonOutputParser
5
  from langchain_openai import ChatOpenAI
6
+ import pprint
7
 
8
  HIGH_IMPACT_THRESHOLD = 8
9
  LOW_IMPACT_THRESHOLD = 7
 
21
  "feedback": "not_ok",
22
  "feedback_criteria": "factuality_feedback",
23
  "reason": self.feedback["factuality_reason"],
 
24
  }
25
 
26
  if self.feedback["insightful_score"] < HIGH_IMPACT_THRESHOLD:
 
28
  "feedback": "not_ok",
29
  "feedback_criteria": "insightful_feedback",
30
  "reason": self.feedback["insightful_reason"],
 
31
  }
32
 
33
  if self.feedback["simplicity_score"] < LOW_IMPACT_THRESHOLD:
 
52
 
53
  def _gather_feedback(self):
54
  feedback_chain = self._build_feedback_chain()
55
+ pprint.pp("Analysing the TIL.....")
56
  self.feedback = feedback_chain.invoke({"til_content": self.content})
57
+ print("Feedback: ")
58
+ pprint.pp(self.feedback)
59
 
60
  def _build_feedback_chain(self):
61
  feedback_parser = JsonOutputParser(pydantic_object=TilFeedbackResult)
 
73
  ),
74
  HumanMessagePromptTemplate.from_template("{til_content}")
75
  ])
76
+ print("Prompt: ")
77
+ pprint.pp(feedback_prompt, width=80)
78
  llm = ChatOpenAI(model=OPENAI_MODEL, temperature=0.2)
79
  analysis_chain = feedback_prompt | llm | feedback_parser
80
 
 
85
  insightful_score: int = Field(
86
  description="TIL score on insightful criteria")
87
  insightful_reason: str = Field(description="Reason for insightful_score")
 
88
  factuality_score: int = Field(
89
  description="TIL score on factuality criteria")
90
  factuality_reason: str = Field(description="Reason for factuality_score")
 
91
  simplicity_score: int = Field(
92
  description="TIL score on simplicity criteria")
93
  simplicity_reason: str = Field(description="Reason for simplicity_score")
 
94
  grammatical_score: int = Field(
95
  description="TIL score on grammatical criteria")
96
  grammatical_reason: str = Field(description="Reason for grammatical_score")
 
97
  final_suggestion: str = Field(
98
  description="Final suggested version of the TIL")
test.py CHANGED
@@ -43,7 +43,8 @@ def main():
43
  if result['feedback'] == "not_ok":
44
  st.markdown(f"**Criteria:** {result['feedback_criteria']}")
45
  st.markdown(f"**Reason:** {result['reason']}")
46
- st.markdown(f"**Suggestion:** {result['suggestion']}")
 
47
 
48
  if __name__ == "__main__":
49
  main()
 
43
  if result['feedback'] == "not_ok":
44
  st.markdown(f"**Criteria:** {result['feedback_criteria']}")
45
  st.markdown(f"**Reason:** {result['reason']}")
46
+ if result.get('suggestion') is not None:
47
+ st.markdown(f"**Suggestion:** {result['suggestion']}")
48
 
49
  if __name__ == "__main__":
50
  main()