theRealNG commited on
Commit
0413dd6
·
1 Parent(s): bb6a634

workflows(course_learn): Return run_id

Browse files
endpoints.py CHANGED
@@ -1,15 +1,15 @@
 
 
 
 
 
 
 
 
 
1
  from dotenv import load_dotenv
2
  load_dotenv()
3
 
4
- from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs
5
- from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectations, Expectation
6
- from .workflows.til import TilCrew, TilFeedbackResponse
7
- from .workflows.utils.feedback import Feedback
8
- from fastapi import FastAPI, Query
9
- from fastapi.middleware.cors import CORSMiddleware
10
- from pydantic import UUID4, BaseModel
11
- from typing import List, Optional
12
- import uvicorn
13
 
14
  description = """
15
  API helps you do awesome stuff. 🚀
@@ -44,6 +44,7 @@ app.add_middleware(
44
  allow_headers=["*"],
45
  )
46
 
 
47
  @app.post("/til_feedback", tags=["til_feedback"])
48
  async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
49
  separator = "\n* "
@@ -52,15 +53,17 @@ async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
52
  result = TilCrew().kickoff(inputs)
53
  return result
54
 
 
55
  @app.post("/til_feedback/{run_id}/feedback", tags=["til_feedback"])
56
  async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
57
- print("Helful Score: ", feedback.helpful_score)
58
  print("Feedback On: ", feedback.feedback_on)
59
- TilCrew.post_feedback(run_id=run_id, feedback=feedback)
60
  return "ok"
61
 
 
62
  @app.post("/course_learn/suggest_expectations", tags=["course_learn"])
63
- async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> Expectations:
64
  print("Inputs: ", inputs)
65
  result = SuggestExpectations().kickoff(inputs={
66
  "course": inputs.course,
@@ -70,8 +73,17 @@ async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -
70
  })
71
  return result
72
 
 
 
 
 
 
 
 
 
 
73
  @app.post("/course_learn/expectation_revision", tags=["course_learn"])
74
- async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> Expectation:
75
  print("Inputs: ", inputs)
76
  result = ExpectationRevision().kickoff(inputs={
77
  "expectation": inputs.expectation,
@@ -80,12 +92,19 @@ async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -
80
  })
81
  return result
82
 
 
 
 
 
 
 
 
 
 
83
  @app.get("/healthcheck")
84
  async def read_root():
85
- return {"status": "ok"}
86
 
87
 
88
  if __name__ == "__main__":
89
  uvicorn.run(app, host="127.0.0.1", port=8080)
90
-
91
-
 
1
+ import uvicorn
2
+ from typing import List, Optional
3
+ from pydantic import UUID4, BaseModel
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+ from fastapi import FastAPI, Query
6
+ from .workflows.utils.feedback import Feedback, post_feedback
7
+ from .workflows.til import TilCrew, TilFeedbackResponse
8
+ from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
9
+ from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
10
  from dotenv import load_dotenv
11
  load_dotenv()
12
 
 
 
 
 
 
 
 
 
 
13
 
14
  description = """
15
  API helps you do awesome stuff. 🚀
 
44
  allow_headers=["*"],
45
  )
46
 
47
+
48
  @app.post("/til_feedback", tags=["til_feedback"])
49
  async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
50
  separator = "\n* "
 
53
  result = TilCrew().kickoff(inputs)
54
  return result
55
 
56
+
57
@app.post("/til_feedback/{run_id}/feedback", tags=["til_feedback"])
async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record user feedback for a TIL feedback run.

    Logs the received metric type and target content for traceability,
    forwards the feedback to LangSmith via `post_feedback`, and returns
    a plain "ok" acknowledgement.
    """
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    ack = "ok"
    return ack
63
 
64
+
65
  @app.post("/course_learn/suggest_expectations", tags=["course_learn"])
66
+ async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
67
  print("Inputs: ", inputs)
68
  result = SuggestExpectations().kickoff(inputs={
69
  "course": inputs.course,
 
73
  })
74
  return result
75
 
76
+
77
@app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record user feedback for a suggest_expectations run.

    Args:
        run_id: LangSmith run id returned by the suggest_expectations endpoint.
        feedback: Metric type/score and the content the feedback refers to.

    Returns:
        "ok" once the feedback has been forwarded to LangSmith.
    """
    # Fix: the original logged feedback.metric_type under a misspelled
    # "Helful Score" label; label it consistently with capture_feedback.
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
83
+
84
+
85
  @app.post("/course_learn/expectation_revision", tags=["course_learn"])
86
+ async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
87
  print("Inputs: ", inputs)
88
  result = ExpectationRevision().kickoff(inputs={
89
  "expectation": inputs.expectation,
 
92
  })
93
  return result
94
 
95
+
96
@app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record user feedback for an expectation_revision run.

    Args:
        run_id: LangSmith run id returned by the expectation_revision endpoint.
        feedback: Metric type/score and the content the feedback refers to.

    Returns:
        "ok" once the feedback has been forwarded to LangSmith.
    """
    # Fix: the original logged feedback.metric_type under a misspelled
    # "Helful Score" label; label it consistently with capture_feedback.
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
102
+
103
+
104
  @app.get("/healthcheck")
105
  async def read_root():
106
+ return {"status": "ok"}
107
 
108
 
109
  if __name__ == "__main__":
110
  uvicorn.run(app, host="127.0.0.1", port=8080)
 
 
workflows/courses/expectation_revision.py CHANGED
@@ -1,9 +1,8 @@
1
- from langchain import hub
2
  from langchain_core.output_parsers import JsonOutputParser
3
  from langchain_openai import ChatOpenAI
4
- from pydantic import BaseModel
5
  from .suggest_expectations import Expectation
6
- from typing import List, Optional
7
  import os
8
 
9
  class Inputs(BaseModel):
@@ -11,12 +10,22 @@ class Inputs(BaseModel):
11
  check_question: str
12
  request: str
13
 
 
 
 
 
 
14
  class ExpectationRevision:
15
  def kickoff(self, inputs={}):
16
  self.learning_outcome = inputs["expectation"]
17
  self.check_question = inputs["check_question"]
18
  self.request = inputs["request"]
19
- return self._get_suggestion()
 
 
 
 
 
20
 
21
  def _get_suggestion(self):
22
  parser = JsonOutputParser(pydantic_object=Expectation)
@@ -32,10 +41,12 @@ class ExpectationRevision:
32
  }
33
  })
34
 
35
- response = chain.invoke({
36
- "learning_outcome": self.learning_outcome, "check_question": self.check_question, "request": self.request,
37
- "format_instructions": parser.get_format_instructions()
38
- })
 
 
39
 
40
  return response
41
 
 
1
+ from langchain import hub, callbacks
2
  from langchain_core.output_parsers import JsonOutputParser
3
  from langchain_openai import ChatOpenAI
4
+ from pydantic import BaseModel, UUID4
5
  from .suggest_expectations import Expectation
 
6
  import os
7
 
8
  class Inputs(BaseModel):
 
10
  check_question: str
11
  request: str
12
 
13
class Response(BaseModel):
    """API response for the expectation_revision endpoint.

    Bundles the revised expectation/check-question pair with the LangSmith
    run id so callers can post feedback against this specific run.
    """
    run_id: UUID4          # LangSmith trace id for this revision run
    expectation: str       # revised learning-outcome text
    check_question: str    # revised check question
+
18
  class ExpectationRevision:
19
  def kickoff(self, inputs={}):
20
  self.learning_outcome = inputs["expectation"]
21
  self.check_question = inputs["check_question"]
22
  self.request = inputs["request"]
23
+ llm_response = self._get_suggestion()
24
+ return {
25
+ "run_id": self.run_id,
26
+ "expectation": llm_response["expectation"],
27
+ "check_question": llm_response["check_question"]
28
+ }
29
 
30
  def _get_suggestion(self):
31
  parser = JsonOutputParser(pydantic_object=Expectation)
 
41
  }
42
  })
43
 
44
+ with callbacks.collect_runs() as cb:
45
+ response = chain.invoke({
46
+ "learning_outcome": self.learning_outcome, "check_question": self.check_question, "request": self.request,
47
+ "format_instructions": parser.get_format_instructions()
48
+ })
49
+ self.run_id = cb.traced_runs[0].id
50
 
51
  return response
52
 
workflows/courses/suggest_expectations.py CHANGED
@@ -1,7 +1,7 @@
1
- from langchain import hub
2
  from langchain_core.output_parsers import JsonOutputParser
3
  from langchain_openai import ChatOpenAI
4
- from pydantic import BaseModel, Field
5
  from typing import List
6
  from pydantic import BaseModel
7
  from typing import List, Optional
@@ -19,6 +19,11 @@ class Expectations(BaseModel):
19
  expectations: List[Expectation]
20
 
21
 
 
 
 
 
 
22
  class Inputs(BaseModel):
23
  course: str
24
  module: str
@@ -32,7 +37,11 @@ class SuggestExpectations:
32
  self.module = inputs["module"]
33
  self.existing_expectations = inputs["existing_expectations"]
34
  self.tasks = inputs["tasks"]
35
- return self._get_suggestions()
 
 
 
 
36
 
37
  def _get_suggestions(self):
38
  parser = JsonOutputParser(pydantic_object=Expectations)
@@ -62,13 +71,15 @@ class SuggestExpectations:
62
  existing_expectations_str += "\n".join(existing_expectations)
63
  existing_expectations_str += "\n```"
64
 
65
- response = chain.invoke({
66
- "course": self.course, "module": self.module, "tasks": "* " + ("\n* ".join(self.tasks)),
67
- "format_instructions": parser.get_format_instructions(),
68
- "existing_expectations": existing_expectations_str
69
- })
 
 
70
 
71
- return response
72
 
73
 
74
  # Example usage
 
1
+ from langchain import hub, callbacks
2
  from langchain_core.output_parsers import JsonOutputParser
3
  from langchain_openai import ChatOpenAI
4
+ from pydantic import BaseModel, Field, UUID4
5
  from typing import List
6
  from pydantic import BaseModel
7
  from typing import List, Optional
 
19
  expectations: List[Expectation]
20
 
21
 
22
class Response(BaseModel):
    """API response for the suggest_expectations endpoint.

    Bundles the suggested expectations with the LangSmith run id so
    callers can post feedback against this specific run.
    """
    run_id: UUID4                    # LangSmith trace id for this run
    expectations: List[Expectation]  # suggested expectations
25
+
26
+
27
  class Inputs(BaseModel):
28
  course: str
29
  module: str
 
37
  self.module = inputs["module"]
38
  self.existing_expectations = inputs["existing_expectations"]
39
  self.tasks = inputs["tasks"]
40
+ llm_response = self._get_suggestions()
41
+ return {
42
+ "run_id": self.run_id,
43
+ "expectations": llm_response["expectations"]
44
+ }
45
 
46
  def _get_suggestions(self):
47
  parser = JsonOutputParser(pydantic_object=Expectations)
 
71
  existing_expectations_str += "\n".join(existing_expectations)
72
  existing_expectations_str += "\n```"
73
 
74
+ with callbacks.collect_runs() as cb:
75
+ llm_response = chain.invoke({
76
+ "course": self.course, "module": self.module, "tasks": "* " + ("\n* ".join(self.tasks)),
77
+ "format_instructions": parser.get_format_instructions(),
78
+ "existing_expectations": existing_expectations_str
79
+ })
80
+ self.run_id = cb.traced_runs[0].id
81
 
82
+ return llm_response
83
 
84
 
85
  # Example usage
workflows/til.py CHANGED
@@ -1,5 +1,4 @@
1
- from langchain import callbacks
2
- from langchain import hub
3
  from langchain.agents import AgentExecutor, create_react_agent
4
  from langchain_community.tools.tavily_search import TavilyAnswer
5
  from langchain_core.messages import SystemMessage
@@ -13,6 +12,7 @@ from .utils.feedback import Feedback
13
  import os
14
  import pprint
15
 
 
16
  class TilCrew:
17
  def kickoff(self, inputs={}):
18
  print("Human Message:")
@@ -21,17 +21,6 @@ class TilCrew:
21
  self._gather_feedback()
22
  return self._final_call_on_feedback()
23
 
24
- def post_feedback(run_id: UUID4, feedback: Feedback):
25
- client = Client()
26
- client.create_feedback(
27
- str(run_id),
28
- key=feedback.metric_type,
29
- score=feedback.metric_score,
30
- source_info={"til": feedback.feedback_on},
31
- type="api",
32
- )
33
-
34
-
35
  def _final_call_on_feedback(self):
36
  final_results = []
37
  for feedback in self.feedback_results:
@@ -70,7 +59,7 @@ class TilCrew:
70
  result["feedback"] = "ok"
71
  final_results = final_results + [result]
72
 
73
- response = {"feedback": final_results, "run_id": self.run_id }
74
  print("Final Results:")
75
  pprint.pp(response)
76
  return response
@@ -79,22 +68,25 @@ class TilCrew:
79
  feedback_chain = self._build_feedback_chain()
80
  pprint.pp("Analysing the TIL.....")
81
  with callbacks.collect_runs() as cb:
82
- self.feedback_results = feedback_chain.invoke({"til_content": self.content})['tils']
83
- self.run_id = cb.traced_runs[0].id
84
- print("Run ID: ", self.run_id)
 
85
 
86
  print("Feedback: ")
87
  pprint.pp(self.feedback_results)
88
 
89
  # Deprecated: Not using this as we are getting similar results by using or without using this
90
  def _gather_facts(self):
91
- facts_prompt = PromptTemplate.from_template("What are the facts on the topics mentioned the following user's TILs: {content}")
 
92
  tools = [TavilyAnswer()]
93
  llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
94
  prompt = hub.pull("hwchase17/react")
95
  agent = create_react_agent(llm, tools, prompt)
96
  agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
97
- self.facts = agent_executor.invoke({"input": facts_prompt.format(content=self.content)})['output']
 
98
  print("Gathered Facts: ")
99
  pprint.pp(self.facts)
100
 
@@ -129,7 +121,7 @@ class TilCrew:
129
  llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
130
  analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
131
  "tags": ["til"], "run_name": "Analysing TIL",
132
- "metadata" : {
133
  "versoin": "v1.0.0",
134
  "growth_activity": "til",
135
  "env": os.environ["ENV"],
@@ -141,19 +133,24 @@ class TilCrew:
141
 
142
 
143
  class TilFeedbackResult(BaseModel):
144
- til: str = Field(description="TIL as exactly captured by the user without any modifications.")
 
145
  insightful_categorization: str = Field(
146
  description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
147
- insightful_reason: str = Field(description="Reason for your assessment in one or two sentences on insightful metric for the user.")
 
148
  factuality_categorization: str = Field(
149
  description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
150
- factuality_reason: str = Field(description="Reason for your assessment in one or two sentences on factuality metric for the user.")
 
151
  simplicity_categorization: str = Field(
152
  description="TIL categorization as High/Medium/Low based on correctness on the simplicity metric.")
153
- simplicity_reason: str = Field(description="Reason for your assessment in one or two sentences on simplicity metric for the user.")
 
154
  grammatical_categorization: str = Field(
155
  description="TIL categorization as High/Medium/Low based on correctness on the grammatical metric.")
156
- grammatical_reason: str = Field(description="Reason for your assessment in one or two sentences on grammatical metric for the user.")
 
157
  final_suggestion: str = Field(
158
  description="Rewrite the TIL if you were given the option to write it which should score High on all the metrics.")
159
 
@@ -161,6 +158,7 @@ class TilFeedbackResult(BaseModel):
161
  class TilFeedbackResults(BaseModel):
162
  tils: List[TilFeedbackResult]
163
 
 
164
  class TilFinalFeedback(BaseModel):
165
  til: str
166
  feedback: str
@@ -168,6 +166,7 @@ class TilFinalFeedback(BaseModel):
168
  reason: Optional[str] = None
169
  suggestion: Optional[str] = None
170
 
 
171
  class TilFeedbackResponse(BaseModel):
172
  run_id: UUID4
173
  feedback: List[TilFinalFeedback]
 
1
+ from langchain import callbacks, hub
 
2
  from langchain.agents import AgentExecutor, create_react_agent
3
  from langchain_community.tools.tavily_search import TavilyAnswer
4
  from langchain_core.messages import SystemMessage
 
12
  import os
13
  import pprint
14
 
15
+
16
  class TilCrew:
17
  def kickoff(self, inputs={}):
18
  print("Human Message:")
 
21
  self._gather_feedback()
22
  return self._final_call_on_feedback()
23
 
 
 
 
 
 
 
 
 
 
 
 
24
  def _final_call_on_feedback(self):
25
  final_results = []
26
  for feedback in self.feedback_results:
 
59
  result["feedback"] = "ok"
60
  final_results = final_results + [result]
61
 
62
+ response = {"feedback": final_results, "run_id": self.run_id}
63
  print("Final Results:")
64
  pprint.pp(response)
65
  return response
 
68
  feedback_chain = self._build_feedback_chain()
69
  pprint.pp("Analysing the TIL.....")
70
  with callbacks.collect_runs() as cb:
71
+ self.feedback_results = feedback_chain.invoke(
72
+ {"til_content": self.content})['tils']
73
+ self.run_id = cb.traced_runs[0].id
74
+ print("Run ID: ", self.run_id)
75
 
76
  print("Feedback: ")
77
  pprint.pp(self.feedback_results)
78
 
79
  # Deprecated: Not using this as we are getting similar results by using or without using this
80
  def _gather_facts(self):
81
+ facts_prompt = PromptTemplate.from_template(
82
+ "What are the facts on the topics mentioned the following user's TILs: {content}")
83
  tools = [TavilyAnswer()]
84
  llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
85
  prompt = hub.pull("hwchase17/react")
86
  agent = create_react_agent(llm, tools, prompt)
87
  agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
88
+ self.facts = agent_executor.invoke(
89
+ {"input": facts_prompt.format(content=self.content)})['output']
90
  print("Gathered Facts: ")
91
  pprint.pp(self.facts)
92
 
 
121
  llm = ChatOpenAI(model=os.environ['OPENAI_MODEL'], temperature=0.2)
122
  analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
123
  "tags": ["til"], "run_name": "Analysing TIL",
124
+ "metadata": {
125
  "versoin": "v1.0.0",
126
  "growth_activity": "til",
127
  "env": os.environ["ENV"],
 
133
 
134
 
135
  class TilFeedbackResult(BaseModel):
136
+ til: str = Field(
137
+ description="TIL as exactly captured by the user without any modifications.")
138
  insightful_categorization: str = Field(
139
  description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
140
+ insightful_reason: str = Field(
141
+ description="Reason for your assessment in one or two sentences on insightful metric for the user.")
142
  factuality_categorization: str = Field(
143
  description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
144
+ factuality_reason: str = Field(
145
+ description="Reason for your assessment in one or two sentences on factuality metric for the user.")
146
  simplicity_categorization: str = Field(
147
  description="TIL categorization as High/Medium/Low based on correctness on the simplicity metric.")
148
+ simplicity_reason: str = Field(
149
+ description="Reason for your assessment in one or two sentences on simplicity metric for the user.")
150
  grammatical_categorization: str = Field(
151
  description="TIL categorization as High/Medium/Low based on correctness on the grammatical metric.")
152
+ grammatical_reason: str = Field(
153
+ description="Reason for your assessment in one or two sentences on grammatical metric for the user.")
154
  final_suggestion: str = Field(
155
  description="Rewrite the TIL if you were given the option to write it which should score High on all the metrics.")
156
 
 
158
  class TilFeedbackResults(BaseModel):
159
  tils: List[TilFeedbackResult]
160
 
161
+
162
  class TilFinalFeedback(BaseModel):
163
  til: str
164
  feedback: str
 
166
  reason: Optional[str] = None
167
  suggestion: Optional[str] = None
168
 
169
+
170
  class TilFeedbackResponse(BaseModel):
171
  run_id: UUID4
172
  feedback: List[TilFinalFeedback]
workflows/utils/feedback.py CHANGED
@@ -1,7 +1,19 @@
1
- from pydantic import BaseModel
2
  from typing import List, Optional
 
3
 
4
  class Feedback(BaseModel):
5
  metric_type: Optional[str]
6
  metric_score: Optional[float]
7
  feedback_on: Optional[str]
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, UUID4
2
  from typing import List, Optional
3
+ from langsmith import Client
4
 
5
class Feedback(BaseModel):
    """User feedback payload posted against a LangSmith run.

    All fields are optional. The explicit `= None` defaults make that
    intent unambiguous and keep the model valid under pydantic v2, where
    `Optional[...]` without a default is a *required* field; under
    pydantic v1 this is behavior-identical to the bare annotations.
    """
    metric_type: Optional[str] = None    # used as the LangSmith feedback key
    metric_score: Optional[float] = None  # numeric score for the metric
    feedback_on: Optional[str] = None    # content the feedback refers to
9
+
10
+
11
+ def post_feedback(run_id: UUID4, feedback: Feedback):
12
+ client = Client()
13
+ client.create_feedback(
14
+ str(run_id),
15
+ key=feedback.metric_type,
16
+ score=feedback.metric_score,
17
+ source_info={"content": feedback.feedback_on},
18
+ type="api",
19
+ )