theRealNG committed on
Commit
35d4946
·
1 Parent(s): e34773c

Add staging URL support for existing endpoints

Browse files
endpoints.py CHANGED
@@ -1,19 +1,20 @@
1
  from dotenv import load_dotenv
2
  load_dotenv()
3
 
4
- from fastapi import FastAPI, Query
5
- from fastapi.middleware.cors import CORSMiddleware
6
- from pydantic import UUID4
7
- from typing import List
8
- from tempenv import TemporaryEnvironment
9
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
10
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
11
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
12
- from .workflows.til import TilCrew, TilFeedbackResponse
13
- from .workflows.til_v2 import TilV2, TilV2FeedbackResponse
14
  from .workflows.utils.feedback import Feedback, post_feedback
 
 
 
 
 
15
  import uvicorn
16
 
 
17
  LANGSMITH_STAGING_PROJECT = "customer_agent"
18
  LANGSMITH_PROD_PROJECT = "growthy-agents"
19
 
@@ -60,36 +61,54 @@ async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
60
  return result
61
 
62
 
63
- def til_v2_logic(llm_model, langsmith_project, content) -> TilV2FeedbackResponse:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  separator = "\n* "
65
  content[0] = "* " + content[0]
66
  inputs = {"content": separator.join(content)}
67
- result = TilV2(llm_model, langsmith_project).kickoff(inputs)
68
  return result
69
 
70
 
71
  @app.post("/v2/til_feedback", tags=["til_feedback"])
72
  async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
73
- with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT}):
74
- return til_v2_logic("gpt-4o", "growthy-agents", content)
75
 
76
 
77
  @app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
78
- async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
79
- with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT}):
80
- return til_v2_logic("gpt-4o-mini", "customer_agent", content)
81
-
82
-
83
- @app.post("/til_feedback/{run_id}/feedback", tags=["til_feedback"])
84
- async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
85
- print("Metric Type: ", feedback.metric_type)
86
- print("Feedback On: ", feedback.feedback_on)
87
- post_feedback(run_id=run_id, feedback=feedback)
88
- return "ok"
89
 
90
 
91
- @app.post("/course_learn/suggest_expectations", tags=["course_learn"])
92
- async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
93
  print("Inputs: ", inputs)
94
  result = SuggestExpectations().kickoff(inputs={
95
  "course": inputs.course,
@@ -100,16 +119,38 @@ async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -
100
  return result
101
 
102
 
103
- @app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
104
- async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
105
  print("Helful Score: ", feedback.metric_type)
106
  print("Feedback On: ", feedback.feedback_on)
107
  post_feedback(run_id=run_id, feedback=feedback)
108
  return "ok"
109
 
110
 
111
- @app.post("/course_learn/expectation_revision", tags=["course_learn"])
112
- async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  print("Inputs: ", inputs)
114
  result = ExpectationRevision().kickoff(inputs={
115
  "expectation": inputs.expectation,
@@ -119,16 +160,38 @@ async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -
119
  return result
120
 
121
 
122
- @app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
123
- async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
 
 
 
 
 
 
 
 
 
 
 
124
  print("Helful Score: ", feedback.metric_type)
125
  print("Feedback On: ", feedback.feedback_on)
126
  post_feedback(run_id=run_id, feedback=feedback)
127
  return "ok"
128
 
129
 
130
- @app.post("/course_learn/suggest_check_question", tags=["course_learn"])
131
- async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
 
 
 
 
 
 
 
 
 
 
 
132
  print("Inputs: ", inputs)
133
  result = SuggestCheckQuestion().kickoff(inputs={
134
  "course": inputs.course,
@@ -139,14 +202,37 @@ async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs
139
  return result
140
 
141
 
142
- @app.post("/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn"])
143
- async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
 
 
 
 
 
 
 
 
 
 
 
144
  print("Helful Score: ", feedback.metric_type)
145
  print("Feedback On: ", feedback.feedback_on)
146
  post_feedback(run_id=run_id, feedback=feedback)
147
  return "ok"
148
 
149
 
 
 
 
 
 
 
 
 
 
 
 
 
150
  @app.get("/healthcheck")
151
  async def read_root():
152
  return {"status": "ok"}
 
1
  from dotenv import load_dotenv
2
  load_dotenv()
3
 
 
 
 
 
 
4
  from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
5
  from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
6
  from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
7
+ from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse
8
+ from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse
9
  from .workflows.utils.feedback import Feedback, post_feedback
10
+ from fastapi import FastAPI, Query
11
+ from fastapi.middleware.cors import CORSMiddleware
12
+ from pydantic import UUID4
13
+ from tempenv import TemporaryEnvironment
14
+ from typing import List
15
  import uvicorn
16
 
17
+
18
  LANGSMITH_STAGING_PROJECT = "customer_agent"
19
  LANGSMITH_PROD_PROJECT = "growthy-agents"
20
 
 
61
  return result
62
 
63
 
64
+ @app.post("/til_feedback/{run_id}/feedback", tags=["til_feedback"])
65
+ async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
66
+ print("Metric Type: ", feedback.metric_type)
67
+ print("Feedback On: ", feedback.feedback_on)
68
+ post_feedback(run_id=run_id, feedback=feedback)
69
+ return "ok"
70
+
71
+
72
+ @app.post("/staging/til_feedback", tags=["til_feedback", "staging"])
73
+ async def staging_til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
74
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
75
+ separator = "\n* "
76
+ content[0] = "* " + content[0]
77
+ inputs = {"content": separator.join(content)}
78
+ result = TilCrew().kickoff(inputs)
79
+ return result
80
+
81
+
82
+ @app.post("/staging/til_feedback/{run_id}/feedback", tags=["til_feedback", "staging"])
83
+ async def staging_capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
84
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
85
+ print("Metric Type: ", feedback.metric_type)
86
+ print("Feedback On: ", feedback.feedback_on)
87
+ post_feedback(run_id=run_id, feedback=feedback)
88
+ return "ok"
89
+
90
+
91
+ def til_v2_logic(content) -> TilV2FeedbackResponse:
92
  separator = "\n* "
93
  content[0] = "* " + content[0]
94
  inputs = {"content": separator.join(content)}
95
+ result = AnalyseTilV2().kickoff(inputs)
96
  return result
97
 
98
 
99
  @app.post("/v2/til_feedback", tags=["til_feedback"])
100
  async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
101
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
102
+ return til_v2_logic(content)
103
 
104
 
105
  @app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
106
+ async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
107
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
108
+ return til_v2_logic(content)
 
 
 
 
 
 
 
 
109
 
110
 
111
+ def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
 
112
  print("Inputs: ", inputs)
113
  result = SuggestExpectations().kickoff(inputs={
114
  "course": inputs.course,
 
119
  return result
120
 
121
 
122
+ def course_learn_suggest_expectations_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
 
123
  print("Helful Score: ", feedback.metric_type)
124
  print("Feedback On: ", feedback.feedback_on)
125
  post_feedback(run_id=run_id, feedback=feedback)
126
  return "ok"
127
 
128
 
129
+ @app.post("/course_learn/suggest_expectations", tags=["course_learn"])
130
+ async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
131
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
132
+ return course_learn_suggest_expectations_logic(inputs)
133
+
134
+
135
+ @app.post("/staging/course_learn/suggest_expectations", tags=["course_learn", "staging"])
136
+ async def staging_course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
137
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
138
+ return course_learn_suggest_expectations_logic(inputs)
139
+
140
+
141
+ @app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
142
+ async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
143
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
144
+ return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
145
+
146
+
147
+ @app.post("/staging/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn", "staging"])
148
+ async def staging_capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
149
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
150
+ return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
151
+
152
+
153
+ def course_learn_expectation_revision_logic(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
154
  print("Inputs: ", inputs)
155
  result = ExpectationRevision().kickoff(inputs={
156
  "expectation": inputs.expectation,
 
160
  return result
161
 
162
 
163
+ @app.post("/course_learn/expectation_revision", tags=["course_learn"])
164
+ async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
165
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
166
+ return course_learn_expectation_revision_logic(inputs)
167
+
168
+
169
+ @app.post("/staging/course_learn/expectation_revision", tags=["course_learn", "staging"])
170
+ async def staging_course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
171
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
172
+ return course_learn_expectation_revision_logic(inputs)
173
+
174
+
175
+ def capture_expectation_revision_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
176
  print("Helful Score: ", feedback.metric_type)
177
  print("Feedback On: ", feedback.feedback_on)
178
  post_feedback(run_id=run_id, feedback=feedback)
179
  return "ok"
180
 
181
 
182
+ @app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
183
+ async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
184
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
185
+ return capture_expectation_revision_feedback_logic(run_id, feedback)
186
+
187
+
188
+ @app.post("/staging/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn", "staging"])
189
+ async def staging_capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
190
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
191
+ return capture_expectation_revision_feedback_logic(run_id, feedback)
192
+
193
+
194
+ def course_learn_suggest_check_question_logic(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
195
  print("Inputs: ", inputs)
196
  result = SuggestCheckQuestion().kickoff(inputs={
197
  "course": inputs.course,
 
202
  return result
203
 
204
 
205
+ @app.post("/course_learn/suggest_check_question", tags=["course_learn"])
206
+ async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
207
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
208
+ return course_learn_suggest_check_question_logic(inputs)
209
+
210
+
211
+ @app.post("/staging/course_learn/suggest_check_question", tags=["course_learn", "staging"])
212
+ async def staging_course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
213
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
214
+ return course_learn_suggest_check_question_logic(inputs)
215
+
216
+
217
+ def course_learn_suggest_check_question_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
218
  print("Helful Score: ", feedback.metric_type)
219
  print("Feedback On: ", feedback.feedback_on)
220
  post_feedback(run_id=run_id, feedback=feedback)
221
  return "ok"
222
 
223
 
224
+ @app.post("/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn"])
225
+ async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
226
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT, "OPENAI_MODEL": "gpt-4o"}):
227
+ return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
228
+
229
+
230
+ @app.post("/staging/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn", "staging"])
231
+ async def staging_course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
232
+ with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
233
+ return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
234
+
235
+
236
  @app.get("/healthcheck")
237
  async def read_root():
238
  return {"status": "ok"}
requirements.txt CHANGED
@@ -18,6 +18,7 @@ python-dotenv
18
  semanticscholar
19
  streamlit
20
  streamlit-extras
 
21
  tavily-python
22
  tempenv
23
  unstructured
 
18
  semanticscholar
19
  streamlit
20
  streamlit-extras
21
+ streamlit_router
22
  tavily-python
23
  tempenv
24
  unstructured
ui/til_feedback.py CHANGED
@@ -1,8 +1,7 @@
1
  import streamlit as st
2
  from dotenv import load_dotenv
3
- from workflows.til import TilCrew
4
  from streamlit_extras.capture import stdout
5
- from langsmith import Client
6
  from workflows.utils.feedback import Feedback
7
 
8
  load_dotenv()
@@ -21,7 +20,7 @@ def feedback_main():
21
  [data-testid="stToolbar"]{
22
  right: 2rem;
23
  }
24
-
25
 
26
  </style>
27
  '''
@@ -86,7 +85,7 @@ def feedback_main():
86
  st.markdown(f"**Reason:** {result['reason']}")
87
  if result.get('suggestion') is not None:
88
  st.markdown(f"**Suggestion:** {result['suggestion']}")
89
-
90
  feedback_key = result['til'].replace(' ', '_')
91
  feedback_given_key = f"{feedback_key}_feedback_given"
92
 
@@ -111,14 +110,14 @@ def give_feedback(feedback_key, is_helpful):
111
  feedback_data = Feedback(
112
  metric_type=metric_type,
113
  metric_score=metric_score,
114
- feedback_on=feedback_key.replace('_', ' ').title()
115
  )
116
  try:
117
  TilCrew.post_feedback(run_id, feedback_data)
118
  st.success("Feedback submitted successfully!")
119
  except Exception as e:
120
  st.error(f"Failed to submit feedback: {e}")
121
-
122
  st.session_state[f"{feedback_key}_feedback_given"] = True
123
 
124
  def clear_feedback_state(results):
@@ -128,4 +127,4 @@ def clear_feedback_state(results):
128
 
129
 
130
  if __name__ == "__main__":
131
- feedback_main()
 
1
  import streamlit as st
2
  from dotenv import load_dotenv
3
+ from workflows.til.analyse_til import TilCrew
4
  from streamlit_extras.capture import stdout
 
5
  from workflows.utils.feedback import Feedback
6
 
7
  load_dotenv()
 
20
  [data-testid="stToolbar"]{
21
  right: 2rem;
22
  }
23
+
24
 
25
  </style>
26
  '''
 
85
  st.markdown(f"**Reason:** {result['reason']}")
86
  if result.get('suggestion') is not None:
87
  st.markdown(f"**Suggestion:** {result['suggestion']}")
88
+
89
  feedback_key = result['til'].replace(' ', '_')
90
  feedback_given_key = f"{feedback_key}_feedback_given"
91
 
 
110
  feedback_data = Feedback(
111
  metric_type=metric_type,
112
  metric_score=metric_score,
113
+ feedback_on=feedback_key.replace('_', ' ').title()
114
  )
115
  try:
116
  TilCrew.post_feedback(run_id, feedback_data)
117
  st.success("Feedback submitted successfully!")
118
  except Exception as e:
119
  st.error(f"Failed to submit feedback: {e}")
120
+
121
  st.session_state[f"{feedback_key}_feedback_given"] = True
122
 
123
  def clear_feedback_state(results):
 
127
 
128
 
129
  if __name__ == "__main__":
130
+ feedback_main()
workflows/{til.py → til/analyse_til.py} RENAMED
@@ -5,10 +5,8 @@ from langchain_core.messages import SystemMessage
5
  from langchain_core.output_parsers import JsonOutputParser
6
  from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
7
  from langchain_openai import ChatOpenAI
8
- from langsmith import Client
9
  from pydantic import BaseModel, Field, UUID4
10
  from typing import List, Optional
11
- from .utils.feedback import Feedback
12
  import os
13
  import pprint
14
 
 
5
  from langchain_core.output_parsers import JsonOutputParser
6
  from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
7
  from langchain_openai import ChatOpenAI
 
8
  from pydantic import BaseModel, Field, UUID4
9
  from typing import List, Optional
 
10
  import os
11
  import pprint
12
 
workflows/{til_v2.py → til/analyse_til_v2.py} RENAMED
@@ -9,11 +9,7 @@ import os
9
  import pprint
10
 
11
 
12
- class TilV2:
13
-
14
- def __init__(self, llm_model, langsmith_project):
15
- self.llm_model = llm_model
16
- self.langsmith_project = langsmith_project
17
 
18
  def kickoff(self, inputs={}):
19
  print("Human Message:")
@@ -89,14 +85,14 @@ class TilV2:
89
  ])
90
  print("Prompt: ")
91
  pprint.pp(feedback_prompt, width=80)
92
- llm = ChatOpenAI(model=self.llm_model, temperature=0.2)
93
  analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
94
  "tags": ["til"], "run_name": "Analysing TIL v2",
95
  "metadata": {
96
  "versoin": "v2.0.0",
97
  "growth_activity": "til",
98
  "env": os.environ["ENV"],
99
- "model": self.llm_model,
100
  }
101
  })
102
 
 
9
  import pprint
10
 
11
 
12
+ class AnalyseTilV2:
 
 
 
 
13
 
14
  def kickoff(self, inputs={}):
15
  print("Human Message:")
 
85
  ])
86
  print("Prompt: ")
87
  pprint.pp(feedback_prompt, width=80)
88
+ llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
89
  analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
90
  "tags": ["til"], "run_name": "Analysing TIL v2",
91
  "metadata": {
92
  "versoin": "v2.0.0",
93
  "growth_activity": "til",
94
  "env": os.environ["ENV"],
95
+ "model": os.environ["OPENAI_MODEL"]
96
  }
97
  })
98