# ai_workflows/endpoints.py
# Snapshot from the Hugging Face file viewer (commit 35d4946,
# "Add staging url support for existing endpoints", by theRealNG).
from dotenv import load_dotenv
load_dotenv()
from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse
from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse
from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse
from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse
from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse
from .workflows.utils.feedback import Feedback, post_feedback
from fastapi import FastAPI, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import UUID4
from tempenv import TemporaryEnvironment
from typing import List
import uvicorn
# LangSmith project names used to route traces per environment.
LANGSMITH_STAGING_PROJECT = "customer_agent"
LANGSMITH_PROD_PROJECT = "growthy-agents"

description = """
API helps you do awesome stuff. 🚀
"""

# OpenAPI tag groups shown in the generated documentation UI.
tags_metadata = [
    {
        "name": "til_feedback",
        "description": "Gives the feedback on user's TIL content",
    },
    {
        "name": "course_learn",
        "description": "Workflows for course learn.",
    },
]

app = FastAPI(
    title="Growthy AI Workflows",  # fixed typo: was "Worflows"
    description=description,
    summary="Deadpool's favorite app. Nuff said.",
    version="0.0.1",
    openapi_tags=tags_metadata,
    docs_url="/documentation",
)

# Wide-open CORS for all origins/methods/headers.
# NOTE(review): browsers reject credentialed requests when
# allow_origins=["*"] is combined with allow_credentials=True;
# list explicit origins if cookies/auth headers are needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.post("/til_feedback", tags=["til_feedback"])
async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Run the TIL feedback crew on the given content lines.

    Each entry of ``content`` becomes a markdown bullet ("* ...") before
    being handed to the crew. Fixes: the original mutated the caller's
    list in place and raised IndexError on an empty payload.
    """
    bullets = "* " + "\n* ".join(content) if content else ""
    return TilCrew().kickoff({"content": bullets})
@app.post("/til_feedback/{run_id}/feedback", tags=["til_feedback"])
async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record user feedback for a prior TIL-feedback run."""
    # Log the feedback fields before forwarding them.
    for label, value in (
        ("Metric Type: ", feedback.metric_type),
        ("Feedback On: ", feedback.feedback_on),
    ):
        print(label, value)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
@app.post("/staging/til_feedback", tags=["til_feedback", "staging"])
async def staging_til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Staging variant of /til_feedback.

    Runs under the staging LangSmith project with the cheaper gpt-4o-mini
    model. Fixes: the original mutated the caller's list in place and
    raised IndexError on an empty payload.
    """
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        bullets = "* " + "\n* ".join(content) if content else ""
        return TilCrew().kickoff({"content": bullets})
@app.post("/staging/til_feedback/{run_id}/feedback", tags=["til_feedback", "staging"])
async def staging_capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record user feedback for a staging TIL-feedback run."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        for label, value in (
            ("Metric Type: ", feedback.metric_type),
            ("Feedback On: ", feedback.feedback_on),
        ):
            print(label, value)
        post_feedback(run_id=run_id, feedback=feedback)
        return "ok"
def til_v2_logic(content: List[str]) -> TilV2FeedbackResponse:
    """Shared implementation for the v2 TIL feedback endpoints.

    Renders ``content`` as a markdown bullet list and runs the
    AnalyseTilV2 workflow on it. Fixes: the original mutated the
    caller's list in place and raised IndexError on an empty payload;
    the parameter annotation was also missing.
    """
    bullets = "* " + "\n* ".join(content) if content else ""
    return AnalyseTilV2().kickoff({"content": bullets})
@app.post("/v2/til_feedback", tags=["til_feedback"])
async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Run the v2 TIL feedback workflow against the production project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return til_v2_logic(content)
@app.post("/staging/v2/til_feedback", tags=["til_feedback", "staging"])
async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Run the v2 TIL feedback workflow against the staging project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return til_v2_logic(content)
def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
    """Kick off the SuggestExpectations workflow with the request payload."""
    print("Inputs: ", inputs)
    workflow_inputs = {
        "course": inputs.course,
        "module": inputs.module,
        "tasks": inputs.tasks,
        "existing_expectations": inputs.existing_expectations,
    }
    return SuggestExpectations().kickoff(inputs=workflow_inputs)
def course_learn_suggest_expectations_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Forward suggest-expectations feedback to LangSmith.

    Fixes: the first print label read "Helful Score: " (typo) while
    printing ``metric_type``; it now matches the other feedback handlers.
    """
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
@app.post("/course_learn/suggest_expectations", tags=["course_learn"])
async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Suggest expectations for a course module (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_logic(inputs)
@app.post("/staging/course_learn/suggest_expectations", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Suggest expectations for a course module (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_logic(inputs)
@app.post("/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn"])
async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on a suggest-expectations run (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
@app.post("/staging/course_learn/suggest_expectations/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on a suggest-expectations run (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
def course_learn_expectation_revision_logic(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Kick off the ExpectationRevision workflow with the request payload."""
    print("Inputs: ", inputs)
    workflow_inputs = {
        "expectation": inputs.expectation,
        "check_question": inputs.check_question,
        "request": inputs.request,
    }
    return ExpectationRevision().kickoff(inputs=workflow_inputs)
@app.post("/course_learn/expectation_revision", tags=["course_learn"])
async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Revise an expectation per the user's request (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_expectation_revision_logic(inputs)
@app.post("/staging/course_learn/expectation_revision", tags=["course_learn", "staging"])
async def staging_course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Revise an expectation per the user's request (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_expectation_revision_logic(inputs)
def capture_expectation_revision_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Forward expectation-revision feedback to LangSmith.

    Fixes: the first print label read "Helful Score: " (typo) while
    printing ``metric_type``; it now matches the other feedback handlers.
    """
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
@app.post("/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn"])
async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on an expectation-revision run (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
@app.post("/staging/course_learn/expectation_revision/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on an expectation-revision run (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
def course_learn_suggest_check_question_logic(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Kick off the SuggestCheckQuestion workflow with the request payload."""
    print("Inputs: ", inputs)
    workflow_inputs = {
        "course": inputs.course,
        "module": inputs.module,
        "tasks": inputs.tasks,
        "expectation": inputs.expectation,
    }
    return SuggestCheckQuestion().kickoff(inputs=workflow_inputs)
@app.post("/course_learn/suggest_check_question", tags=["course_learn"])
async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Suggest a check question for an expectation (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_logic(inputs)
@app.post("/staging/course_learn/suggest_check_question", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Suggest a check question for an expectation (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_logic(inputs)
def course_learn_suggest_check_question_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Forward suggest-check-question feedback to LangSmith.

    Fixes: the first print label read "Helful Score: " (typo) while
    printing ``metric_type``; it now matches the other feedback handlers.
    """
    print("Metric Type: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
@app.post("/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn"])
async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on a suggest-check-question run (production project)."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
@app.post("/staging/course_learn/suggest_check_question/{run_id}/feedback", tags=["course_learn", "staging"])
async def staging_course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Capture feedback on a suggest-check-question run (staging project)."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
@app.get("/healthcheck")
async def read_root():
    """Liveness probe: always reports the service as up."""
    return dict(status="ok")
if __name__ == "__main__":
    # Development-only entry point: serve the app on localhost:8080.
    local_host, local_port = "127.0.0.1", 8080
    uvicorn.run(app, host=local_host, port=local_port)