Spaces:
Runtime error
Runtime error
| from dotenv import load_dotenv | |
| load_dotenv() | |
| from .workflows.courses.expectation_revision import ExpectationRevision, Inputs as ExpectationRevisionInputs, Response as ExpectationRevisionResponse | |
| from .workflows.courses.suggest_check_question import SuggestCheckQuestion, Inputs as SuggestCheckQuestionInputs, Response as SuggestCheckQuestionResponse | |
| from .workflows.courses.suggest_expectations import SuggestExpectations, Inputs as SuggestExpectationsInputs, Expectation, Response as SuggestExpectationsResponse | |
| from .workflows.til.analyse_til import TilCrew, TilFeedbackResponse | |
| from .workflows.til.analyse_til_v2 import AnalyseTilV2, TilV2FeedbackResponse | |
| from .workflows.utils.feedback import Feedback, post_feedback | |
| from fastapi import FastAPI, Query | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from pydantic import UUID4 | |
| from tempenv import TemporaryEnvironment | |
| from typing import List | |
| import uvicorn | |
# LangSmith project names used to route traces to staging vs production runs.
LANGSMITH_STAGING_PROJECT = "customer_agent"
LANGSMITH_PROD_PROJECT = "growthy-agents"

description = """
API helps you do awesome stuff. 🚀
"""

# OpenAPI tag metadata: groups related endpoints in the generated docs.
tags_metadata = [
    {
        "name": "til_feedback",
        "description": "Gives the feedback on user's TIL content",
    },
    {
        "name": "course_learn",
        "description": "Workflows for course learn.",
    },
]

app = FastAPI(
    title="Growthy AI Workflows",  # fixed typo: was "Worflows"
    description=description,
    summary="Deadpool's favorite app. Nuff said.",
    version="0.0.1",
    openapi_tags=tags_metadata,
    docs_url="/documentation",
)

# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive, and browsers reject credentialed responses that echo "*" as the
# origin — confirm the intended CORS policy.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
async def til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Join TIL lines into a markdown bullet list and run the TIL feedback crew.

    Fix: the original mutated the caller's list in place
    (``content[0] = "* " + content[0]``) and raised IndexError on an empty
    list; the bullet block is now built without side effects and an empty
    list produces an empty prompt body.
    """
    bullets = "\n* ".join(content)
    inputs = {"content": f"* {bullets}" if content else ""}
    result = TilCrew().kickoff(inputs)
    return result
async def capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Log the incoming feedback fields, then forward them via post_feedback."""
    for label, value in (
        ("Metric Type: ", feedback.metric_type),
        ("Feedback On: ", feedback.feedback_on),
    ):
        print(label, value)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
async def staging_til_feedback_kickoff(content: List[str]) -> TilFeedbackResponse:
    """Staging variant of the TIL feedback kickoff: runs under the staging
    LangSmith project with gpt-4o-mini.

    Fix: the original mutated the caller's list in place and raised
    IndexError on an empty list; the bullet block is now built without
    side effects and an empty list produces an empty prompt body.
    """
    with TemporaryEnvironment({"LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT, "OPENAI_MODEL": "gpt-4o-mini"}):
        bullets = "\n* ".join(content)
        inputs = {"content": f"* {bullets}" if content else ""}
        return TilCrew().kickoff(inputs)
async def staging_capture_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Log and forward feedback under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        for label, value in (
            ("Metric Type: ", feedback.metric_type),
            ("Feedback On: ", feedback.feedback_on),
        ):
            print(label, value)
        post_feedback(run_id=run_id, feedback=feedback)
        return "ok"
def til_v2_logic(content: List[str]) -> TilV2FeedbackResponse:
    """Join TIL lines into a markdown bullet list and run the v2 analysis workflow.

    Fix: the original mutated the caller's list in place and raised
    IndexError on an empty list; the bullet block is now built without
    side effects and an empty list produces an empty prompt body.
    """
    bullets = "\n* ".join(content)
    inputs = {"content": f"* {bullets}" if content else ""}
    return AnalyseTilV2().kickoff(inputs)
async def til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Run the TIL v2 feedback workflow under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return til_v2_logic(content)
async def staging_til_v2_feedback_kickoff(content: List[str]) -> TilV2FeedbackResponse:
    """Run the TIL v2 feedback workflow under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return til_v2_logic(content)
def course_learn_suggest_expectations_logic(inputs) -> SuggestExpectationsResponse:
    """Run the SuggestExpectations workflow with fields pulled off *inputs*."""
    print("Inputs: ", inputs)
    payload = {
        "course": inputs.course,
        "module": inputs.module,
        "tasks": inputs.tasks,
        "existing_expectations": inputs.existing_expectations,
    }
    return SuggestExpectations().kickoff(inputs=payload)
def course_learn_suggest_expectations_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log the feedback fields and forward them to LangSmith via post_feedback.

    Fix: corrected log-label typo "Helful" -> "Helpful".
    NOTE(review): the label says "Helpful Score" but the value printed is
    feedback.metric_type — confirm which field was intended.
    """
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
async def course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Run suggest-expectations under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_logic(inputs)
async def staging_course_learn_suggest_expectations(inputs: SuggestExpectationsInputs) -> SuggestExpectationsResponse:
    """Run suggest-expectations under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_logic(inputs)
async def capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record suggest-expectations feedback under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
async def staging_capture_suggest_expectations_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record suggest-expectations feedback under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_expectations_feedback_logic(run_id, feedback)
def course_learn_expectation_revision_logic(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Run the ExpectationRevision workflow with fields pulled off *inputs*."""
    print("Inputs: ", inputs)
    payload = {
        "expectation": inputs.expectation,
        "check_question": inputs.check_question,
        "request": inputs.request,
    }
    return ExpectationRevision().kickoff(inputs=payload)
async def course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Run expectation revision under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_expectation_revision_logic(inputs)
async def staging_course_learn_expectation_revision(inputs: ExpectationRevisionInputs) -> ExpectationRevisionResponse:
    """Run expectation revision under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_expectation_revision_logic(inputs)
def capture_expectation_revision_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log the feedback fields and forward them to LangSmith via post_feedback.

    Fix: corrected log-label typo "Helful" -> "Helpful".
    NOTE(review): the label says "Helpful Score" but the value printed is
    feedback.metric_type — confirm which field was intended.
    """
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
async def capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record expectation-revision feedback under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
async def staging_capture_expectation_revision_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record expectation-revision feedback under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return capture_expectation_revision_feedback_logic(run_id, feedback)
def course_learn_suggest_check_question_logic(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Run the SuggestCheckQuestion workflow with fields pulled off *inputs*."""
    print("Inputs: ", inputs)
    payload = {
        "course": inputs.course,
        "module": inputs.module,
        "tasks": inputs.tasks,
        "expectation": inputs.expectation,
    }
    return SuggestCheckQuestion().kickoff(inputs=payload)
async def course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Run suggest-check-question under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_logic(inputs)
async def staging_course_learn_suggest_check_question(inputs: SuggestCheckQuestionInputs) -> SuggestCheckQuestionResponse:
    """Run suggest-check-question under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_logic(inputs)
def course_learn_suggest_check_question_feedback_logic(run_id: UUID4, feedback: Feedback) -> str:
    """Log the feedback fields and forward them to LangSmith via post_feedback.

    Fix: corrected log-label typo "Helful" -> "Helpful".
    NOTE(review): the label says "Helpful Score" but the value printed is
    feedback.metric_type — confirm which field was intended.
    """
    print("Helpful Score: ", feedback.metric_type)
    print("Feedback On: ", feedback.feedback_on)
    post_feedback(run_id=run_id, feedback=feedback)
    return "ok"
async def course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record suggest-check-question feedback under the production LangSmith project."""
    prod_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_PROD_PROJECT,
        "OPENAI_MODEL": "gpt-4o",
    }
    with TemporaryEnvironment(prod_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
async def staging_course_learn_suggest_check_question_feedback(run_id: UUID4, feedback: Feedback) -> str:
    """Record suggest-check-question feedback under the staging LangSmith project."""
    staging_env = {
        "LANGCHAIN_PROJECT": LANGSMITH_STAGING_PROJECT,
        "OPENAI_MODEL": "gpt-4o-mini",
    }
    with TemporaryEnvironment(staging_env):
        return course_learn_suggest_check_question_feedback_logic(run_id, feedback)
async def read_root():
    """Health check: report that the service is up."""
    status_payload = {"status": "ok"}
    return status_payload
# Local development entry point: serve the app on localhost:8080 via uvicorn.
if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8080)