# NOTE(review): the three lines here ("Spaces: / Sleeping / Sleeping") were
# Hugging Face Spaces status-banner residue from a page scrape, not code.
| import logging | |
| from typing import Any | |
| from langchain.prompts import ChatPromptTemplate | |
| from langchain_core.output_parsers import StrOutputParser | |
| from langchain_core.runnables import RunnableLambda, RunnablePassthrough | |
| from .model import gpt4o | |
| from .base import build_output_fixing_parser, extraction_messages, generation_messages | |
# Logger named to match the FastAPI CLI logging configuration.
logger = logging.getLogger("fastapi_cli")

# String parser wrapped in an output-fixing layer: if the raw LLM output
# fails to parse, gpt4o is asked to repair it before parsing is retried.
parser = build_output_fixing_parser(parser=StrOutputParser(), llm=gpt4o)
# Extraction step: prompt -> gpt4o -> self-repairing string parser.
extraction_prompt = ChatPromptTemplate.from_messages(extraction_messages)
extraction_chain = extraction_prompt | gpt4o | parser
# Generation step: prompt -> gpt4o -> self-repairing string parser.
generation_prompt = ChatPromptTemplate.from_messages(generation_messages)
generation_chain = generation_prompt | gpt4o | parser
# LCEL pipeline: each .assign() passes the input dict through unchanged and
# adds one new key, so downstream steps see all earlier outputs.
# Step 1 adds "summary_output" (result of extraction_chain).
_with_summary = RunnablePassthrough.assign(summary_output=extraction_chain)
# Step 2 adds "generated_message"; generation_chain receives the original
# input plus the extracted summary.
chain = _with_summary.assign(generated_message=generation_chain)