# Great-Lens-D / src/grammar_chain.py
# Author: HarishMaths — commit 76ee8a8 ("Update src/grammar_chain.py")
from notebook_parser import NotebookParser
from grammar_prompt import prompt
from fact_prompt import prompt_fact
from langchain_core.runnables import RunnableLambda,RunnableParallel
from langchain_openai import ChatOpenAI
from output_schema import LLMCorrectionOutput,LLMFactualCheckOutput
import pandas as pd
from dotenv import load_dotenv
load_dotenv()
def grammar_pipeline():
    """Build a runnable chain that flags grammar issues in notebook cells.

    Pipeline stages: notebook path -> extracted cells -> single user
    message -> structured LLM call (LLMCorrectionOutput) -> plain dict
    with the grammar verdict, original text, and suggested correction.

    Returns:
        A LangChain runnable; invoke it with a notebook file path.
    """

    def _parse(path):
        # Extract both code and markdown cells from the notebook.
        return NotebookParser(notebook_path=path).extract(code=True, markdown=True)

    def _to_message(cells):
        # One user message: instruction prompt, a lead-in line, then the cells.
        parts = [{"type": "text", "text": prompt}]
        parts.append({"type": "text", "text": "The list of cells are : "})
        return {"role": "user", "content": parts + cells}

    llm = ChatOpenAI(model='gpt-4o-mini', temperature=0)

    def _call_llm(message):
        # Constrain the model's reply to the LLMCorrectionOutput schema.
        return llm.with_structured_output(LLMCorrectionOutput).invoke([message])

    def _to_record(result):
        # Flatten the structured output into a plain dict for downstream use.
        return {
            'Is Grammar Error?': result.is_grammar_error,
            'Grammar_Text': result.text,
            'Grammar_Suggestions': result.corrected_text,
        }

    return (
        RunnableLambda(_parse)
        | RunnableLambda(_to_message)
        | RunnableLambda(_call_llm)
        | RunnableLambda(_to_record)
    )
def fact_pipeline():
    """Build a runnable chain that fact-checks notebook cells.

    Pipeline stages: notebook path -> extracted cells -> single user
    message -> structured LLM call (LLMFactualCheckOutput) -> plain dict
    with the checked text and its suggested correction.

    Returns:
        A LangChain runnable; invoke it with a notebook file path.
    """

    def _parse(path):
        # Extract both code and markdown cells from the notebook.
        return NotebookParser(notebook_path=path).extract(code=True, markdown=True)

    def _to_message(cells):
        # One user message: fact-check prompt, a lead-in line, then the cells.
        parts = [{"type": "text", "text": prompt_fact}]
        parts.append({"type": "text", "text": "The list of cells are : "})
        return {"role": "user", "content": parts + cells}

    llm = ChatOpenAI(model='gpt-4o-mini', temperature=0)

    def _call_llm(message):
        # Constrain the model's reply to the LLMFactualCheckOutput schema.
        return llm.with_structured_output(LLMFactualCheckOutput).invoke([message])

    def _to_record(result):
        # Flatten the structured output into a plain dict for downstream use.
        return {
            'Fact_Text': result.text,
            'Fact_Suggestions': result.corrected_text,
        }

    return (
        RunnableLambda(_parse)
        | RunnableLambda(_to_message)
        | RunnableLambda(_call_llm)
        | RunnableLambda(_to_record)
    )