HarishMaths committed on
Commit
76ee8a8
·
verified ·
1 Parent(s): 8b4ace5

Update src/grammar_chain.py

Browse files
Files changed (1) hide show
  1. src/grammar_chain.py +69 -72
src/grammar_chain.py CHANGED
@@ -1,73 +1,70 @@
1
- import sys,os,pickle
2
- sys.path.append(os.path.abspath("..."))
3
-
4
- from utils.notebook_parser import NotebookParser
5
- from prompts.grammar_prompt import prompt
6
- from prompts.fact_prompt import prompt_fact
7
- from langchain_core.runnables import RunnableLambda,RunnableParallel
8
- from langchain_openai import ChatOpenAI
9
- from output.output_schema import LLMCorrectionOutput,LLMFactualCheckOutput
10
- import pandas as pd
11
-
12
- from dotenv import load_dotenv
13
- load_dotenv()
14
-
15
- def grammar_pipeline():
16
-
17
- parse_notebook = RunnableLambda(lambda path: NotebookParser(notebook_path=path).extract(code=True, markdown=True))
18
-
19
- prepare_message = RunnableLambda(
20
- lambda cells: {
21
- "role": "user",
22
- "content": [{"type": "text", "text": prompt}] + [{"type": "text", "text": "The list of cells are : "}] + cells
23
- },
24
- )
25
-
26
- llm = ChatOpenAI(model='gpt-4o-mini',temperature=0)
27
-
28
- invoke_llm = RunnableLambda(
29
- lambda message: llm.with_structured_output(LLMCorrectionOutput).invoke([message])
30
- )
31
-
32
- extract_suggestions = RunnableLambda(
33
- lambda result: {'Is Grammar Error?':result.is_grammar_error,'Grammar_Text':result.text,'Grammar_Suggestions':result.corrected_text}
34
- )
35
-
36
- notebook_chain = (
37
- parse_notebook
38
- | prepare_message
39
- | invoke_llm
40
- | extract_suggestions
41
- )
42
-
43
- return notebook_chain
44
-
45
- def fact_pipeline():
46
-
47
- parse_notebook = RunnableLambda(lambda path: NotebookParser(notebook_path=path).extract(code=True, markdown=True))
48
-
49
- prepare_message = RunnableLambda(
50
- lambda cells: {
51
- "role": "user",
52
- "content": [{"type": "text", "text": prompt_fact}] + [{"type": "text", "text": "The list of cells are : "}] + cells
53
- },
54
- )
55
-
56
- llm = ChatOpenAI(model='gpt-4o-mini',temperature=0)
57
-
58
- invoke_llm = RunnableLambda(
59
- lambda message: llm.with_structured_output(LLMFactualCheckOutput).invoke([message])
60
- )
61
-
62
- extract_suggestions = RunnableLambda(
63
- lambda result: {'Fact_Text':result.text,'Fact_Suggestions':result.corrected_text}
64
- )
65
-
66
- notebook_chain = (
67
- parse_notebook
68
- | prepare_message
69
- | invoke_llm
70
- | extract_suggestions
71
- )
72
-
73
  return notebook_chain
 
1
+ from notebook_parser import NotebookParser
2
+ from grammar_prompt import prompt
3
+ from fact_prompt import prompt_fact
4
+ from langchain_core.runnables import RunnableLambda,RunnableParallel
5
+ from langchain_openai import ChatOpenAI
6
+ from output_schema import LLMCorrectionOutput,LLMFactualCheckOutput
7
+ import pandas as pd
8
+
9
+ from dotenv import load_dotenv
10
+ load_dotenv()
11
+
12
def grammar_pipeline():
    """Build a runnable chain that grammar-checks the cells of a notebook.

    The returned chain maps a notebook path -> parsed cells -> a single user
    message -> a structured LLM grammar verdict -> a flat result dict with
    the keys downstream consumers expect.
    """
    model = ChatOpenAI(model='gpt-4o-mini', temperature=0)

    def _parse(path):
        # Pull both code and markdown cells out of the notebook on disk.
        return NotebookParser(notebook_path=path).extract(code=True, markdown=True)

    def _to_message(cells):
        # One user message: grammar instruction prompt, a lead-in line, then the cells.
        content = [{"type": "text", "text": prompt}]
        content += [{"type": "text", "text": "The list of cells are : "}]
        content += cells
        return {"role": "user", "content": content}

    def _call_model(message):
        # Ask for a structured LLMCorrectionOutput instead of free-form text.
        return model.with_structured_output(LLMCorrectionOutput).invoke([message])

    def _to_report(result):
        # Flatten the structured output into the reporting dict shape.
        return {
            'Is Grammar Error?': result.is_grammar_error,
            'Grammar_Text': result.text,
            'Grammar_Suggestions': result.corrected_text,
        }

    return (
        RunnableLambda(_parse)
        | RunnableLambda(_to_message)
        | RunnableLambda(_call_model)
        | RunnableLambda(_to_report)
    )
41
+
42
def fact_pipeline():
    """Build a runnable chain that fact-checks the cells of a notebook.

    The returned chain maps a notebook path -> parsed cells -> a single user
    message -> a structured LLM factual-check verdict -> a flat result dict
    with the keys downstream consumers expect.
    """
    model = ChatOpenAI(model='gpt-4o-mini', temperature=0)

    def _parse(path):
        # Pull both code and markdown cells out of the notebook on disk.
        return NotebookParser(notebook_path=path).extract(code=True, markdown=True)

    def _to_message(cells):
        # One user message: fact-check instruction prompt, a lead-in line, then the cells.
        content = [{"type": "text", "text": prompt_fact}]
        content += [{"type": "text", "text": "The list of cells are : "}]
        content += cells
        return {"role": "user", "content": content}

    def _call_model(message):
        # Ask for a structured LLMFactualCheckOutput instead of free-form text.
        return model.with_structured_output(LLMFactualCheckOutput).invoke([message])

    def _to_report(result):
        # Flatten the structured output into the reporting dict shape.
        return {
            'Fact_Text': result.text,
            'Fact_Suggestions': result.corrected_text,
        }

    return (
        RunnableLambda(_parse)
        | RunnableLambda(_to_message)
        | RunnableLambda(_call_model)
        | RunnableLambda(_to_report)
    )