usmanyousaf committed on
Commit
d945639
·
verified ·
1 Parent(s): 5926a5d

Update parse.py

Browse files
Files changed (1) hide show
  1. parse.py +11 -18
parse.py CHANGED
@@ -1,9 +1,6 @@
1
from groq import Groq
import os

# Initialize the Groq client.
# SECURITY: the previous revision hard-coded a live "gsk_..." API key here.
# Any key committed to source control is compromised and must be rotated;
# read it from the environment instead so it never lands in the repository.
client = Groq(api_key=os.environ["GROQ_API_KEY"])
6
- # Define the template for parsing
7
  template = (
8
  "You are tasked with extracting specific information from the following text content: {dom_content}. "
9
  "Please follow these instructions carefully: \n\n"
@@ -13,23 +10,19 @@ template = (
13
  "4. **Direct Data Only:** Your output should contain only the data that is explicitly requested, with no other text."
14
  )
15
 
16
def parse_with_groq(dom_chunks, parse_description, model="llama3-8b-8192"):
    """Extract the requested data from each DOM chunk via the Groq chat API.

    Args:
        dom_chunks: iterable of text/DOM content strings to parse.
        parse_description: description of the data the caller wants extracted.
        model: Groq model identifier (defaults to "llama3-8b-8192").

    Returns:
        The per-chunk response texts joined with newlines.
    """
    parsed_results = []

    for i, chunk in enumerate(dom_chunks, start=1):
        # Fill the shared module-level template with this chunk and the request.
        prompt = template.format(
            dom_content=chunk, parse_description=parse_description
        )

        # One chat completion per chunk, on the requested model.
        response = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=model,
        )

        print(f"Parsed batch: {i} of {len(dom_chunks)}")
        parsed_results.append(response.choices[0].message.content)

    return "\n".join(parsed_results)
 
1
+ from langchain_ollama import OllamaLLM
2
+ from langchain_core.prompts import ChatPromptTemplate
3
 
 
 
 
 
4
  template = (
5
  "You are tasked with extracting specific information from the following text content: {dom_content}. "
6
  "Please follow these instructions carefully: \n\n"
 
10
  "4. **Direct Data Only:** Your output should contain only the data that is explicitly requested, with no other text."
11
  )
12
 
13
model = OllamaLLM(model="llama3")


def parse_with_ollama(dom_chunks, parse_description):
    """Run each DOM chunk through the local Ollama model and collect the output.

    Args:
        dom_chunks: iterable of text/DOM content strings to parse.
        parse_description: description of the data the caller wants extracted.

    Returns:
        The per-chunk model responses joined with newlines.
    """
    # Build the template -> model pipeline once; it is reused for every chunk.
    chain = ChatPromptTemplate.from_template(template) | model

    parsed_results = []

    for i, chunk in enumerate(dom_chunks, start=1):
        response = chain.invoke(
            {"dom_content": chunk, "parse_description": parse_description}
        )
        print(f"Parsed batch: {i} of {len(dom_chunks)}")
        parsed_results.append(response)

    return "\n".join(parsed_results)