jithenderchoudary committed on
Commit
c78159a
·
verified ·
1 Parent(s): 337fa5f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -54
app.py CHANGED
@@ -1,64 +1,67 @@
 
 
1
  import pandas as pd
2
- import fitz # PyMuPDF for PDF processing
3
- from transformers import AutoProcessor, AutoModelForDocumentQuestionAnswering
4
- import torch
5
- from PIL import Image
6
 
7
# Document-QA model (LayoutLMv3 base checkpoint) used as the OCR backend.
_CHECKPOINT = "microsoft/layoutlmv3-base"
processor = AutoProcessor.from_pretrained(_CHECKPOINT)
model = AutoModelForDocumentQuestionAnswering.from_pretrained(_CHECKPOINT)
 
10
 
11
# Function to extract text from PDF using OCR
def extract_text_from_pdf(pdf_path):
    """Run the document model over every page of a PDF and collect decoded text.

    Args:
        pdf_path: Path to the PDF file on disk.

    Returns:
        list[str]: One decoded string per page, in page order.
    """
    all_text = []
    # Use the document as a context manager so the handle is always closed
    # (the original leaked the fitz document on every call and on errors).
    with fitz.open(pdf_path) as pdf_document:
        for page_num in range(len(pdf_document)):
            page = pdf_document.load_page(page_num)
            pix = page.get_pixmap()  # rasterize the PDF page to an image
            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)

            # Preprocess image for the Hugging Face model
            inputs = processor(images=img, return_tensors="pt")

            # Model inference without gradient tracking
            with torch.no_grad():
                outputs = model(**inputs)

            # NOTE(review): AutoModelForDocumentQuestionAnswering produces
            # span (start/end) logits, not token-id logits, so argmax-decoding
            # them is unlikely to yield OCR text — confirm the model choice;
            # a dedicated OCR checkpoint (e.g. TrOCR) may be what was intended.
            text = processor.decode(outputs.logits.argmax(-1).squeeze().tolist())
            all_text.append(text)

    return all_text
34
 
35
# Parse text to extract relevant information (example logic, modify based on PO structure)
def parse_po_details(extracted_text):
    """Filter extracted page text down to purchase-order detail lines.

    Args:
        extracted_text: Iterable of text strings (one per page/line) to scan.

    Returns:
        list[str]: Stripped lines that mention any PO field keyword.
    """
    keywords = ("Material", "Quantity", "Rate", "Value")
    return [
        line.strip()
        for line in extracted_text
        if any(keyword in line for keyword in keywords)
    ]
45
-
46
# Function to export data to CSV
def export_to_csv(po_data, output_csv_path):
    """Write the parsed PO lines to a one-column CSV without an index column.

    Args:
        po_data: Sequence of PO detail strings, one per output row.
        output_csv_path: Destination path for the CSV file.
    """
    frame = pd.DataFrame({"PO Details": list(po_data)})
    frame.to_csv(output_csv_path, index=False)
50
-
51
# --- Script entry: OCR the PO PDF and dump the parsed details to CSV ---
pdf_path = 'your_pdf_path_here.pdf'
output_csv_path = 'requirement.csv'

# OCR every page, keep only the PO-keyword lines, then persist them as CSV.
extracted_text = extract_text_from_pdf(pdf_path)
po_data = parse_po_details(extracted_text)
export_to_csv(po_data, output_csv_path)

print(f"Purchase order details have been exported to {output_csv_path}")
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pdfplumber
3
  import pandas as pd
4
+ from io import BytesIO
 
 
 
5
 
6
def is_header(text):
    """Identify header/boilerplate content using common PO keywords.

    Args:
        text: Page text to inspect. May be None or empty — pdfplumber's
            ``extract_text`` returns None for image-only pages.

    Returns:
        bool: True if any header keyword occurs in the text.
    """
    # Guard: the original raised TypeError on `keyword in None` when a page
    # had no extractable text.
    if not text:
        return False
    keywords = ['Purchase Order', 'Supplier Order', 'GSTIN', 'Annexure', 'Terms', 'Currency']
    return any(keyword in text for keyword in keywords)
10
 
11
def extract_cleaned_tables(pdf_file):
    """Extract tables from a PDF, skipping header-heavy pages.

    Args:
        pdf_file: Path or file-like object accepted by ``pdfplumber.open``.

    Returns:
        list[tuple[str, pandas.DataFrame]]: ``(sheet_name, table)`` pairs in
        page order. Sheet names are unique even when one page holds several
        tables (the original reused ``Page_N`` and collided downstream when
        each name became an Excel sheet).
    """
    tables = []

    with pdfplumber.open(pdf_file) as pdf:
        for page_num, page in enumerate(pdf.pages):
            # extract_text() returns None for image-only pages; treat those
            # as non-header so their tables are still considered (the
            # original crashed inside is_header on None).
            text = page.extract_text() or ""

            # Skip pages with header-heavy content
            if is_header(text):
                continue

            for table_num, table in enumerate(page.extract_tables()):
                if not table:
                    continue
                df = pd.DataFrame(table[1:], columns=table[0])

                # Fix misalignment: unit strings (NOS/PCS) sometimes land in
                # the 'Delivery Date' column; move them back to 'Unit'.
                if 'Delivery Date' in df.columns and 'Unit' in df.columns:
                    mask = df['Delivery Date'].str.contains(r'NOS|PCS', na=False)
                    df.loc[mask, 'Unit'] = df.loc[mask, 'Delivery Date']
                    df.loc[mask, 'Delivery Date'] = None

                # First table keeps the original "Page_N" name for backward
                # compatibility; later tables on the page get a suffix.
                name = f"Page_{page_num+1}"
                if table_num:
                    name = f"{name}_T{table_num+1}"
                tables.append((name, df))

    return tables
36
 
37
# ---------------------------------------------------------------------------
# Streamlit UI: upload a PO PDF, extract its tables, offer an Excel download.
# ---------------------------------------------------------------------------
st.title("Enhanced PO Extraction Tool")

uploaded_file = st.file_uploader("Upload PO PDF", type=["pdf"])

if uploaded_file:
    try:
        # Pull the cleaned tables out of the uploaded PDF.
        extracted_tables = extract_cleaned_tables(uploaded_file)

        if not extracted_tables:
            st.warning("No valid tables found.")
        else:
            st.success("Tables extracted successfully!")

            # Assemble an in-memory workbook, one sheet per extracted table.
            excel_buffer = BytesIO()
            with pd.ExcelWriter(excel_buffer, engine='openpyxl') as writer:
                for sheet_name, df in extracted_tables:
                    df.to_excel(writer, index=False, sheet_name=sheet_name)
            excel_buffer.seek(0)

            # Offer the workbook as a browser download.
            st.download_button(
                label="Download as Excel",
                data=excel_buffer,
                file_name="po_data.xlsx",
                mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            )
    except Exception as e:
        st.error(f"An error occurred: {e}")