Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,29 +1,37 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
def extract_info(pdf_file):
|
| 5 |
-
|
| 6 |
-
|
|
|
|
|
|
|
| 7 |
|
| 8 |
-
# Przetwarzanie
|
| 9 |
-
|
| 10 |
|
| 11 |
-
#
|
| 12 |
-
extracted_data = {
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
|
|
|
|
|
|
| 17 |
|
| 18 |
return extracted_data
|
| 19 |
|
| 20 |
-
# Interfejs u偶ytkownika
|
| 21 |
iface = gr.Interface(
|
| 22 |
fn=extract_info,
|
| 23 |
-
inputs=gr.
|
| 24 |
-
outputs=
|
| 25 |
title="Ekstrakcja informacji z faktur PDF",
|
| 26 |
-
description="Prze艣lij plik PDF z faktur膮,
|
| 27 |
)
|
| 28 |
|
| 29 |
if __name__ == "__main__":
|
|
|
|
import gradio as gr
import pdfplumber
from transformers import pipeline

# Initialize the model used to extract information from invoice text.
# NOTE(review): dslim/bert-base-NER is a general-purpose English NER model
# (PER/ORG/LOC/MISC entity labels) — it is not invoice-specific and the app's
# text is Polish; confirm this model choice matches the intended use.
extractor = pipeline("ner", model="dslim/bert-base-NER")
|
| 7 |
|
| 8 |
def extract_info(pdf_file):
    """Extract named entities from a PDF invoice and group them by label.

    Parameters:
        pdf_file: path or file-like object for the PDF, as delivered by the
            Gradio ``File`` input component.

    Returns:
        dict mapping an entity label (e.g. ``"B-ORG"``) to the list of words
        recognised under that label, in document order.
    """
    # Collect the text of every page. pdfplumber's extract_text() returns
    # None for pages with no extractable text (e.g. scanned images), which
    # would previously crash on `None + "\n"` — substitute "" for those.
    # Built with a single join instead of quadratic string +=.
    with pdfplumber.open(pdf_file) as pdf:
        text = "".join(
            (page.extract_text() or "") + "\n" for page in pdf.pages
        )

    # Run the NER pipeline over the whole document.
    # NOTE(review): BERT-based pipelines truncate input around 512 tokens,
    # so long invoices may silently lose entities — consider chunking.
    entities = extractor(text)

    # Group recognised words by their entity label.
    extracted_data = {}
    for entity in entities:
        extracted_data.setdefault(entity['entity'], []).append(entity['word'])

    return extracted_data
|
| 27 |
|
| 28 |
+
# User interface for the Hugging Face Space: upload a PDF, get the grouped
# NER entities back as JSON.
iface = gr.Interface(
    fn=extract_info,
    inputs=gr.File(label="Wybierz plik PDF"),
    outputs="json",
    title="Ekstrakcja informacji z faktur PDF",
    description="Prześlij plik PDF z fakturą, a model rozpozna kluczowe informacje."
)
|
| 36 |
|
| 37 |
if __name__ == "__main__":
|