Spaces: Running
Commit 3abff09 (parent: 6c26e5d) — "Update app.py" · Browse files
app.py CHANGED
|
@@ -1,56 +1,64 @@
|
|
| 1 |
-
|
| 2 |
import gradio as gr
|
| 3 |
import fitz # PyMuPDF
|
| 4 |
from transformers import BartTokenizer, BartForConditionalGeneration, pipeline
|
| 5 |
import scipy.io.wavfile
|
| 6 |
import numpy as np
|
| 7 |
|
|
|
|
|
|
|
|
|
|
| 8 |
# Initialize tokenizers and models
|
| 9 |
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
|
| 10 |
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
|
| 11 |
synthesiser = pipeline("text-to-speech", "suno/bark")
|
| 12 |
|
| 13 |
-
# Function to extract abstract from PDF
|
| 14 |
def extract_abstract(pdf_bytes):
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
| 26 |
def process_text(uploaded_file):
|
| 27 |
-
# Attempt to extract byte content from NamedString object
|
| 28 |
try:
|
| 29 |
pdf_bytes = uploaded_file.file.read()
|
| 30 |
-
|
| 31 |
-
|
|
|
|
| 32 |
return "File content could not be retrieved", None
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
normalized_audio_data = np.int16(audio_data / np.max(np.abs(audio_data)) * 32767)
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
scipy.io.wavfile.write(output_file, rate=speech["sampling_rate"], data=normalized_audio_data)
|
| 50 |
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
-
# Gradio Interface
|
| 54 |
iface = gr.Interface(
|
| 55 |
fn=process_text,
|
| 56 |
inputs=gr.components.File(label="Upload PDF"),
|
|
|
|
| 1 |
+
import logging
|
| 2 |
import gradio as gr
|
| 3 |
import fitz # PyMuPDF
|
| 4 |
from transformers import BartTokenizer, BartForConditionalGeneration, pipeline
|
| 5 |
import scipy.io.wavfile
|
| 6 |
import numpy as np
|
| 7 |
|
| 8 |
+
# Initialize logging
|
| 9 |
+
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 10 |
+
|
| 11 |
# Initialize tokenizers and models
|
| 12 |
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
|
| 13 |
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
|
| 14 |
synthesiser = pipeline("text-to-speech", "suno/bark")
|
| 15 |
|
|
|
|
| 16 |
def extract_abstract(pdf_bytes):
    """Extract the abstract section from the first page of a PDF.

    Takes the text between the (case-insensitive) markers "abstract" and
    "introduction" on page 1.

    Args:
        pdf_bytes: Raw PDF file content as bytes.

    Returns:
        The abstract text, or a human-readable notice/error string when the
        section cannot be located or the PDF cannot be parsed (callers treat
        the return as display text either way).
    """
    try:
        # Context manager guarantees the document handle is released even on
        # error — the original never closed the fitz.Document.
        with fitz.open(stream=pdf_bytes, filetype="pdf") as doc:
            first_page = doc[0].get_text()
        lowered = first_page.lower()
        start_idx = lowered.find("abstract")
        end_idx = lowered.find("introduction")
        # Require both markers AND correct ordering: when "introduction"
        # preceded "abstract", the original slice returned an empty string
        # instead of the not-found notice.
        if start_idx != -1 and end_idx != -1 and start_idx < end_idx:
            return first_page[start_idx:end_idx].strip()
        return "Abstract not found or 'Introduction' not found in the first page."
    except Exception as e:
        logging.error(f"Error extracting abstract: {e}")
        return "Error in abstract extraction"
|
| 29 |
+
|
| 30 |
def process_text(uploaded_file):
    """Summarize a PDF's abstract with BART and synthesize it as speech.

    Args:
        uploaded_file: The Gradio upload object; assumed to expose a
            file-like handle at ``.file`` — TODO confirm against the
            installed Gradio version (newer versions pass a filepath string,
            which would hit the AttributeError branch below).

    Returns:
        Tuple of (summary text or error message, path to WAV file or None).
    """
    # Pull raw bytes off the upload object; anything without a `.file`
    # attribute is reported rather than raised.
    try:
        pdf_bytes = uploaded_file.file.read()
        logging.info("PDF file read successfully")
    except AttributeError as e:
        logging.error(f"Error reading file content: {e}")
        return "File content could not be retrieved", None

    try:
        abstract_text = extract_abstract(pdf_bytes)
        # Log only a prefix; abstracts can be long.
        logging.info(f"Extracted abstract: {abstract_text[:100]}...")
    except Exception as e:
        logging.error(f"Error in abstract extraction: {e}")
        return "Error in processing PDF", None

    try:
        # Summarize the abstract with the module-level BART tokenizer/model.
        inputs = tokenizer([abstract_text], max_length=1024, return_tensors='pt', truncation=True)
        summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=40, min_length=10,
                                     length_penalty=2.0, early_stopping=True, no_repeat_ngram_size=2)
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

        # Convert the summary to speech, then normalize to 16-bit PCM for WAV.
        speech = synthesiser(summary, forward_params={"do_sample": True})
        audio_data = speech["audio"].squeeze()
        peak = np.max(np.abs(audio_data))
        # Guard against all-silent audio: dividing by a zero peak yields
        # NaNs, which the np.int16 cast turns into garbage samples.
        if peak > 0:
            normalized_audio_data = np.int16(audio_data / peak * 32767)
        else:
            normalized_audio_data = np.int16(audio_data)

        output_file = "temp_output.wav"
        scipy.io.wavfile.write(output_file, rate=speech["sampling_rate"], data=normalized_audio_data)

        return summary, output_file
    except Exception as e:
        logging.error(f"Error in summary generation or TTS conversion: {e}")
        return "Error in summary or speech generation", None
|
| 61 |
|
|
|
|
| 62 |
iface = gr.Interface(
|
| 63 |
fn=process_text,
|
| 64 |
inputs=gr.components.File(label="Upload PDF"),
|