the-carnage committed on
Commit
f232bf7
·
1 Parent(s): 216c20d

Docstrings for cleaner code

Browse files
Files changed (1) hide show
  1. app.py +22 -21
app.py CHANGED
@@ -7,7 +7,7 @@ import io
7
 
8
  st.set_page_config(page_title="Docurizzer", layout="centered")
9
  st.title("📄 Docurizzer")
10
- st.write("Summarize text, images, or PDFs with AI")
11
 
12
  st.sidebar.header("Summarization Settings")
13
  min_len = st.sidebar.slider("Min Length", min_value=10, max_value=100, value=40, step=10)
@@ -22,47 +22,48 @@ def load_model():
22
  tokenizer, model = load_model()
23
 
24
def extract_text_from_image(image):
    """Run OCR on *image* via pytesseract and return the recognized text."""
    recognized = pytesseract.image_to_string(image)
    return recognized
 
 
 
27
 
28
def extract_text_from_pdf(pdf_file):
    """Return the concatenated text of every page in *pdf_file*.

    Pages with no extractable text are skipped; each extracted page is
    followed by a newline, matching the original accumulation behavior.
    """
    chunks = []
    with pdfplumber.open(pdf_file) as pdf:
        for page in pdf.pages:
            content = page.extract_text()
            if content:
                chunks.append(content + "\n")
    return "".join(chunks)
 
 
 
 
37
 
38
- def summarize_text(text, min_Len, max_Len):
39
- """Summarize the given text"""
40
  if not text.strip():
41
  return None
42
  input_text = "summarize: " + text[:4000]
43
  inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
44
  input_token_count = inputs.input_ids.shape[1]
45
 
46
- # For very short inputs, just return the original text
47
  if input_token_count < 15:
48
  return text.strip()
49
 
50
- # Cap lengths to avoid repetition - max should not exceed input length
51
- effective_max = min(max_Len, max(int(input_token_count * 0.6), 20))
52
- effective_min = 5 # Minimum 5 tokens for a summary
53
 
54
- # Ensure min < max
55
  if effective_min >= effective_max:
56
  effective_min = max(1, effective_max - 5)
57
 
58
- # Use simpler generation for short inputs
59
  if input_token_count < 50:
60
  summary_ids = model.generate(
61
  inputs.input_ids,
62
  max_length=effective_max,
63
  min_length=effective_min,
64
- do_sample=False, # Deterministic
65
- num_beams=1, # No beam search for short inputs
66
  early_stopping=True
67
  )
68
  else:
@@ -152,4 +153,4 @@ with tab3:
152
  st.warning("No text could be extracted from the PDF. The PDF might be image-based or empty.")
153
 
154
  st.divider()
155
- st.caption("Powered by T5 AI Model | Built with Streamlit | v1.1")
 
7
 
8
  st.set_page_config(page_title="Docurizzer", layout="centered")
9
  st.title("📄 Docurizzer")
10
+ st.write("Intelligent document summarization tool")
11
 
12
  st.sidebar.header("Summarization Settings")
13
  min_len = st.sidebar.slider("Min Length", min_value=10, max_value=100, value=40, step=10)
 
22
  tokenizer, model = load_model()
23
 
24
def extract_text_from_image(image):
    """Run OCR over *image* with pytesseract and return the extracted text.

    Returns:
        str: the recognized text, or "" if OCR fails (the error is
        surfaced in the Streamlit UI instead of raising).
    """
    try:
        return pytesseract.image_to_string(image)
    except Exception as e:
        # Broad catch is deliberate best-effort: any OCR failure (missing
        # tesseract binary, unsupported image mode, ...) becomes a visible
        # UI error rather than crashing the app.
        st.error(f"Error extracting text from image: {str(e)}")
        return ""
30
 
31
def extract_text_from_pdf(pdf_file):
    """Extract and concatenate the text of every page in *pdf_file*.

    Pages with no extractable text (e.g. scanned/image-only pages) are
    skipped; each extracted page is terminated with a newline.

    Returns:
        str: the combined page text, or "" if the PDF cannot be parsed
        (the error is shown in the Streamlit UI instead of raising).
    """
    try:
        # Accumulate page texts and join once — avoids quadratic str +=.
        pages = []
        with pdfplumber.open(pdf_file) as pdf:
            for page in pdf.pages:
                page_text = page.extract_text()
                if page_text:
                    pages.append(page_text + "\n")
        return "".join(pages)
    except Exception as e:
        # Best-effort by design: report parse failures to the user and
        # return an empty string so the caller's flow continues.
        st.error(f"Error extracting text from PDF: {str(e)}")
        return ""
43
 
44
+ def summarize_text(text, min_len, max_len):
 
45
  if not text.strip():
46
  return None
47
  input_text = "summarize: " + text[:4000]
48
  inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
49
  input_token_count = inputs.input_ids.shape[1]
50
 
 
51
  if input_token_count < 15:
52
  return text.strip()
53
 
54
+ effective_max = min(max_len, max(int(input_token_count * 0.6), 20))
55
+ effective_min = 5
 
56
 
 
57
  if effective_min >= effective_max:
58
  effective_min = max(1, effective_max - 5)
59
 
 
60
  if input_token_count < 50:
61
  summary_ids = model.generate(
62
  inputs.input_ids,
63
  max_length=effective_max,
64
  min_length=effective_min,
65
+ do_sample=False,
66
+ num_beams=1,
67
  early_stopping=True
68
  )
69
  else:
 
153
  st.warning("No text could be extracted from the PDF. The PDF might be image-based or empty.")
154
 
155
  st.divider()
156
+ st.caption("Built with Streamlit | v1.2")