FranklinMoses committed on
Commit
bbbba05
·
verified ·
1 Parent(s): 69dfe0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -44,9 +44,9 @@ st.write(
44
  st.text("Upload an image. Then click on the Run OCR button.")
45
 
46
  # -----------------------
47
- # Hardcoded configuration (from your notebook)
48
  # -----------------------
49
- INFERENCE_HOST_ADDRESS = "@cloud" # <- changed from @local to @cloud
50
  ZOO_URL = "degirum/hailo"
51
  DEVICE_TYPE = ["HAILORT/HAILO8"]
52
 
@@ -59,9 +59,9 @@ def load_crop_model():
59
  """
60
  Load Paddle OCR detection + recognition models and wrap them in a
61
  CroppingAndClassifyingCompoundModel so detection crops feed into OCR.
62
- Uses the same params as your notebook, just with inference_host_address=@cloud.
 
63
  """
64
- # Token for DeGirum Cloud (keep in st.secrets)
65
  token = st.secrets.get("DG_TOKEN", "")
66
 
67
  # Load paddle ocr text detection model
@@ -71,6 +71,7 @@ def load_crop_model():
71
  zoo_url=ZOO_URL,
72
  device_type=DEVICE_TYPE,
73
  token=token,
 
74
  )
75
 
76
  # Load paddle ocr text recognition model
@@ -80,6 +81,7 @@ def load_crop_model():
80
  zoo_url=ZOO_URL,
81
  device_type=DEVICE_TYPE,
82
  token=token,
 
83
  )
84
 
85
  # Create a compound cropping model (det → crops → rec)
@@ -115,14 +117,13 @@ with st.form("ocr_form"):
115
  # Run AI inference on image
116
  inference_result = crop_model(image)
117
 
118
- # Display combined results (image with boxes, if available)
119
  if hasattr(inference_result, "image_overlay"):
120
  st.image(
121
  inference_result.image_overlay,
122
  caption="OCR Output (detected text regions)",
123
  )
124
  else:
125
- # Fallback if image_overlay is not present
126
  st.image(
127
  image,
128
  caption="Input image (no overlay available)",
 
44
  st.text("Upload an image. Then click on the Run OCR button.")
45
 
46
  # -----------------------
47
+ # Hardcoded configuration (your notebook params)
48
  # -----------------------
49
+ INFERENCE_HOST_ADDRESS = "@cloud" # <-- changed from @local to @cloud
50
  ZOO_URL = "degirum/hailo"
51
  DEVICE_TYPE = ["HAILORT/HAILO8"]
52
 
 
59
  """
60
  Load Paddle OCR detection + recognition models and wrap them in a
61
  CroppingAndClassifyingCompoundModel so detection crops feed into OCR.
62
+
63
+ IMPORTANT: We set image_backend="pil" because the app passes a PIL.Image.
64
  """
 
65
  token = st.secrets.get("DG_TOKEN", "")
66
 
67
  # Load paddle ocr text detection model
 
71
  zoo_url=ZOO_URL,
72
  device_type=DEVICE_TYPE,
73
  token=token,
74
+ image_backend="pil", # <-- key fix
75
  )
76
 
77
  # Load paddle ocr text recognition model
 
81
  zoo_url=ZOO_URL,
82
  device_type=DEVICE_TYPE,
83
  token=token,
84
+ image_backend="pil", # <-- key fix
85
  )
86
 
87
  # Create a compound cropping model (det → crops → rec)
 
117
  # Run AI inference on image
118
  inference_result = crop_model(image)
119
 
120
+ # Display image with boxes (if overlay is available)
121
  if hasattr(inference_result, "image_overlay"):
122
  st.image(
123
  inference_result.image_overlay,
124
  caption="OCR Output (detected text regions)",
125
  )
126
  else:
 
127
  st.image(
128
  image,
129
  caption="Input image (no overlay available)",