FranklinMoses committed on
Commit
c37482a
·
verified ·
1 Parent(s): 2f1bd27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -106
app.py CHANGED
@@ -1,130 +1,122 @@
1
  import streamlit as st
2
  import degirum as dg
3
- from PIL import Image
4
  import degirum_tools
 
5
 
6
# =============================
# Page configuration
# =============================
st.set_page_config(
    page_title="DeGirum License Plate Demo",
    page_icon="🚗",
    layout="centered",
)

# =============================
# Title and introduction
# =============================
st.title("License Plate Detection & Recognition (DeGirum Cloud)")

_INTRO_MD = """
This demo shows how to build a simple **Automatic License Plate Recognition (ALPR)**
pipeline using models hosted on **DeGirum Cloud**.

**What this app does:**
1. Detects license plates in an uploaded image.
2. Crops each plate region.
3. Runs an OCR model to read the characters on the plate.
4. Displays the original and annotated images **side by side**.
"""
st.markdown(_INTRO_MD)

# Sidebar: quick reference about the demo's models and dependencies.
st.sidebar.header("About this demo")
st.sidebar.markdown(
    """
- **Inference location:** DeGirum Cloud
- **Models used:**
  - LP detection: `yolov8n_relu6_global_lp_det--640x640_quant_n2x_orca1_1`
  - LP OCR: `yolov8s_relu6_lp_ocr_7ch--256x128_quant_n2x_orca1_1`
- **Libraries:**
  - `degirum`
  - `degirum_tools`
  - `streamlit`
"""
)
46
 
47
# =============================
# Configuration
# =============================
hw_location = "@cloud"
model_zoo_url = "https://cs.degirum.com/degirum/degirum"

lp_det_model_name = "yolov8n_relu6_global_lp_det--640x640_quant_n2x_orca1_1"
lp_ocr_model_name = "yolov8s_relu6_lp_ocr_7ch--256x128_quant_n2x_orca1_1"


# =============================
# Model loading (cached)
# =============================
@st.cache_resource(show_spinner=True)
def load_compound_model():
    """Connect to DeGirum Cloud and build the detection→OCR compound model.

    Returns a CroppingAndClassifyingCompoundModel: the detector finds plate
    boxes, each box is cropped, and the OCR model reads each crop.
    Cached by Streamlit so the connection/load happens once per process.
    """
    # NOTE(review): requires a DG_TOKEN entry in Streamlit secrets.
    zoo = dg.connect(hw_location, model_zoo_url, token=st.secrets["DG_TOKEN"])

    # Detector: PIL backend, red overlay boxes for the annotated image.
    detector = zoo.load_model(
        lp_det_model_name,
        image_backend="pil",
        overlay_color=(255, 0, 0),
        overlay_line_width=2,
        overlay_font_scale=2,
    )

    # OCR model reads characters from each cropped plate region.
    reader = zoo.load_model(
        lp_ocr_model_name,
        image_backend="pil",
    )

    # 5.0 = crop extent: each plate box is expanded by 5% before OCR.
    return degirum_tools.CroppingAndClassifyingCompoundModel(
        detector, reader, 5.0
    )
83
 
84
 
85
- crop_model = load_compound_model()
86
-
87
- # -----------------------------
88
- # File upload UI
89
- # -----------------------------
90
- st.subheader("Upload an image and run the models")
91
-
92
- uploaded_file = st.file_uploader(
93
- "Choose an image containing a vehicle / license plate",
94
- type=["jpg", "jpeg", "png"],
95
- )
96
-
97
- run_button = st.button("Run Inference", type="primary", disabled=uploaded_file is None)
98
-
99
- # -----------------------------
100
- # Inference
101
- # -----------------------------
102
- if run_button and uploaded_file is not None:
103
- with st.spinner("Running license plate detection and recognition..."):
104
- # Load full-res image and create a display copy
105
- orig_image = Image.open(uploaded_file).convert("RGB")
106
- display_image = orig_image.copy()
107
- display_image.thumbnail((640, 640), Image.Resampling.LANCZOS)
108
 
109
- # Run model on the resized display image
110
- inference_results = crop_model(display_image)
111
 
112
- st.subheader("Results")
113
-
114
- col1, col2 = st.columns(2, gap="medium")
115
-
116
- with col1:
117
- st.markdown("**Original image**")
118
- st.image(display_image, use_container_width=True)
119
-
120
- with col2:
121
- st.markdown("**Detection & recognition**")
122
- st.image(
123
- inference_results.image_overlay,
124
- caption="License plates with bounding boxes and labels",
125
- use_container_width=True,
126
- )
127
-
128
- st.caption("Inference complete. Detected plates and OCR results are shown on the right.")
129
- elif uploaded_file is None:
130
- st.info("👈 Upload an image to get started.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import degirum as dg
 
3
  import degirum_tools
4
+ from PIL import Image
5
 
 
 
 
# --- Page setup -------------------------------------------------------------
# Must run before any other Streamlit call that renders content.
st.set_page_config(
    page_title="Paddle OCR with DeGirum",
    page_icon="📝",
)

# --- Header -----------------------------------------------------------------
st.title("Paddle OCR Text Detection and Recognition")

_INTRO = (
    "Upload an image containing text and click **Run OCR** to detect text regions "
    "and recognize the text using PaddleOCR models on DeGirum / Hailo."
)
st.write(_INTRO)
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
@st.cache_resource
def load_crop_model():
    """
    Build the Paddle OCR pipeline used by this app.

    Loads the PaddleOCR text-detection and text-recognition models and wraps
    them in a CroppingAndClassifyingCompoundModel so each detected text box
    is cropped and fed to the recognizer. Cached by Streamlit so models are
    loaded once per process.
    """
    # Connection settings come from Streamlit secrets, with local defaults.
    host = st.secrets.get("DG_INFERENCE_HOST", "@local")
    zoo = st.secrets.get("DG_ZOO_URL", "degirum/hailo")
    devices = st.secrets.get("DG_DEVICE_TYPE", "HAILORT/HAILO8")
    token = st.secrets.get("DG_TOKEN", "")

    # dg.load_model expects a list of device types; a secret may hold a string.
    if isinstance(devices, str):
        devices = [devices]

    def _load(model_name):
        # Shared loader so both models use identical connection settings.
        return dg.load_model(
            model_name=model_name,
            inference_host_address=host,
            zoo_url=zoo,
            device_type=devices,
            token=token,
            image_backend="pil",
        )

    detector = _load("paddle_ocr_detection--544x960_quant_hailort_hailo8_1")
    recognizer = _load("paddle_ocr_recognition--48x320_quant_hailort_hailo8_1")

    # Compound model: detect text regions, crop each box, OCR each crop.
    crop_model = degirum_tools.CroppingAndClassifyingCompoundModel(
        detector,
        recognizer,
    )

    return crop_model
67
 
68
 
# Load (and cache) the compound OCR model once up front.
crop_model = load_crop_model()

st.text("Upload an image. Then click on the Run OCR button.")

# A form batches the uploader and the button into one submit event,
# so the script only reruns inference when "Run OCR" is pressed.
_ACCEPTED_TYPES = ["jpg", "jpeg", "png", "bmp", "tiff"]
with st.form("ocr_form"):
    uploaded_file = st.file_uploader("Input image", type=_ACCEPTED_TYPES)
    submitted = st.form_submit_button("Run OCR")
if submitted:
    if uploaded_file is None:
        st.warning("Please upload an image first.")
    else:
        # Decode the upload into an RGB PIL image (models use the PIL backend).
        image = Image.open(uploaded_file).convert("RGB")

        # Run detection + cropping + recognition in one compound-model call.
        inference_result = crop_model(image)

        # Annotated image with detected text boxes drawn on top.
        # FIX: use_column_width is deprecated/removed in current Streamlit;
        # use_container_width is the supported replacement (and matches the
        # earlier version of this app).
        st.image(
            inference_result.image_overlay,
            caption="Image with detected text regions",
            use_container_width=True,
        )

        # Best-effort tabular view of the OCR results; the result object's
        # to_pandas()/columns are backend-specific, so fall back to showing
        # the raw result rather than crashing the app.
        st.subheader("OCR Results")
        try:
            df = inference_result.to_pandas()
            st.dataframe(df)

            # Columns whose name suggests they hold recognized text.
            text_cols = [
                col
                for col in df.columns
                if "text" in col.lower() or "label" in col.lower()
            ]
            if text_cols:
                st.subheader("Recognized Text")
                all_texts = [
                    str(x)
                    for col in text_cols
                    for x in df[col].dropna().tolist()
                ]
                if all_texts:
                    st.write("\n".join(f"- {t}" for t in all_texts))
        except Exception:
            # Deliberate broad catch: display degrades gracefully instead of
            # erroring when the result lacks to_pandas() or expected columns.
            st.write("Raw result object:")
            st.write(inference_result)