ethanrom committed on
Commit
55e2b35
·
1 Parent(s): fc7d98a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -16
app.py CHANGED
@@ -14,22 +14,38 @@ def main():
14
  st.image("image.jpg", use_column_width=True)
15
  with col2:
16
  st.markdown(
17
- """
18
- <h3 style='text-align: center;'>Lorem ipsum dolor sit amet!</h3>
19
-
20
- <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam hendrerit nisi sed sollicitudin pellentesque.</p>
21
- <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam hendrerit nisi sed sollicitudin pellentesque.</p>
22
- """,
23
- unsafe_allow_html=True)
24
- st.markdown(
25
- """
26
- <div style='text-align: center;'>
27
- <h3 style='text-align: center;'>About the demo</h3>
28
- <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam hendrerit nisi sed sollicitudin pellentesque.</p>
29
-
30
- </div>
31
- """,
32
- unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  with tabs[1]:
35
  st.write('## Find Order')
 
14
  st.image("image.jpg", use_column_width=True)
15
  with col2:
16
  st.markdown(
17
+ """
18
+ <h3 style='text-align: center;'>Jewellery Font Type Detection - Proposed Solution Demo</h3>
19
+
20
+ <p>The challenge is to identify the correct order ID from a list by using both the OCR detected text and font type of custom-made jewelry.</p>
21
+
22
+ <h4>Proposed Solution</h4>
23
+ <p>Our proposed solution involves bolstering an OCR engine with a custom-trained CNN for font type classification. In this demo, we have trained two custom CNNs to classify two font types using a synthetic dataset of 1000 images for each font, generated using NumPy, PIL, and OpenCV. The dataset consists of text images rendered with different fonts, utilizing variations in font size and positioning to create diversity. However, training an accurate custom CNN for the given problem requires thousands of images due to the similar nature of the font types used in custom jewelry.</p>
24
+
25
+ <p>There are two potential solutions to overcome this challenge:</p>
26
+
27
+ <h5>Solution 1</h5>
28
+ <p>Pre-process the image to a level where we can generate a similar synthetic dataset.</p>
29
+
30
+ <h5>Solution 2</h5>
31
+ <p>Use Photoshop batch actions to create thousands of realistic images.</p>
32
+
33
+ """, unsafe_allow_html=True
34
+ )
35
+
36
+ st.subheader("Otsu's Thresholding")
37
+ col3, col4 = st.columns([2, 1])
38
+ with col4:
39
+ st.image('otsu.PNG', use_column_width=True)
40
+
41
+ with col3:
42
+ st.markdown("""<p>In initial testing, we found that Otsu thresholding can pre-process images to a similar level of a synthetic dataset. See the image :</p>
43
+ <p>Otsu's method assumes that the image contains two distinct intensity distributions, corresponding to the foreground and background regions.
44
+ It calculates the threshold that minimizes the intra-class variance or maximizes the inter-class variance.
45
+ By choosing the threshold that maximizes the inter-class variance, Otsu's thresholding effectively separates the two classes, resulting in a binary image.</p> """, unsafe_allow_html=True)
46
+
47
+ colab_link = '[<img src="https://colab.research.google.com/assets/colab-badge.svg">](https://colab.research.google.com/drive/1tq35g7ym1c73uDlAcy2KChIXsqlNY-RL?usp=sharing)'
48
+ st.markdown(colab_link, unsafe_allow_html=True)
49
 
50
  with tabs[1]:
51
  st.write('## Find Order')