ken4 committed on
Commit
2c11630
·
verified ·
1 Parent(s): fbcd5bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -20
app.py CHANGED
@@ -1,11 +1,12 @@
1
  import pandas as pd
2
  import PIL
3
- from PIL import Image, ImageDraw
 
4
  import gradio as gr
5
  import torch
6
  import easyocr
7
 
8
- # Download example images
9
  urls = {
10
  "english.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png",
11
  "thai.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/thai.jpg",
@@ -15,6 +16,7 @@ urls = {
15
  "korean.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/korean.png",
16
  "Hindi.jpeg": "https://i.imgur.com/mwQFd7G.jpeg"
17
  }
 
18
  for filename, url in urls.items():
19
  torch.hub.download_url_to_file(url, filename)
20
 
@@ -25,28 +27,25 @@ def draw_boxes(image, bounds, color='yellow', width=2):
25
  draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
26
  return image
27
 
28
- def inference(img_file, lang):
29
- img_path = img_file.name
30
  reader = easyocr.Reader(lang)
31
  bounds = reader.readtext(img_path)
32
-
33
  im = PIL.Image.open(img_path)
34
  draw_boxes(im, bounds)
35
  im.save("result.jpg")
36
 
37
  df = pd.DataFrame(bounds)[[1, 2]]
38
  df.columns = ["text", "confidence"]
 
39
  return "result.jpg", df
40
 
41
  choices = [
42
- "abq","ady","af","ang","ar","as","ava","az",
43
- "be","bg","bh","bho","bn","bs","ch_sim","ch_tra",
44
- "che","cs","cy","da","dar","de","en","es","et","fa","fr","ga",
45
- "gom","hi","hr","hu","id","inh","is","it","ja","kbd","kn","ko",
46
- "ku","la","lbe","lez","lt","lv","mah","mai","mi","mn","mr","ms",
47
- "mt","ne","new","nl","no","oc","pi","pl","pt","ro","ru","rs_cyrillic",
48
- "rs_latin","sck","sk","sl","sq","sv","sw","ta","tab","te","th","tjk",
49
- "tl","tr","ug","uk","ur","uz","vi"
50
  ]
51
 
52
  examples = [
@@ -61,24 +60,24 @@ examples = [
61
 
62
  with gr.Blocks(css=".output_image, .input_image {height: 40rem; width: 100%;}") as demo:
63
 
64
- gr.Markdown("# **EasyOCR Web Interface**")
65
- gr.Markdown("Upload an image, select languages, and run OCR.")
66
 
67
  with gr.Row():
68
- file_input = gr.File(label="Upload Image", file_types=["image"])
69
- lang_input = gr.CheckboxGroup(choices=choices, value=["en"], label="Languages")
70
 
71
  run_btn = gr.Button("Run OCR")
72
 
73
  with gr.Row():
74
- output_img = gr.Image(label="Detected Image")
75
  output_df = gr.Dataframe(headers=["text", "confidence"], label="Detected Text")
76
 
77
- gr.Examples(examples, [file_input, lang_input], [output_img, output_df], fn=inference)
78
 
79
  run_btn.click(
80
  inference,
81
- inputs=[file_input, lang_input],
82
  outputs=[output_img, output_df]
83
  )
84
 
 
1
  import pandas as pd
2
  import PIL
3
+ from PIL import Image
4
+ from PIL import ImageDraw
5
  import gradio as gr
6
  import torch
7
  import easyocr
8
 
9
+ # Download examples
10
  urls = {
11
  "english.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png",
12
  "thai.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/thai.jpg",
 
16
  "korean.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/korean.png",
17
  "Hindi.jpeg": "https://i.imgur.com/mwQFd7G.jpeg"
18
  }
19
+
20
  for filename, url in urls.items():
21
  torch.hub.download_url_to_file(url, filename)
22
 
 
27
  draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
28
  return image
29
 
30
def inference(img_path, lang):
    """Run OCR on one image and return an annotated copy plus the results.

    Parameters
    ----------
    img_path : str
        Filesystem path of the input image.
    lang : list[str]
        EasyOCR language codes (e.g. ``["en"]``) passed to ``easyocr.Reader``.

    Returns
    -------
    tuple[str, pandas.DataFrame]
        Path of the annotated image ("result.jpg") and a DataFrame with
        columns ``["text", "confidence"]`` (empty if nothing was detected).
    """
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img_path)

    # Draw the detected bounding boxes onto the image and save the result.
    im = PIL.Image.open(img_path)
    draw_boxes(im, bounds)
    im.save("result.jpg")

    # bounds is a list of (box, text, confidence) triples. Guard the empty
    # case: pd.DataFrame([])[[1, 2]] would raise KeyError when no text is
    # detected in the image.
    if bounds:
        df = pd.DataFrame(bounds)[[1, 2]]
        df.columns = ["text", "confidence"]
    else:
        df = pd.DataFrame(columns=["text", "confidence"])

    return "result.jpg", df
42
 
43
# Language codes accepted by easyocr.Reader; shown as the checkbox options
# in the UI below. See the EasyOCR documentation for the code -> language
# mapping.
choices = [
    "abq", "ady", "af", "ang", "ar", "as", "ava", "az",
    "be", "bg", "bh", "bho", "bn", "bs", "ch_sim", "ch_tra",
    "che", "cs", "cy", "da", "dar", "de", "en", "es",
    "et", "fa", "fr", "ga", "gom", "hi", "hr", "hu",
    "id", "inh", "is", "it", "ja", "kbd", "kn", "ko",
    "ku", "la", "lbe", "lez", "lt", "lv", "mah", "mai",
    "mi", "mn", "mr", "ms", "mt", "ne", "new", "nl",
    "no", "oc", "pi", "pl", "pt", "ro", "ru", "rs_cyrillic",
    "rs_latin", "sck", "sk", "sl", "sq", "sv", "sw", "ta",
    "tab", "te", "th", "tjk", "tl", "tr", "ug", "uk",
    "ur", "uz", "vi",
]
50
 
51
  examples = [
 
60
 
61
# Gradio UI: image + language selection in, annotated image + text table out.
with gr.Blocks(css=".output_image, .input_image {height: 40rem; width: 100%;}") as demo:

    # Page header.
    gr.Markdown("# **EasyOCR**")
    gr.Markdown("Upload an image and select languages to extract text.")

    # Inputs: the image to scan (as a file path, matching inference's
    # img_path parameter) and the OCR language set.
    with gr.Row():
        image_input = gr.Image(type="filepath", label="Input Image")
        lang_input = gr.CheckboxGroup(choices=choices, value=["en"], label="Language")

    run_btn = gr.Button("Run OCR")

    # Outputs: annotated image and the recognized text/confidence table.
    with gr.Row():
        output_img = gr.Image(label="Output")
        output_df = gr.Dataframe(headers=["text", "confidence"], label="Detected Text")

    # Clickable example inputs (examples list is defined above in the file).
    gr.Examples(examples, [image_input, lang_input], [output_img, output_df], fn=inference)

    # Wire the button to the OCR function.
    run_btn.click(
        inference,
        inputs=[image_input, lang_input],
        outputs=[output_img, output_df]
    )
83