ken4 committed on
Commit
589565d
·
verified ·
1 Parent(s): c0f62c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -50
app.py CHANGED
@@ -6,13 +6,19 @@ import gradio as gr
6
  import torch
7
  import easyocr
8
 
9
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png', 'english.png')
10
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/thai.jpg', 'thai.jpg')
11
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/french.jpg', 'french.jpg')
12
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg', 'chinese.jpg')
13
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg', 'japanese.jpg')
14
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/korean.png', 'korean.png')
15
- torch.hub.download_url_to_file('https://i.imgur.com/mwQFd7G.jpeg', 'Hindi.jpeg')
 
 
 
 
 
 
16
 
17
  def draw_boxes(image, bounds, color='yellow', width=2):
18
  draw = ImageDraw.Draw(image)
@@ -24,59 +30,55 @@ def draw_boxes(image, bounds, color='yellow', width=2):
24
  def inference(img_path, lang):
25
  reader = easyocr.Reader(lang)
26
  bounds = reader.readtext(img_path)
 
27
  im = PIL.Image.open(img_path)
28
  draw_boxes(im, bounds)
29
- im.save('result.jpg')
30
 
31
- # return dataframe with only text + confidence
32
  df = pd.DataFrame(bounds)[[1, 2]]
33
  df.columns = ["text", "confidence"]
 
 
34
 
35
- return 'result.jpg', df
36
-
37
- title = 'EasyOCR'
38
- description = """
39
- EasyOCR demo supports 80+ languages.
40
- Upload an image and choose a language to extract text.
41
- """
42
- article = "<p style='text-align: center'><a href='https://www.jaided.ai/easyocr/'>EasyOCR Website</a> | <a href='https://github.com/JaidedAI/EasyOCR'>GitHub Repo</a></p>"
43
 
44
  examples = [
45
- ['english.png', ['en']],
46
- ['thai.jpg', ['th']],
47
- ['french.jpg', ['fr', 'en']],
48
- ['chinese.jpg', ['ch_sim', 'en']],
49
- ['japanese.jpg', ['ja', 'en']],
50
- ['korean.png', ['ko', 'en']],
51
- ['Hindi.jpeg', ['hi', 'en']]
52
  ]
53
 
54
- css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
55
 
56
- choices = [
57
- "abq","ady","af","ang","ar","as","ava","az","be","bg","bh","bho","bn","bs","ch_sim","ch_tra","che",
58
- "cs","cy","da","dar","de","en","es","et","fa","fr","ga","gom","hi","hr","hu","id","inh","is","it",
59
- "ja","kbd","kn","ko","ku","la","lbe","lez","lt","lv","mah","mai","mi","mn","mr","ms","mt","ne",
60
- "new","nl","no","oc","pi","pl","pt","ro","ru","rs_cyrillic","rs_latin","sck","sk","sl","sq","sv",
61
- "sw","ta","tab","te","th","tjk","tl","tr","ug","uk","ur","uz","vi"
62
- ]
 
 
 
 
 
 
 
63
 
64
- demo = gr.Interface(
65
- fn=inference,
66
- inputs=[
67
- gr.Image(type="filepath", label="Input Image"),
68
- gr.CheckboxGroup(choices=choices, value=["en"], label="Language")
69
- ],
70
- outputs=[
71
- gr.Image(label="Output"),
72
- gr.Dataframe(label="Detected Text")
73
- ],
74
- title=title,
75
- description=description,
76
- article=article,
77
- examples=examples,
78
- css=css,
79
- enable_queue=True
80
- )
81
 
82
- demo.launch(debug=True)
 
6
  import torch
7
  import easyocr
8
 
9
+ # Download examples
10
+ urls = {
11
+ "english.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png",
12
+ "thai.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/thai.jpg",
13
+ "french.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/french.jpg",
14
+ "chinese.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg",
15
+ "japanese.jpg": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg",
16
+ "korean.png": "https://github.com/JaidedAI/EasyOCR/raw/master/examples/korean.png",
17
+ "Hindi.jpeg": "https://i.imgur.com/mwQFd7G.jpeg"
18
+ }
19
+
20
+ for filename, url in urls.items():
21
+ torch.hub.download_url_to_file(url, filename)
22
 
23
  def draw_boxes(image, bounds, color='yellow', width=2):
24
  draw = ImageDraw.Draw(image)
 
30
  def inference(img_path, lang):
31
  reader = easyocr.Reader(lang)
32
  bounds = reader.readtext(img_path)
33
+
34
  im = PIL.Image.open(img_path)
35
  draw_boxes(im, bounds)
36
+ im.save("result.jpg")
37
 
 
38
  df = pd.DataFrame(bounds)[[1, 2]]
39
  df.columns = ["text", "confidence"]
40
+
41
+ return "result.jpg", df
42
 
43
+ choices = [
44
+ "abq","ady","af","ang","ar","as","ava","az","be","bg","bh","bho","bn","bs","ch_sim","ch_tra",
45
+ "che","cs","cy","da","dar","de","en","es","et","fa","fr","ga","gom","hi","hr","hu","id","inh","is",
46
+ "it","ja","kbd","kn","ko","ku","la","lbe","lez","lt","lv","mah","mai","mi","mn","mr","ms","mt","ne",
47
+ "new","nl","no","oc","pi","pl","pt","ro","ru","rs_cyrillic","rs_latin","sck","sk","sl","sq","sv","sw",
48
+ "ta","tab","te","th","tjk","tl","tr","ug","uk","ur","uz","vi"
49
+ ]
 
50
 
51
  examples = [
52
+ ["english.png", ["en"]],
53
+ ["thai.jpg", ["th"]],
54
+ ["french.jpg", ["fr", "en"]],
55
+ ["chinese.jpg", ["ch_sim", "en"]],
56
+ ["japanese.jpg", ["ja", "en"]],
57
+ ["korean.png", ["ko", "en"]],
58
+ ["Hindi.jpeg", ["hi", "en"]]
59
  ]
60
 
61
+ with gr.Blocks(css=".output_image, .input_image {height: 40rem; width: 100%;}") as demo:
62
 
63
+ gr.Markdown("# **EasyOCR**")
64
+ gr.Markdown("Upload an image and select languages to extract text.")
65
+
66
+ with gr.Row():
67
+ image_input = gr.Image(type="filepath", label="Input Image")
68
+ lang_input = gr.CheckboxGroup(choices=choices, value=["en"], label="Language")
69
+
70
+ run_btn = gr.Button("Run OCR")
71
+
72
+ with gr.Row():
73
+ output_img = gr.Image(label="Output")
74
+ output_df = gr.Dataframe(headers=["text", "confidence"], label="Detected Text")
75
+
76
+ gr.Examples(examples, [image_input, lang_input], [output_img, output_df], fn=inference)
77
 
78
+ run_btn.click(
79
+ inference,
80
+ inputs=[image_input, lang_input],
81
+ outputs=[output_img, output_df]
82
+ )
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
+ demo.launch()