Gopikanth123 committed on
Commit
0b186c2
·
verified ·
1 Parent(s): 6a4f190

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -26
app.py CHANGED
@@ -1,6 +1,5 @@
1
- # main program
2
- # hi
3
  import gradio as gr
 
4
  import openpyxl
5
  from openpyxl.styles import Font, Alignment
6
  import tempfile
@@ -9,6 +8,8 @@ from PIL import Image
9
  import cv2
10
  import numpy as np
11
  import os
 
 
12
 
13
  excel_tempfile_state = gr.State()
14
  roll_number_state = gr.State()
@@ -176,10 +177,10 @@ def task1(Examination, Date_Of_Exam, Program, Branch, Course, Name_Of_Faculty, A
176
 
177
  #configuring interface 1
178
  inputs = [
179
- gr.components.Textbox(label="Examination"),
180
- gr.components.Textbox(label="Date Of Exam"),
181
- gr.components.Textbox(label="Program"),
182
- gr.components.Textbox(label="Branch"),
183
  gr.components.Textbox(label="Course"),
184
  gr.components.Textbox(label="Name Of Faculty"),
185
  gr.components.Textbox(label="Academic Year"),
@@ -213,15 +214,13 @@ def predict_and_crop(image_np, api_key, project_name, model_version, confidence=
213
  return cropped_img_np, img # Return both cropped image and original image
214
 
215
  # Function to resize and insert cropped image
216
- def resize_and_insert(cropped_image, base_image_path, output_image_path):
217
- base_image = cv2.imread(base_image_path)
218
- base_height, base_width = base_image.shape[:2]
219
  base_aspect_ratio = base_width / base_height
220
  new_width = int(base_height * base_aspect_ratio)
221
  resized_cropped_img = cv2.resize(cropped_image, (new_width, base_height))
222
- base_image[0:base_height, 0:new_width] = resized_cropped_img
223
- # cv2.imwrite(output_image_path, base_image)
224
- return base_image
225
 
226
  # Function to convert string to integer based on confidence level
227
  def convert_str_int(var, conf):
@@ -273,10 +272,10 @@ def append_to_workbook(cells_data, excel_file_path):
273
 
274
  #All Functions for interface 2
275
  def task2(image_np):
276
- api_key = "LimDLja7HF1Asuk3vfSd"
277
  project_name = "marks_table_detection_lbrce"
278
  model_version = 1
279
- base_image_path = "base_img.jpg"
280
  temp_image_path = "temp_image.jpg"
281
  output_image_path = "merged_image.jpg"
282
 
@@ -289,7 +288,7 @@ def task2(image_np):
289
  cropped_image, original_image = predict_and_crop(image_np, api_key, project_name, model_version)
290
 
291
  # Resize and insert
292
- result_image = resize_and_insert(cropped_image, base_image_path, output_image_path)
293
 
294
  # Cell coordinates
295
  cell_coordinates = cell_coordinates = [(235, 129), (475, 223), (496, 125), (685, 225), (708, 127), (896, 225), (919, 125), (1140, 217), (232, 253), (473, 346), (500, 249), (687, 347), (708, 250), (896, 346), (920, 249), (1142, 345), (232, 375), (474, 442), (496, 371), (686, 442), (708, 373), (897, 444), (922, 373), (1147, 443)]
@@ -316,27 +315,23 @@ def task2(image_np):
316
  print(cells_data)
317
  return cropped_image, cells_data, excel_file_path
318
 
319
- # # interface two
320
- # iface2 = gr.Interface(
321
- # fn=task2,
322
- # inputs="image",
323
- # outputs="text",
324
- # title="Automating Examination Mark Entry with Deep Learning"
325
- # )
326
  iface2 = gr.Interface(
327
  fn=task2,
328
  elem_id="my-interface",
329
- inputs=gr.components.Image(type="numpy", label="Upload Image"),
 
 
330
  outputs=[
331
  gr.components.Image(type="numpy", label="Cropped Image"),
332
  gr.components.Textbox(label="Detected Marks"),
333
  gr.components.File(label="marks_sheet")
334
  ],
335
- title="Automating Examination Mark Entry with Deep Learning",
 
336
  theme="huggingface"
337
  )
338
-
339
  demo = gr.TabbedInterface([iface1, iface2], ["Configure Excel Sheet Data", "Extract marks from Answer Sheets"])
340
 
341
  # Run the interface
342
- demo.launch(share=True,debug=True)
 
 
 
1
  import gradio as gr
2
+ import random
3
  import openpyxl
4
  from openpyxl.styles import Font, Alignment
5
  import tempfile
 
8
  import cv2
9
  import numpy as np
10
  import os
11
+ from gradio_calendar import Calendar
12
+ import datetime
13
 
14
  excel_tempfile_state = gr.State()
15
  roll_number_state = gr.State()
 
177
 
178
  #configuring interface 1
179
  inputs = [
180
+ gr.Dropdown(["I Mid","II Mid"], value=["I Mid", "II Mid"], label="Examination"),
181
+ Calendar(type="date", label="Date Of Examination"),
182
+ gr.Dropdown(["B-Tech R20","M-Tech R20","MBA R20","B-Tech R17","M-Tech R17","MBA-R17"], value=["B-Tech R20","M-Tech R20","MBA R20","B-Tech R17","M-Tech R17","MBA-R17"], label="Program"),
183
+ gr.Dropdown(["ASE","AI&DS","Civil","CSE","CSE(AI&ML)","ECE","EEE","IT","MECH","MBA"], value=["ASE","AI&DS","Civil","CSE","CSE(AI&ML)","ECE","EEE","IT","MECH","MBA"], label="Branch"),
184
  gr.components.Textbox(label="Course"),
185
  gr.components.Textbox(label="Name Of Faculty"),
186
  gr.components.Textbox(label="Academic Year"),
 
214
  return cropped_img_np, img # Return both cropped image and original image
215
 
216
  # Function to resize and insert cropped image
217
+ def resize_and_insert(cropped_image, output_image_path):
218
+ # base_image = cv2.imread(base_image_path)
219
+ base_height, base_width = (460, 1158)
220
  base_aspect_ratio = base_width / base_height
221
  new_width = int(base_height * base_aspect_ratio)
222
  resized_cropped_img = cv2.resize(cropped_image, (new_width, base_height))
223
+ return resized_cropped_img
 
 
224
 
225
  # Function to convert string to integer based on confidence level
226
  def convert_str_int(var, conf):
 
272
 
273
  #All Functions for interface 2
274
  def task2(image_np):
275
+ api_key = "UyAumhQJOJpo7vUu3LaK"
276
  project_name = "marks_table_detection_lbrce"
277
  model_version = 1
278
+ base_image_path = "base_img.png"
279
  temp_image_path = "temp_image.jpg"
280
  output_image_path = "merged_image.jpg"
281
 
 
288
  cropped_image, original_image = predict_and_crop(image_np, api_key, project_name, model_version)
289
 
290
  # Resize and insert
291
+ result_image = resize_and_insert(cropped_image, output_image_path)
292
 
293
  # Cell coordinates
294
  cell_coordinates = cell_coordinates = [(235, 129), (475, 223), (496, 125), (685, 225), (708, 127), (896, 225), (919, 125), (1140, 217), (232, 253), (473, 346), (500, 249), (687, 347), (708, 250), (896, 346), (920, 249), (1142, 345), (232, 375), (474, 442), (496, 371), (686, 442), (708, 373), (897, 444), (922, 373), (1147, 443)]
 
315
  print(cells_data)
316
  return cropped_image, cells_data, excel_file_path
317
 
318
+
 
 
 
 
 
 
319
  iface2 = gr.Interface(
320
  fn=task2,
321
  elem_id="my-interface",
322
+ inputs=[
323
+ gr.components.Image(type="numpy", label="Upload Image")
324
+ ],
325
  outputs=[
326
  gr.components.Image(type="numpy", label="Cropped Image"),
327
  gr.components.Textbox(label="Detected Marks"),
328
  gr.components.File(label="marks_sheet")
329
  ],
330
+ # examples=[['IMG_20240215_210403.jpg'],['IMG_20240215_210530.jpg'],['IMG_20240215_210534.jpg'],['IMG_20240215_210611.jpg']],
331
+ title="Automating Examination Mark Entry with Deep Learning",
332
  theme="huggingface"
333
  )
 
334
  demo = gr.TabbedInterface([iface1, iface2], ["Configure Excel Sheet Data", "Extract marks from Answer Sheets"])
335
 
336
  # Run the interface
337
+ demo.launch(share=True,debug=True)