internationalscholarsprogram committed on
Commit
a45863a
·
1 Parent(s): c9990de

Update handbook sync engine

Browse files
Files changed (2) hide show
  1. app.py +352 -123
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,19 +1,39 @@
1
  import os
2
  import json
 
 
3
 
4
  import gradio as gr
5
- from docx import Document # from python-docx
6
  from deepdiff import DeepDiff
7
  import mysql.connector
8
 
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # -----------------------------
11
  # DB CONNECTION HELPERS
12
  # -----------------------------
13
  def get_db_connection():
14
  """
15
  Create and return a MySQL connection using environment variables.
16
- Set these in your HF Space settings:
17
  DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME
18
  """
19
  return mysql.connector.connect(
@@ -21,40 +41,54 @@ def get_db_connection():
21
  port=int(os.getenv("DB_PORT", "3306")),
22
  user=os.getenv("DB_USER", "root"),
23
  password=os.getenv("DB_PASSWORD", ""),
24
- database=os.getenv("DB_NAME", "test"),
25
  )
26
 
27
 
28
- def fetch_db_json(doc_id: str):
29
  """
30
- Fetch existing JSON from the database for a given doc_id.
31
- Assumes a table 'documents' with columns: id, json_data.
32
  """
33
  conn = get_db_connection()
34
  try:
35
  cursor = conn.cursor()
36
- query = "SELECT json_data FROM documents WHERE id = %s"
37
- cursor.execute(query, (doc_id,))
 
 
 
 
 
38
  row = cursor.fetchone()
39
- if not row or row[0] is None:
 
 
 
 
 
 
 
40
  return None
41
- # If stored as TEXT, parse it as JSON.
42
- return json.loads(row[0])
43
  finally:
44
  cursor.close()
45
  conn.close()
46
 
47
 
48
- def update_db_json(doc_id: str, new_data: dict):
49
  """
50
- Update JSON content in the database for a given doc_id.
51
  """
52
  conn = get_db_connection()
53
  try:
54
  cursor = conn.cursor()
55
  new_json_str = json.dumps(new_data, ensure_ascii=False)
56
- query = "UPDATE documents SET json_data = %s WHERE id = %s"
57
- cursor.execute(query, (new_json_str, doc_id))
 
 
 
 
58
  conn.commit()
59
  finally:
60
  cursor.close()
@@ -62,152 +96,347 @@ def update_db_json(doc_id: str, new_data: dict):
62
 
63
 
64
  # -----------------------------
65
- # DOCX JSON
66
  # -----------------------------
67
- def docx_to_python_dict(file_obj):
68
- if file_obj is None:
69
- raise ValueError("No file uploaded")
70
 
71
- document = Document(file_obj.name)
 
 
 
 
 
 
 
 
72
 
73
- # Example: convert paragraphs into a simple structured dict
74
- paragraphs = [p.text for p in document.paragraphs if p.text.strip() != ""]
 
 
 
 
 
75
 
76
- data = {
77
- "paragraphs": paragraphs,
78
- "paragraph_count": len(paragraphs),
79
- }
80
- return data
81
 
 
 
 
 
 
82
 
83
- # -----------------------------
84
- # GRADIO CALLBACKS
85
- # -----------------------------
86
- def convert_and_compare(file_obj, doc_id):
87
  """
88
- 1. Convert DOCX to JSON (Python dict)
89
- 2. Fetch old JSON from DB
90
- 3. Compare and return:
91
- - new_json_str
92
- - old_json_str (or message if none)
93
- - diff_str
94
  """
95
- if file_obj is None:
96
- return "{}", "No existing record (or doc_id missing)", "No file uploaded."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
- if not doc_id:
99
- return "{}", "{}", "Please provide a doc_id to look up in the database."
100
 
101
- # 1) DOCX dict
102
- try:
103
- new_data = docx_to_python_dict(file_obj)
104
- except Exception as e:
105
- return "{}", "{}", f"Error parsing DOCX: {e}"
106
 
107
- new_json_str = json.dumps(new_data, indent=2, ensure_ascii=False)
 
108
 
109
- # 2) Fetch existing from DB
110
- try:
111
- old_data = fetch_db_json(doc_id)
112
- except Exception as e:
113
- return new_json_str, "{}", f"Error fetching from DB: {e}"
114
 
115
- if old_data is None:
116
- old_json_str = "No existing JSON found for this doc_id."
117
- diff_str = "No existing data to compare. You can choose to update DB with this new JSON."
118
- return new_json_str, old_json_str, diff_str
 
119
 
120
- old_json_str = json.dumps(old_data, indent=2, ensure_ascii=False)
121
 
122
- # 3) Compare with DeepDiff
123
- try:
124
- diff = DeepDiff(old_data, new_data, ignore_order=True)
125
- if not diff:
126
- diff_str = "No differences detected between DOCX JSON and DB JSON."
127
- else:
128
- diff_str = json.dumps(diff, indent=2, ensure_ascii=False, default=str)
129
- except Exception as e:
130
- diff_str = f"Error computing diff: {e}"
131
 
132
- return new_json_str, old_json_str, diff_str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
 
134
 
135
- def apply_update(doc_id, new_json_str):
 
136
  """
137
- Apply the new JSON to the DB if user confirms.
 
 
 
 
 
 
 
 
 
 
138
  """
139
- if not doc_id:
140
- return "Please provide a doc_id."
141
 
142
- if not new_json_str.strip():
143
- return "No new JSON provided to update."
 
 
 
144
 
145
- try:
146
- new_data = json.loads(new_json_str)
147
- except Exception as e:
148
- return f"Error parsing new JSON: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
 
150
  try:
151
- update_db_json(doc_id, new_data)
152
  except Exception as e:
153
- return f"Error updating DB: {e}"
154
-
155
- return "Database updated successfully with new JSON."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
 
158
  # -----------------------------
159
  # GRADIO UI
160
  # -----------------------------
161
  with gr.Blocks() as demo:
162
- gr.Markdown("# DOCXJSON DB Sync")
163
  gr.Markdown(
164
- "Upload a Word (.docx) file, enter the document ID from your database, "
165
- "and compare the generated JSON with what is stored in the DB. "
166
- "If there are changes, you can update the DB."
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  )
168
 
169
- with gr.Row():
170
- file_input = gr.File(label="Upload .docx file")
171
- doc_id_input = gr.Textbox(label="Document ID (as stored in DB)", placeholder="e.g. 123")
172
-
173
- with gr.Row():
174
- new_json_output = gr.Code(
175
- label="New JSON (from DOCX)",
176
- language="json",
177
- interactive=True,
178
- )
179
- old_json_output = gr.Code(
180
- label="Existing JSON (from DB)",
181
- language="json",
182
- interactive=False,
183
- )
184
 
185
- diff_output = gr.Code(
186
- label="Diff (DeepDiff result)",
187
- language="json",
 
188
  interactive=False,
189
  )
190
 
191
- compare_button = gr.Button("Convert & Compare")
192
- compare_button.click(
193
- fn=convert_and_compare,
194
- inputs=[file_input, doc_id_input],
195
- outputs=[new_json_output, old_json_output, diff_output],
196
- )
197
-
198
- gr.Markdown("## Apply Update")
199
- gr.Markdown(
200
- "If you're happy with the changes, click below to write the **New JSON** "
201
- "back into the database for this `doc_id`."
202
- )
203
-
204
- update_status = gr.Textbox(label="Update Status", interactive=False)
205
-
206
- update_button = gr.Button("Update DB with New JSON")
207
- update_button.click(
208
- fn=apply_update,
209
- inputs=[doc_id_input, new_json_output],
210
- outputs=[update_status],
211
  )
212
 
213
  if __name__ == "__main__":
 
1
  import os
2
  import json
3
+ import re
4
+ from typing import Dict, Any, List, Tuple
5
 
6
  import gradio as gr
7
+ from docx import Document
8
  from deepdiff import DeepDiff
9
  import mysql.connector
10
 
11
 
12
# -----------------------------
# CONFIG: UNIVERSITY MAPPING
# -----------------------------
# Maps the exact university heading text as it appears in the handbook DOCX
# to the `university_id` primary key used by `university_handbook_sections`.
# The heading text must match the DOCX verbatim (including the parenthesised
# abbreviation) because block splitting matches on it.
UNIVERSITY_ID_MAP = {
    "Indiana University of Pennsylvania (IUP)": 1,
    "Missouri State University": 2,
    "University Of Kentucky (UK)": 3,
    "University of Louisville (UofL)": 4,
    "University of Delaware (UD)": 6,
    "Grand Valley State University": 7,
    "Quinnipiac University": 9,
    "William Jessup University": 10,
    "Wilkes University": 14,
    "University of South Dakota (USD)": 16,
    # Extend as you add more rows to university_handbook_sections
}
28
+
29
+
30
  # -----------------------------
31
  # DB CONNECTION HELPERS
32
  # -----------------------------
33
  def get_db_connection():
34
  """
35
  Create and return a MySQL connection using environment variables.
36
+ Set these in HF Space secrets:
37
  DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME
38
  """
39
  return mysql.connector.connect(
 
41
  port=int(os.getenv("DB_PORT", "3306")),
42
  user=os.getenv("DB_USER", "root"),
43
  password=os.getenv("DB_PASSWORD", ""),
44
+ database=os.getenv("DB_NAME", ""),
45
  )
46
 
47
 
48
def fetch_section_json(university_id: int, section_key: str):
    """Load the stored section JSON for one (university_id, section_key) row.

    Returns the parsed dict, or None when the row is absent, the column is
    empty, or the stored text is not valid JSON.  Malformed JSON is
    deliberately treated as "missing" so a later sync overwrites it.
    """
    conn = get_db_connection()
    try:
        cursor = conn.cursor()
        cursor.execute(
            """
            SELECT section_json
            FROM university_handbook_sections
            WHERE university_id = %s AND section_key = %s
            LIMIT 1
            """,
            (university_id, section_key),
        )
        row = cursor.fetchone()
        # Missing row or NULL/empty column: nothing stored yet.
        if not row or not row[0]:
            return None
        try:
            return json.loads(row[0])
        except Exception:
            # JSON malformed in DB – treat as None to force overwrite.
            return None
    finally:
        cursor.close()
        conn.close()
76
 
77
 
78
+ def update_section_json(university_id: int, section_key: str, new_data: Dict[str, Any]):
79
  """
80
+ Update section_json in DB for given university_id + section_key.
81
  """
82
  conn = get_db_connection()
83
  try:
84
  cursor = conn.cursor()
85
  new_json_str = json.dumps(new_data, ensure_ascii=False)
86
+ query = """
87
+ UPDATE university_handbook_sections
88
+ SET section_json = %s
89
+ WHERE university_id = %s AND section_key = %s
90
+ """
91
+ cursor.execute(query, (new_json_str, university_id, section_key))
92
  conn.commit()
93
  finally:
94
  cursor.close()
 
96
 
97
 
98
  # -----------------------------
99
+ # DOCX PARSING HELPERS
100
  # -----------------------------
101
def normalize_text(text: str) -> str:
    """Collapse every run of whitespace in *text* to a single space.

    `str.split()` with no argument already discards leading/trailing
    whitespace, so the result of the join never needs a further strip
    (the original trailing `.strip()` was redundant and has been removed).
    """
    return " ".join(text.split())
103
+
104
 
105
def split_doc_by_university(doc: Document) -> Dict[str, List[str]]:
    """Split the docx into one block of paragraph texts per university.

    A block starts at any paragraph that equals — or begins with — a key of
    UNIVERSITY_ID_MAP and runs until the next such heading (or end of doc).

    Returns dict: { "University Name": [list_of_paragraph_texts_in_block] }
    """
    # Whitespace-normalised, non-empty paragraph texts only.
    texts = [normalize_text(p.text) for p in doc.paragraphs]
    texts = [t for t in texts if t]

    # Locate every paragraph index that opens a known university heading.
    heading_positions: List[Tuple[int, str]] = []
    for pos, text in enumerate(texts):
        for uni_name in UNIVERSITY_ID_MAP.keys():
            # Exact match or paragraph starting with that name.
            if text == uni_name or text.startswith(uni_name):
                heading_positions.append((pos, uni_name))

    heading_positions.sort(key=lambda item: item[0])

    uni_blocks: Dict[str, List[str]] = {}
    for n, (start, uni_name) in enumerate(heading_positions):
        # Block ends where the next heading starts, or at end of document.
        is_last = n + 1 >= len(heading_positions)
        end = len(texts) if is_last else heading_positions[n + 1][0]
        uni_blocks[uni_name] = texts[start:end]

    return uni_blocks
133
+
134
+
135
def _after_colon(line: str) -> str:
    """Return the trimmed text after the first ':' in *line* ('' if none)."""
    _, _, value = line.partition(":")
    return value.strip()


def _int_after_colon(line: str):
    """Return the digits after the first ':' as an int, or None if absent.

    Strips thousands separators, currency symbols, etc. before parsing.
    """
    digits = re.sub(r"[^\d]", "", _after_colon(line))
    return int(digits) if digits else None


def parse_overview_block(block: List[str]) -> Dict[str, Any]:
    """Extract the overview section of one university block as a dict.

    Scans for the labelled lines ('Founded:', 'Total Students:', etc.) and
    returns whichever fields are present; unrecognised lines are ignored.

    Robustness fixes vs. the previous version:
    - a labelled line with no digits (e.g. 'Founded: n/a') yields None
      instead of raising ValueError from int('');
    - a labelled line with no colon no longer raises IndexError;
    - the dead duplicate condition `startswith("Total Students:")` (already
      implied by `startswith("Total Students")`) was removed.
    """
    overview: Dict[str, Any] = {}
    for line in block:
        if line.startswith("Founded:"):
            overview["founded"] = _int_after_colon(line)
        elif line.startswith("Total Students"):
            overview["total_students"] = _int_after_colon(line)
        elif "Postgraduate" in line and "Students" in line:
            # Some pages have 'Postgraduate students' or 'Postgraduate Students'
            overview["postgraduate_students"] = _int_after_colon(line)
        elif line.startswith("Acceptance rate"):
            overview["acceptance_rate"] = _after_colon(line)
        elif line.startswith("Location:"):
            overview["location"] = _after_colon(line)
        elif "Yearly Out of State Tuition Fees" in line or "Yearly Tuition Fees" in line:
            overview["tuition_out_of_state_yearly"] = _int_after_colon(line)

    return overview
161
+
162
+
163
def extract_between(block: List[str], start_marker: str, stop_markers: List[str]) -> List[str]:
    """Collect the lines between two markers.

    Skips everything up to and including the first line that contains
    *start_marker*, then gathers each subsequent non-blank line (stripped)
    until a line containing any of *stop_markers* is reached.  Returns []
    when the start marker never appears.
    """
    collected: List[str] = []
    in_section = False
    for line in block:
        if not in_section:
            # The marker line itself is never part of the result.
            in_section = start_marker in line
            continue
        if any(marker in line for marker in stop_markers):
            break
        if line.strip():
            collected.append(line.strip())
    return collected
180
+
181
+
182
def parse_benefits_block(block: List[str]) -> Dict[str, Any]:
    """Extract the ISP benefits bullet list for one university.

    Benefits are the lines following the 'Benefits for ISP students at this
    school' heading, up to the 'To qualify for The International Scholars
    Program' sentence (checked with and without the trailing 'at').
    """
    raw_lines = extract_between(
        block,
        start_marker="Benefits for ISP students at this school",
        stop_markers=[
            "To qualify for The International Scholars Program at",
            "To qualify for The International Scholars Program",
        ],
    )
    # Normalise whitespace in every bullet; drop anything empty.
    return {"benefits": [normalize_text(line) for line in raw_lines if line]}
198
 
 
 
199
 
200
def parse_programs_block(block: List[str]) -> Dict[str, Any]:
    """Parse the program table of one university block into records.

    After the 'To qualify for The International Scholars Program ...'
    sentence, the table is flattened to one field per line:

        <program name>          e.g. MS Computer Science
        <designation>           e.g. STEM
        <entrance exam>         e.g. Optional
        <career pathway> ...    one or more lines
        <funding category>      a line starting with 'TIER'

    Career-pathway lines are consumed greedily until the 'TIER...' line, so
    each program may carry any number of pathways.  Returns
    {"programs": [ {program_name, designation, entrance_exam,
                    career_pathways, funding_category}, ... ]}.
    """
    # Everything between the qualifying sentence and the next university heading.
    table_lines = extract_between(
        block,
        start_marker="To qualify for The International Scholars Program",
        stop_markers=[
            "Montclair State University",
            "Missouri State University",
            "Indiana University of Pennsylvania",
            "University of Louisville",
            "University of Delaware",
            "Grand Valley State University",
            "Quinnipiac University",
            "William Jessup University",
            "Wilkes University",
            "University of South Dakota",
            # any other possible headings we might hit
        ],
    )

    # Drop any surviving header-row cells before grouping into records.
    header_keywords = {"Program", "Designation", "Entrance Exam Required", "Entrance Examination", "Examples of Career Pathways", "Funding Category"}
    rows = [line for line in table_lines if line not in header_keywords]

    programs: List[Dict[str, Any]] = []
    idx = 0
    # A valid program needs at least 4 fields ahead (name, designation,
    # exam, and one pathway-or-funding line); stop once fewer remain.
    while len(rows) - idx >= 4:
        name = rows[idx].strip()
        designation = rows[idx + 1].strip()
        exam = rows[idx + 2].strip()

        # Consume pathway lines until the 'TIER...' funding line (or EOF).
        pathways: List[str] = []
        cursor = idx + 3
        while cursor < len(rows) and not rows[cursor].startswith("TIER"):
            pathways.append(rows[cursor].strip())
            cursor += 1

        funding = rows[cursor].strip() if cursor < len(rows) else ""

        programs.append(
            {
                "program_name": name,
                "designation": designation,
                "entrance_exam": exam,
                "career_pathways": pathways,
                "funding_category": funding,
            }
        )
        # Resume scanning just past the funding line.
        idx = cursor + 1

    return {"programs": programs}
294
 
295
+
296
def parse_university_block(uni_name: str, block: List[str]) -> Dict[str, Dict[str, Any]]:
    """Parse every handbook-sourced section for a single university block.

    Delegates to the per-section parsers and keeps only sections that
    produced content, so the result contains any subset of:

        {"overview": {...}, "benefits": {...}, "programs": {...}}
    """
    parsed: Dict[str, Dict[str, Any]] = {}

    overview = parse_overview_block(block)
    if overview:
        # Always include explicit name for safety
        overview.setdefault("university_name", uni_name)
        parsed["overview"] = overview

    benefits = parse_benefits_block(block)
    if benefits.get("benefits"):
        parsed["benefits"] = benefits

    programs = parse_programs_block(block)
    if programs.get("programs"):
        parsed["programs"] = programs

    return parsed
327
+
328
+
329
+ # -----------------------------
330
+ # MAIN SYNC FUNCTION
331
+ # -----------------------------
332
def run_full_sync(docx_file) -> str:
    """Parse the uploaded handbook DOCX and sync it into the database.

    For every university in UNIVERSITY_ID_MAP:
      * locate its block in the document and parse overview / benefits /
        programs,
      * diff each section against the stored section_json via DeepDiff,
      * write the new JSON back only when a difference exists.

    Returns a human-readable, line-per-event log ending with a summary count.
    """
    if docx_file is None:
        return "No handbook file uploaded."

    try:
        document = Document(docx_file.name)
    except Exception as e:
        return f"Failed to read DOCX: {e}"

    blocks_by_name = split_doc_by_university(document)

    report: List[str] = []
    updated_count = 0

    for uni_name, uni_id in UNIVERSITY_ID_MAP.items():
        block = blocks_by_name.get(uni_name)
        if not block:
            report.append(f"[WARN] No block found in handbook for '{uni_name}'. Skipping.")
            continue

        parsed_sections = parse_university_block(uni_name, block)
        if not parsed_sections:
            report.append(f"[WARN] No parsable sections for '{uni_name}'. Skipping.")
            continue

        for section_key, new_data in parsed_sections.items():
            # We ONLY touch sections sourced from the handbook.
            if section_key not in ("overview", "benefits", "programs"):
                continue

            current_data = fetch_section_json(uni_id, section_key)
            if current_data is None:
                # Row may be missing or hold bad JSON; the UPDATE below will
                # simply affect zero rows if the row does not exist.
                report.append(
                    f"[INFO] No existing JSON for uni_id={uni_id}, section_key='{section_key}'. "
                    f"Will only update if row exists."
                )

            # Only write back when DeepDiff reports an actual difference.
            diff = DeepDiff(current_data or {}, new_data, ignore_order=True)
            if not diff:
                report.append(f"[OK] '{uni_name}' [{section_key}] – no change.")
                continue

            try:
                update_section_json(uni_id, section_key, new_data)
                updated_count += 1
                report.append(
                    f"[UPDATED] '{uni_name}' [{section_key}] – DB updated (differences detected)."
                )
            except Exception as e:
                report.append(
                    f"[ERROR] Failed to update '{uni_name}' [{section_key}]: {e}"
                )

    return "\n".join(report) + f"\n\nTotal sections updated: {updated_count}\n"
401
 
402
 
403
# -----------------------------
# GRADIO UI
# -----------------------------
# Single-action UI: upload the handbook, press the button, read the log.
with gr.Blocks() as demo:
    gr.Markdown("# ISP Handbook Database Sync (Full Auto)")
    gr.Markdown(
        """
Upload the **full ISP Handbook DOCX**.
On **Run full sync**, the app will:

1. Parse each university block from the handbook
2. Extract **Overview**, **Benefits**, and **Programs** sections
3. Compare them with `university_handbook_sections.section_json`
4. Update only rows that have changed

Only sections that are sourced from the handbook are touched:
- `overview`
- `benefits`
- `programs`

Sections like `campus_image` / `image` are **never updated** here.
"""
    )

    handbook_upload = gr.File(label="Upload ISP Handbook DOCX", file_types=[".docx"])

    run_button = gr.Button("Run full sync")
    sync_log = gr.Textbox(
        label="Sync Log",
        lines=30,
        interactive=False,
    )

    run_button.click(
        fn=run_full_sync,
        inputs=handbook_upload,
        outputs=sync_log,
    )
441
 
442
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
  gradio
2
  python-docx
3
  deepdiff
4
- mysql-connector-python
 
1
  gradio
2
  python-docx
3
  deepdiff
4
+ mysql-connector-python