Spaces:
Sleeping
Sleeping
| ######################################################### IMPORTS ################################################################################################################################# | |
| from __future__ import print_function | |
| from flask import Flask, render_template,request,flash , json, url_for,g , redirect , jsonify , send_file ,make_response | |
| import json | |
| import fitz | |
| from PIL import Image | |
| import cv2 | |
| import numpy as np | |
| import pilecaps_adr | |
| import base64 | |
| from db import dropbox_connect | |
| import cv2 | |
| import pandas as pd | |
| import time | |
| from io import BytesIO, StringIO | |
| import urllib | |
| import tempfile | |
| from flask import Flask, Response | |
| from werkzeug.wsgi import wrap_file | |
| import tameem3_2 | |
| import pypdfium2 as pdfium | |
| from urllib.parse import urlparse | |
| import re | |
| import pixelconversion | |
| import tameem2_1 | |
| import io | |
| from urllib.parse import unquote | |
| import API | |
| import MC_Templates_API | |
| import tsadropboxretrieval | |
| import doc_search | |
| import google_sheet_Legend | |
| import dxf__omar3_2 | |
| import requests | |
| import google_sheet_to_xml | |
| from threading import Thread | |
| import mainDBAlaa | |
| import datetime | |
| import doors_fasterrcnn | |
| import deploying_3_3 | |
| import Doors_Schedule | |
| import Code_2_7 | |
| import Counting_Columns_2_1 | |
| # import Find_Hyperlinking_text | |
| import ezdxf | |
| import Azure_api | |
| from flask import Flask, render_template, session, redirect, url_for | |
| from flask_session import Session # Capital "S" | |
| import os, traceback | |
| from werkzeug.utils import secure_filename | |
| import Legend_Detection | |
| ######################################################### FLASK APP ################################################################################################################################# | |
app = Flask(__name__)

# Server-side session storage (Flask-Session) backed by the local filesystem.
app.config["SESSION_TYPE"] = "filesystem"
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_FILE_DIR"] = "./flask_session_files"

# Prefer an environment-provided secret; fall back to the historical default so
# existing deployments keep working.  NOTE(review): the fallback value is not
# secure -- set FLASK_SECRET_KEY in production.
app.secret_key = os.environ.get("FLASK_SECRET_KEY", "your_secret_key")

# Initialize Flask-Session
sess = Session()
sess.init_app(app)
| ######################################################### GLOBAL VARIABLES ################################################################################################################################# | |
# Module-level state shared across requests.
# NOTE: a bare `global` statement at module scope is a no-op, so the names are
# initialised explicitly here to avoid NameError on first read.
colorsused = None        # last colour selection made in the GUI
pdflink = None           # last Dropbox link handled
hatched_areas2_7 = None  # cached hatched-area results for the 2.7 flow

# Cache of the last known API tables so repeated lookups can be served without
# re-hitting the external service.
cached_tables = {
    "projects": None,
    "parts": None,
    "sections": None,
}
| ######################################################### MAIN ERROR MESSAGE FUNCTION ################################################################################################################################# | |
def log_error(message, issue_type="backend"):
    """Build, print and return a standard ADR error message.

    The contact person is chosen from this source file's name and the issue
    category from ``issue_type`` ("connection", "frontend", or anything else,
    which is reported as a backend error).
    """
    script = os.path.basename(__file__)
    if script.startswith(('2.6', '2.8', '2.1')):
        contact = 'Tameem'
    elif script.startswith(('2.7', '3.2', '3.3')):
        contact = 'Omar'
    else:
        contact = 'Marthe'

    # Map the issue type to its human-readable category.
    categories = {
        "connection": "Connection issue detected",
        "frontend": "Frontend issue detected",
    }
    prefix = categories.get(issue_type, "Backend error detected")

    full_message = f"{prefix}. {message}. Please contact {contact} from the ADR Team."
    print(full_message)
    return full_message
| ######################################################### GUI RENDER ################################################################################################################################# | |
def getInfotoMeasure():
    """Render the measurement GUI page (gui2.html)."""
    try:
        return render_template("gui2.html")
    except Exception:
        # Report the full traceback through the shared error logger.
        details = traceback.format_exc()
        return jsonify({"error": log_error(details)}), 500
| ######################################################### PASSWORD REDIRECTION ################################################################################################################################# | |
def password_page():
    """Render the password entry page (gui2.html)."""
    try:
        return render_template("gui2.html")
    except Exception:
        # Report the full traceback through the shared error logger.
        tb = traceback.format_exc()
        return jsonify({"error": log_error(tb)}), 500
def check_password():
    """Validate the submitted GUI password and mark the session authenticated.

    Reads ``password`` from the POSTed form.  Returns JSON
    ``{"authenticated": bool}`` with 200, or a JSON error with 500 on
    unexpected failure.
    """
    try:
        import secrets

        password = request.form.get("password")
        # NOTE(review): the password is hard-coded in source; consider moving
        # it to configuration.  compare_digest avoids timing side channels.
        correct_password = "c900"
        authenticated = password is not None and secrets.compare_digest(
            password, correct_password
        )
        if authenticated:
            session["authenticated"] = True
            return jsonify({"authenticated": True}), 200
        return jsonify({"authenticated": False}), 200
    except Exception:
        # capture traceback for more detail
        error_details = traceback.format_exc()
        error_msg = log_error(error_details)
        return jsonify({"error": error_msg}), 500
| ######################################################### MAIN GUI PAGE ################################################################################################################################# | |
def main_gui():
    """Render the main GUI, redirecting unauthenticated users to the password page."""
    try:
        if not session.get("authenticated"):
            return redirect(url_for("password_page"))
        return render_template("proposed-GUI.html")
    except Exception:
        # Report the full traceback through the shared error logger.
        details = traceback.format_exc()
        return jsonify({"error": log_error(details)}), 500
def measurementConsoleFn():
    """Render the measurement console page (proposed-GUI.html)."""
    try:
        return render_template("proposed-GUI.html")
    except Exception:
        # Report the full traceback through the shared error logger.
        tb = traceback.format_exc()
        return jsonify({"error": log_error(tb)}), 500
| ######################################################### WORD SEARCH ################################################################################################################################# | |
def searchDocument():
    """Render the word-search page (wordSearch.html).

    Wrapped in try/except for consistency with the other page handlers,
    which all log and return a JSON 500 on template failure.
    """
    try:
        return render_template('wordSearch.html')
    except Exception:
        error_details = traceback.format_exc()
        error_msg = log_error(error_details)
        return jsonify({"error": error_msg}), 500
def getInfo2toMeasure():
    """Render the word-search page (wordSearch.html)."""
    try:
        # API.AppendtablestoSheets()
        return render_template("wordSearch.html")
    except Exception:
        # Report the full traceback through the shared error logger.
        details = traceback.format_exc()
        return jsonify({"error": log_error(details)}), 500
def getSearchinDocs():
    """Search the given projects' documents for a keyword.

    Expects JSON {"keyword": ..., "listofprojs": ...}.  Returns a two-item
    JSON list: [HTML table of matches, list of base64-encoded PNG previews].
    """
    try:
        payload = request.get_json()
        keyword = payload.get('keyword')
        projects = payload.get('listofprojs')
        print(keyword, projects)
        df, images = doc_search.search_docs(keyword, projects)
        previews = []
        for image in images:
            _, png = cv2.imencode('.png', image)
            previews.append(base64.b64encode(png).decode('utf-8'))
        return jsonify([df.to_html(index=False, escape=False), previews])
    except Exception:
        # Report the full traceback through the shared error logger.
        return jsonify({"error": log_error(traceback.format_exc())}), 500
def getSearchinFolder():
    """Slow keyword search across one project's folder.

    Expects JSON {"keyword": ..., "ProjectName": ...}.  Returns a two-item
    JSON list: [HTML table of matches, list of base64-encoded PNG previews].
    """
    try:
        payload = request.get_json()
        word = payload.get('keyword')
        project = payload.get('ProjectName')
        df, images = doc_search.slow_search(keyword=word, project=project)
        previews = [
            base64.b64encode(cv2.imencode('.png', image)[1]).decode('utf-8')
            for image in images
        ]
        return jsonify([df.to_html(index=False, escape=False), previews])
    except Exception:
        # Report the full traceback through the shared error logger.
        return jsonify({"error": log_error(traceback.format_exc())}), 500
| #get sunburst from doc_search - NOT USED | |
def getSunburst():
    """Return the document-tree sunburst structure as JSON (currently unused)."""
    try:
        # df=tsadropboxretrieval.GetParquetDF()
        sunburst = doc_search.prepare_sunburst()
        return jsonify(sunburst.to_dict())
    except Exception:
        # Report the full traceback through the shared error logger.
        return jsonify({"error": log_error(traceback.format_exc())}), 500
| ######################################################### TESTING ROUTE ################################################################################################################################# | |
def test_route():
    """Deliberately divide by zero to exercise the error-reporting path."""
    try:
        x = 10 / 0  # intentional failure
        return jsonify({"result": x})
    except Exception:
        # Report the full traceback through the shared error logger.
        details = traceback.format_exc()
        return jsonify({"error": log_error(details)}), 500
| ######################################################### PROJECT NAMES RETRIEVAL ################################################################################################################################# | |
def getprjnamesfromTestAPI():
    """Fetch project names and ids from the external API and cache them.

    Updates ``cached_tables["projects"]`` and returns [names, ids] as JSON.
    Connection problems and other failures are logged and reported as 500.
    """
    try:
        prjnames, prjids = API.getPrjNames()
        global cached_tables
        cached_tables["projects"] = pd.DataFrame(
            {"ProjectName": prjnames, "ProjectId": prjids}
        )
        return jsonify([prjnames, prjids])
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| # return Response(generate_progressPrjnames(), content_type='text/event-stream') | |
| ######################################################### PROJECT PARTS RETRIEVAL ################################################################################################################################# | |
def getprjpartsfromTestAPI(jsdata):
    """Fetch the parts of project ``jsdata`` from the API and cache them.

    Updates ``cached_tables["parts"]`` and returns [parts, ids] as JSON.
    """
    try:
        prjparts, partsIds = API.getprjParts(jsdata)
        global cached_tables
        cached_tables["parts"] = pd.DataFrame({
            "ProjectPart": prjparts,
            "ProjectPartId": partsIds,
            "ProjectId": [int(jsdata)] * len(prjparts),
        })
        return jsonify([prjparts, partsIds])
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| ######################################################### PROJECT SECTIONS RETRIEVAL ################################################################################################################################# | |
def getprjSectionsfromTestAPI(jsdata):
    """Fetch the section list for a project (and optional part) and cache it.

    ``jsdata`` is a JSON-encoded list: [project_id] or
    [project_id, project_part_id].  The literal first element 'testing'
    triggers a parameterless API call (and is then stored as the project id
    in the cache).  Returns the sections as JSON, or a JSON error with 500.
    """
    try:
        data = json.loads(jsdata)
        if data[0] == 'testing':
            sections = API.getprjSections()
            project_id = data[0]
            project_part_id = None  # No second parameter in this case
        else:
            # Expecting 2 values
            project_id = data[0]
            project_part_id = data[1] if len(data) > 1 else None
            sections = API.getprjSections(project_id, project_part_id)
        global cached_tables
        # Broadcast the ids to one row per section for the cached table.
        df = pd.DataFrame({
            "ProjectSection": sections,
            "ProjectId": [project_id] * len(sections),
            "ProjectPartId": [project_part_id] * len(sections)
        })
        cached_tables["sections"] = df
        return jsonify(sections)
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### PROJECT PDF NAMES RETRIEVAL ################################################################################################################################# | |
def get_javascript_data(jsdata):
    """SSE endpoint: stream retrieval progress for a project's PDF names.

    ``jsdata`` identifies the project.  Progress percentages and, finally, a
    JSON payload [documentsToMeasure, RelevantDocuments, extracted_path] are
    pushed as text/event-stream messages; errors are sent as a custom SSE
    "error" event.
    """
    progress_updates = []  # Shared list to store progress
    def generate_progress():
        try:
            # raise ConnectionError("Failed to reach external service")
            yield f"data: 5\n\n"  # Initial progress
            yield f"data: 10\n\n"  # Initial progress
            # Call retrieveProjects and pass a callback to update the shared list
            documentsToMeasure, RelevantDocuments, extracted_path = tsadropboxretrieval.retrieveProjects(
                jsdata,
                progress_callback=lambda p: progress_updates.append(p)
            )
            # NOTE(review): retrieveProjects runs to completion before this
            # loop starts, so the queued percentages are flushed in one burst
            # rather than streamed live -- confirm whether that is intended.
            while progress_updates:
                progress = progress_updates.pop(0)  # Get the next progress value
                yield f"data: {progress}\n\n"
            # Final progress and result
            yield f"data: 100\n\n"
            result = json.dumps([documentsToMeasure, RelevantDocuments, extracted_path])
            yield f"data: {result}\n\n"
        except (ConnectionError, TimeoutError) as e:
            # Connection problems become a custom SSE "error" event.
            error_msg = log_error(str(e), issue_type="connection")
            yield f"event:error\ndata:{json.dumps({'error': error_msg})}\n\n"
        except Exception:
            # Any other backend error
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            yield f"event:error\ndata:{json.dumps({'error': error_msg})}\n\n"
    return Response(generate_progress(), content_type='text/event-stream')
| #--------------------------------------------------------------- | |
| ######################################################### TABLE DETECTION ################################################################################################################################# | |
def TableDetection():
    """Render the table-detection page (tableDetection.html)."""
    try:
        return render_template('tableDetection.html')
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| #####################not added in gui yettt | |
def table():
    """Detect tables in the requested PDF(s) and return them as an .xlsx file.

    The 'answers' form field carries a serialized list whose first element is
    either a direct Dropbox/HTTP link or a list of PDF names to fetch from
    Dropbox.  Returns the Excel file as a download, 204 when no tables were
    found, or a JSON error with HTTP 500.
    """
    try:
        import ast

        def _parse_answers(raw):
            # Parse the frontend payload without eval(): eval on request data
            # is a code-injection risk.  Try JSON first, then a safe Python
            # literal parse for legacy repr-style payloads.
            try:
                return json.loads(raw)
            except (ValueError, TypeError):
                return ast.literal_eval(raw)

        # 1. Get PDF bytes (direct link or Dropbox lookup)
        tt = _parse_answers(request.form.get('answers'))
        print("Value of tt = ", tt)
        print("Value of tt[0] = ", tt[0])
        pdflist = []
        pdfnames = []
        if tt[0].startswith('http'):
            pdf_path = tt[0]
            if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
                # Force a direct download from Dropbox share links.
                pdf_path = pdf_path.replace('dl=0', 'dl=1')
            response = requests.get(pdf_path)
            pdf_content = BytesIO(response.content)
            if not pdf_content:
                raise ValueError("No valid PDF content found.")
            excel_io = Azure_api.detect_tables([response.content], ['pdfname.pdf'])
        else:
            for i in range(len(tt[0])):
                print("Value of tt[0][i] = ", tt[0][i])
                pdfpath, _ = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=tt[0][i])
                dbx = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
                _, res = dbx.files_download(path=pdfpath)
                pdflist.append(res.content)
                pdfnames.append(pdfpath)
            # 2. Generate in-memory Excel
            print("pdflist = ", pdflist)
            excel_io = Azure_api.detect_tables(pdflist, pdfnames)
        # 3. Send it as a downloadable .xlsx
        if excel_io is None:
            # No tables -> empty 204 response.
            return ('', 204)
        return send_file(
            excel_io,
            as_attachment=True,
            download_name='detected_tables.xlsx',
            mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### TESTING MEASUREMENT USING LINKS ################################################################################################################################# | |
def TestingMeasurement():
    """Render the link-based measurement test page (TestingMeasurement.html)."""
    try:
        return render_template('TestingMeasurement.html')
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| ######################################################### FIND FUNCTIONS ################################################################################################################################# | |
| #####################not added in gui yettt | |
def FindInitialMarkups():
    """Render the initial-markups page (FindInitialMarkups.html)."""
    try:
        return render_template('FindInitialMarkups.html')
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
# Module-level state for the FIND endpoints below.
pdf_content = None       # last annotated PDF (bytes); set by the FIND flow
pageNumTextFound = 0     # page index used in the downloaded file's name
BASE_URL = "https://marthee-nbslink.hf.space"  # Hugging Face Spaces base URL
| # @app.route("/", methods=["GET", "POST"]) | |
| # def thismain(): | |
| # print('ayhaga') | |
| # return render_template("gui.html") | |
def download_pdf():
    """Annotate a PDF with keyword hyperlinks and stream it back inline.

    Query string (manually parsed, URL-encoded): ``pdfLink`` -- a PDF URL;
    ``keyword`` -- a plain string or a JSON list of strings.
    NOTE(review): ``Find_Hyperlinking_text`` is commented out in the imports
    at the top of this file, so this call raises NameError at runtime and
    falls through to the generic 500 branch -- confirm the import.
    """
    try:
        # Manually parse the query parameters
        full_query_string = request.query_string.decode()  # Get raw query string
        parsed_params = urllib.parse.parse_qs(full_query_string)  # Parse it
        # Extract pdfLink and keyword manually
        pdf_link = parsed_params.get('pdfLink', [None])[0]
        keyword = parsed_params.get('keyword', [None])[0]
        if not pdf_link or not keyword:
            return "Missing required parameters.", 400
        # Decode the extracted values
        pdf_link = urllib.parse.unquote(pdf_link)
        keyword = urllib.parse.unquote(keyword)
        # If the keyword is a JSON string, convert it back to a list
        try:
            keyword = json.loads(keyword)
        except json.JSONDecodeError:
            keyword = [keyword]  # Treat it as a single keyword if not JSON
        print("Extracted PDF Link:", pdf_link)
        print("Extracted Keywords:", keyword)
        createDF = False  # NOTE(review): unused here -- confirm it can be removed
        pdf_content = Find_Hyperlinking_text.annotate_text_from_pdf([pdf_link], keyword)[0]
        if pdf_content is None:
            return "PDF content not found.", 404
        pdf_bytes = BytesIO(pdf_content)
        # pageNumTextFound is module-level state set elsewhere in the FIND flow.
        return send_file(
            pdf_bytes,
            mimetype='application/pdf',
            as_attachment=False,
            download_name=f"annotated_page_{pageNumTextFound}.pdf"
        )
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def receive_pdf_data():
    """Run the FIND annotation flow and upload the results to Dropbox.

    Uses the fixed test inputs from ``finddata()``; uploads the marked-up PDF
    and a markup-summary PDF to /TSA JOBS/ADR Test/FIND/ and returns both
    share links as JSON.
    NOTE(review): ``Find_Hyperlinking_text`` is commented out in the imports
    at the top of this file, and its return is unpacked into four values here
    while ``download_pdf()`` indexes a single-item result -- confirm the real
    signature before re-enabling.
    """
    try:
        global pdf_content, pageNumTextFound
        # Get PDF link and keyword from finddata()
        pdfLink, keyword = finddata()
        if not pdfLink or not keyword:
            return jsonify({"error": "Both 'pdfLink' and 'keyword' must be provided."}), 400
        try:
            print(pdfLink, keyword)
            pdfbytes, pdf_document, df, tablepdfoutput = Find_Hyperlinking_text.annotate_text_from_pdf([pdfLink], keyword)
            dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
            # Get metadata using the shared link
            metadata = dbxTeam.sharing_get_shared_link_metadata(pdfLink)
            dbPath = '/TSA JOBS/ADR Test/FIND/'
            # Upload the marked-up PDF under the original file name.
            pdflink = tsadropboxretrieval.uploadanyFile(doc=pdf_document, path=dbPath, pdfname=metadata.name)
            print('LINKS0', pdflink)
            dbPath = '/TSA JOBS/ADR Test/FIND/'
            # Upload the summary table as "<name> Markup Summary.pdf".
            tablepdfLink = tsadropboxretrieval.uploadanyFile(doc=tablepdfoutput, path=dbPath, pdfname=metadata.name.rsplit(".pdf", 1)[0] + ' Markup Summary' + '.pdf')
            print(f"PDF successfully uploaded to Dropbox at")
            print('LINKS1', tablepdfLink)
            return jsonify({
                "message": "PDF processed successfully.",
                "PDF_MarkedUp": pdflink,
                'Table_PDF_Markup_Summary': tablepdfLink
            })
        except Exception as e:
            # Inner handler: report processing failures without the contact blurb.
            return jsonify({"error": str(e)}), 500
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def finddata():
    """Return the hard-coded (pdf link, keyword list) pair used by the FIND test flow."""
    link = 'https://www.dropbox.com/scl/fi/hnp4mqigb51a5kp89kgfa/00801-ARC-20-ZZ-S-A-0002.pdf?rlkey=45abeoebzqw4qwnslnei6dkd6&st=m4yrcjm2&dl=1'
    keywords = ['115 INTEGRATED MRI ROOM LININGS', '310 ACCURACY']
    return link, keywords
| ######################################################### LEGEND DIRECTORY ################################################################################################################################# | |
def legendDirectory():
    """Render the legend directory page (legendDirectory.html)."""
    try:
        return render_template('legendDirectory.html')
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| #Google sheet links | |
| #####################not added in gui yettt | |
def getlinkscreated(jsdata):
    """List Google Sheets created by the legend tooling.

    Returns a JSON list of [title, stored path metadata value, Drive
    created/modified timestamps, spreadsheet id] for every sheet whose title
    does not start with 'API' or 'Dropbox'.  ``jsdata`` is currently unused.
    """
    try:
        spreadsheet_service, drive_service, gc = google_sheet_Legend.authorizeLegend()
        ids = gc.spreadsheet_ids()
        titles = gc.spreadsheet_titles()
        allpaths = []
        print('HEREEEEEEEEEE')
        # for title in titles:
        for i in range(0, len(titles)):
            print('titles', titles[i])
            if not (titles[i].startswith('API') or (titles[i].startswith('Dropbox'))):
                ws = gc.open(titles[i])
                path_metadata = ws.get_developer_metadata('path')
                print(path_metadata)
                # NOTE(review): assumes the 'path' developer metadata always
                # has at least one entry -- confirm; an empty list would raise
                # IndexError and surface as a 500.
                allpaths.append([titles[i], ws.get_developer_metadata('path')[0].value, drive_service.files().get(fileId=ids[i], fields="createdTime, modifiedTime").execute(), ids[i]])
        return jsonify(allpaths)
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### MC NAMES RETRIEVAL ################################################################################################################################# | |
def CallAPIforMCTNames():
    """Retrieve the MC templates dictionary from the external API as JSON."""
    try:
        templates = MC_Templates_API.RetrieveMC_Templates_API()
        print('here')
        return jsonify(templates)
    except (ConnectionError, TimeoutError) as exc:
        # Log connection issues inside an application context.
        from flask import current_app
        with current_app.app_context():
            msg = log_error(str(exc), issue_type="connection")
            return jsonify({"error": msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### HELPER FUNCTIONS: MARTHE (1.0) ################################################################################################################################# | |
| #Hex value to RGB value | |
def hexRGB(color):
    """Convert a '#rrggbb' (or 'rrggbb') hex string to an [r, g, b] list."""
    try:
        digits = color.lstrip('#')
        return [int(digits[pos:pos + 2], 16) for pos in (0, 2, 4)]
    except Exception:
        # Report the full traceback through the shared error logger.
        details = traceback.format_exc()
        return jsonify({"error": log_error(details)}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### GUI: UPDATE IMAGE PREVIEW ################################################################################################################################# | |
def getfromdropboxImg(jsdata):
    """Render page 1 of a measured-plan PDF from Dropbox as a base64 PNG.

    ``jsdata`` is a serialized list: [[project, part, section], pdf_name],
    mapped onto the /TSA JOBS/ADR Test/... Dropbox layout.
    Returns the base64 PNG string, or a JSON error with HTTP 500.
    """
    try:
        import ast
        # literal_eval replaces eval(): it only accepts Python literals, so
        # request-supplied data cannot execute arbitrary code.
        jsdata = ast.literal_eval(jsdata)
        print('pdfnameeee==', jsdata)
        dbPath = ('/TSA JOBS/ADR Test/' + jsdata[0][0] + '/' + jsdata[0][1] +
                  '/' + jsdata[0][2] + '/Measured Plan/' + jsdata[1])
        print(dbPath)
        dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res = dbxTeam.files_download(path=dbPath)
        doc = fitz.open("pdf", res.content)
        page = doc[0]
        pix = page.get_pixmap()  # render page 1 to an image
        pl = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
        img = cv2.cvtColor(np.array(pl), cv2.COLOR_RGB2BGR)
        _, buffer = cv2.imencode('.png', img)
        return base64.b64encode(buffer).decode('utf-8')
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(traceback.format_exc(), issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### MAIN FUNCTION WHEN USER CLICKS MEASURE BUTTON ################################################################################################################################# | |
| def stringToRGB(): | |
| try: | |
| vv = eval(request.form.get('answers')) | |
| # vv = json.loads(answers) # JSON null -> Python None, true->True, false->False | |
| # print("vv for 2.7=", vv) | |
| if type(vv[0]) == list: | |
| vv[0] = vv[0][0] | |
| if vv[0].startswith('http'): | |
| if ( vv[5].startswith('3.2') or vv[5].startswith('3.3') or vv[5].startswith('2.7')) : | |
| print('3.2 section') | |
| # pdflink=vv[0] | |
| # encoded_url = request.args.get('url') | |
| link = urllib.parse.unquote(vv[0].strip('"')) | |
| if link and ('http' in link or 'dropbox' in link): | |
| if 'dl=0' in link: | |
| link = link.replace('dl=0', 'dl=1') | |
| elif 'www.dropbox.com' in link and '?dl=1' not in link: | |
| link += '?dl=1' | |
| response = requests.get(link) | |
| pdf_content = BytesIO(response.content) | |
| if not pdf_content: | |
| raise ValueError("No valid PDF content found.") | |
| if 'file' not in request.files: | |
| print('error, No file part in the request') | |
| else: | |
| file = request.files['file'] | |
| print('file done, measuring') | |
| arr=measureproject(result=vv,dxffile=file,pdf_content=pdf_content) | |
| return jsonify(arr) | |
| if vv[5].startswith('2.8') or vv[5].startswith('2.6'): | |
| # or vv[5].startswith('2.1') | |
| #print(f"el mawgood fe vv[0]: {vv[0]}") | |
| # arr_s = [] | |
| # dataDocs = [] | |
| # pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(v)) | |
| link = urllib.parse.unquote(vv[0].strip('"')) | |
| if link and ('http' in link or 'dropbox' in link): | |
| if 'dl=0' in link: | |
| link = link.replace('dl=0', 'dl=1') | |
| elif 'www.dropbox.com' in link and '?dl=1' not in link: | |
| link += '?dl=1' | |
| response = requests.get(link) | |
| pdf_content = BytesIO(response.content) | |
| doc = fitz.open(stream=pdf_content, filetype="pdf") | |
| # doc = fitz.open("pdf",dataDoc) | |
| page=doc[0] | |
| if page.rotation!=0: | |
| page.set_rotation(0) | |
| pix = page.get_pixmap(dpi=300) # render page to an image | |
| pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples) | |
| if 'file' not in request.files: | |
| print('error, No file part in the request') | |
| else: | |
| csvfile = request.files['csvfile'] | |
| print('csvfile done, measuring') | |
| arr=measureproject(result=vv,img=pl,pdf_content=pdf_content,csvfile=csvfile) | |
| else: | |
| if ( vv[5][2].startswith('3.2') or vv[5][2].startswith('3.3') or vv[5][2].startswith('2.7')) : | |
| print('3.2 section') | |
| pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=vv[0]) | |
| dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user') | |
| md, res =dbxTeam.files_download(path=pdfpath) | |
| dataDoc = res.content | |
| if 'file' not in request.files: | |
| print('error, No file part in the request') | |
| else: | |
| file = request.files['file'] | |
| print('file done, measuring') | |
| arr=measureproject(vv,dataDoc,0,file) | |
| return jsonify(arr) | |
| if vv[5][2].startswith('1.0'): | |
| opencv_img,dataDoc = plan2img( str(vv[0]) ) | |
| if vv[1]==220: | |
| imgdata = base64.b64decode(vv[6]) | |
| img=Image.open(io.BytesIO(imgdata)) | |
| opencv_img= cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) | |
| arr=measureproject(vv,dataDoc,opencv_img) | |
| if vv[5][2].startswith('2.1'): | |
| #vv = eval(request.form.get('answers')) | |
| #print(f"el mawgood fe vv[0] fe 2.1: {vv[0]}") | |
| #print(f"el mawgood fe vv[6] fe 2.1: {vv[6]}") | |
| #print(f"el mawgood fe vv fe 2.1: {vv}") | |
| #answers = json.loads(request.form['answers']) | |
| data_url = vv[6] # "data:image/png;base64,..." | |
| header, b64 = data_url.split(',', 1) | |
| segmented_img_bytes = base64.b64decode(b64) | |
| pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(vv[0])) | |
| dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user') | |
| md, res =dbxTeam.files_download(path=pdfpath) | |
| dataDoc = res.content | |
| opencv_img,_ = convert2img2_1(str(vv[0])) | |
| #imgdata = base64.b64decode(vv[6]) | |
| #img=Image.open(io.BytesIO(imgdata)) | |
| # opencv_img= cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) | |
| arr=measureproject(vv,dataDoc,segmented_img_bytes) | |
| if vv[5][2].startswith('2.8') or vv[5][2].startswith('2.6'): | |
| vv = eval(request.form.get('answers')) | |
| print(f"el mawgood fe vv[0]: {vv[0]}") | |
| arr_s = [] | |
| dataDocs = [] | |
| pls = [] | |
| for v in vv[0]: | |
| pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(v)) | |
| dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user') | |
| md, res =dbxTeam.files_download(path=pdfpath) | |
| dataDoc = res.content | |
| dataDocs.append(dataDoc) | |
| doc = fitz.open("pdf",dataDoc) | |
| page=doc[0] | |
| if page.rotation!=0: | |
| page.set_rotation(0) | |
| pix = page.get_pixmap(dpi=300) # render page to an image | |
| pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples) | |
| pls.append(pl) | |
| arr=measureproject(vv,dataDocs,pls) | |
| # ---- CSV FILE ----# | |
| #if 'file' not in request.files: | |
| # print('error, No file part in the request') | |
| # arr=measureproject(vv,dataDocs,pls) | |
| #else: | |
| # csvfile = request.files['csvfile'] | |
| # print('csvfile done, measuring') | |
| #arr=measureproject(vv,dataDocs,pls,csvfile=csvfile) | |
| #pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(vv[0])) | |
| #dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user') | |
| #md, res =dbxTeam.files_download(path=pdfpath) | |
| #dataDoc = res.content | |
| #doc = fitz.open("pdf",dataDoc) | |
| #page=doc[0] | |
| #if page.rotation!=0: | |
| # page.set_rotation(0) | |
| #pix = page.get_pixmap(dpi=300) # render page to an image | |
| #pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples) | |
| #arr=measureproject(vv,dataDoc,pl) | |
| return jsonify(arr) | |
| except (ConnectionError, TimeoutError) as e: | |
| # Use app context when logging | |
| from flask import current_app | |
| with current_app.app_context(): | |
| error_msg = log_error(str(e), issue_type="connection") | |
| return jsonify({"error": error_msg}), 500 | |
| except Exception: | |
| from flask import current_app | |
| with current_app.app_context(): | |
| error_details = traceback.format_exc() | |
| error_msg = log_error(error_details, issue_type="backend") | |
| return jsonify({"error": error_msg}), 500 | |
def measure2_1():
    """Pull the 'allvalues' payload out of the JSON request body and run the measurement pipeline on it."""
    try:
        payload = request.get_json()
        values = payload.get('allvalues')
        return measureproject(values)
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| #MAIN FUNCTION -- calls python code to measure the chosen plan from the interface | |
| # @app.route('/projecttomeasure/<jsdata>',methods=["GET","POST"]) | |
def measureproject(result,dataDoc=0,img=0,dxffile=0,pdf_content=0,csvfile=0):
    """Dispatch a measurement job to the section-specific pipeline.

    Args:
        result: answer array from the frontend. result[5] carries the section
            code — a plain string when pdf_content is given (link mode),
            otherwise a path-like list whose third element is the code.
            The meaning of the other indices varies per section (see branches).
        dataDoc: raw PDF bytes downloaded from Dropbox (0 when unused).
        img: rendered page image or segmented-image bytes (0 when unused).
        dxffile: uploaded DXF FileStorage for the 3.x/2.7 branches (0 when unused).
        pdf_content: BytesIO of a PDF fetched from a direct link; also acts as
            the link-mode flag throughout the branches.
        csvfile: uploaded CSV FileStorage for the 2.8/2.6 branch (0 when unused).

    Returns:
        A section-specific list (base64 images, spreadsheet URLs/IDs,
        dataframes as dicts, Dropbox links, …), or a (jsonify, 500) tuple
        when an exception was caught.
    """
    try:
        colorarr=[]
        global pdflink
        pdfpath='/'
        # result = json.loads(jsdata)
        # Link mode groups outputs under a fixed 'testinglink/' folder;
        # Dropbox mode rebuilds the job folder path from result[5].
        if pdf_content:
            section=result[5]
            pdfpath+='testinglink/'
        else:
            section=result[5][2]
            for word in result[5]:
                pdfpath+=word +'/'
        ################################# -1.0- #################################
        arr=[]
        if section.startswith('1.0'): #section value - 1.0 substructure- pile caps
            # result[2]: user-selected hex colors -> RGB tuples for contour drawing.
            for item in result[2]:
                # item1 ='#'+item
                c=hexRGB(item)
                colorarr.append(c)
            print('RATIOS=',result[3], result[4])
            # cv2.imwrite(img,'imgg.png')
            # global pdflink
            imgPerimeter1,image_new1,SimilarAreaDictionary , colorsUsed,spreadsheet_url, spreadsheetId,list1, pdflink, areas_Perimeters, namepathArr =pilecaps_adr.drawAllContours(dataDoc,img,result[1],colorarr, result[3], result[4], result[0],pdfpath)
            _, buffer = cv2.imencode('.png', image_new1)
            arr=[base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(),spreadsheet_url , spreadsheetId,colorsUsed,list1.to_dict(), pdflink, areas_Perimeters, namepathArr]
        ################################# -3.2- #################################
        # elif result[5][2].startswith('3.2'): #section value - 3.2 floor finishes
        #     print('IN HEREEEE 3.2')
        #     dxfpath=dxffile.read()
        #     with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
        #         temp_file.write(dxfpath)
        #         temp_filename = temp_file.name
        #     print(temp_filename)
        #     doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] , pdfpath,result[0])
        #     dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        #     pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
        #     _, buffer = cv2.imencode('.png', outputimg)
        #     arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
        ################################# -3.3 or 3.2- #################################
        elif section.startswith('3.3') or section.startswith('3.2'): #section value - 3.2 floor finishes
            print('code of 3.3 and 3.2')
            # Persist the uploaded DXF to a temp file (delete=False: the path
            # is handed to the processing module after the handle closes).
            dxfpath=dxffile.read()
            with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
                temp_file.write(dxfpath)
                temp_filename = temp_file.name
            print(temp_filename)
            CorrectionRatio=result[9]
            print("result = ",result)
            SearchArray=result[6]
            parsed_url = urlparse(result[0])
            CollectedColors=result[7]
            print("CollectedColors = ",CollectedColors)
            filename = parsed_url.path.split('/')[-1]
            print(filename)
            nameofpdf=filename
            points_Of_drawing_Canvas=False
            # result[8]: manual canvas drawing coordinates (if any).
            if len(result[8])>0:
                points_Of_drawing_Canvas=drawonpdf(nameofpdf,result[8]) ########################## points of manual drawing hereee
            # Same processing modules for link and Dropbox modes; link mode
            # just forwards pdf_content as an extra argument.
            if pdf_content:
                if section.startswith('3.3'):
                    doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas , bax_pretty_xml, column_xml=deploying_3_3.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,pdfpath,result[0],pdf_content)
                else:
                    doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas, bax_pretty_xml, column_xml=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,pdfpath,result[0],pdf_content)
            else:
                if section.startswith('3.3'):
                    doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas , bax_pretty_xml, column_xml=deploying_3_3.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,pdfpath,result[0])
                else:
                    doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas, bax_pretty_xml, column_xml=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,pdfpath,result[0])
            global colorsused
            colorsused=list(SimilarAreaDictionary['Color'])
            dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
            print(dbPath,result[0])
            pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=nameofpdf)
            _, buffer = cv2.imencode('.png', outputimg)
            # if section.startswith('3.3'):
            #     arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]#,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
            # else:
            # BAX/XML exports are returned both inline (base64) and as Dropbox links.
            bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii") #base64.b64encode(bax_pretty_xml).decode("ascii")
            xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")
            bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
            xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)
            arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr ,bax_b64,xml_b64, bax_link,xml_link]
        ################################# 2.7- #################################
        elif section.startswith('2.7') : #section value - 2.7 floor finishes
            print('code of 2.7')
            dxfpath=dxffile.read()
            with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
                temp_file.write(dxfpath)
                temp_filename = temp_file.name
            print(temp_filename)
            # NOTE: 2.7 shifts the frontend indices by one relative to 3.x —
            # canvas points live in result[9] and the ratio in result[10].
            CorrectionRatio=result[10]
            SearchArray=result[6]
            CollectedColors=result[7]
            print("CollectedColors in app.py = ",CollectedColors)
            Thickness=result[8]
            print("result[9] = ",result[9])
            parsed_url = urlparse(result[0])
            filename = parsed_url.path.split('/')[-1]
            print(filename)
            nameofpdf=filename
            points_Of_drawing_Canvas=False
            if (len(result[9]) >0):
                points_Of_drawing_Canvas=drawonpdf(nameofpdf,result[9]) ########################## points of manual drawing hereee
            print("result for 2.7 = ",result)
            print("SearchArray = ",SearchArray)
            global hatched_areas2_7
            if pdf_content:
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas,bax_pretty_xml,column_xml=Code_2_7.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4],SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,Thickness, pdfpath,result[0],pdf_content)
            else:
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas,bax_pretty_xml,column_xml=Code_2_7.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4],SearchArray,CorrectionRatio,CollectedColors,points_Of_drawing_Canvas,Thickness, pdfpath,result[0])
            # global colorsused
            hatched_areas2_7=hatched_areas
            colorsused=list(SimilarAreaDictionary['Color'])
            dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
            print(dbPath,result[0])
            pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=nameofpdf) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
            _, buffer = cv2.imencode('.png', outputimg)
            bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii") #base64.b64encode(bax_pretty_xml).decode("ascii")
            xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")
            bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
            xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)
            arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,[],namepathArr,bax_b64,xml_b64, bax_link,xml_link]#,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
            # arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]#,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
        ################################# -2.8- #################################
        elif section.startswith('2.8') or section.startswith('2.6'): #section value - 2.8 floor finishes
            #vv[0] array of pdf names
            imgss=[]
            dpxlinks=[]
            legendLinks=[]
            listofmarkups=[]
            SearchArray=result[7]
            print('searchhh:',SearchArray)
            print('csv',csvfile)
            CSV_UPLOAD_DIR = os.path.join(os.path.dirname(__file__), "uploaded_csv")
            # annotatedimg,pdf_document,spreadsheet_url, list1 , df_doors=doors_fasterrcnn.main_run(img,dataDoc,'separated_classes_all.pth',result[0],pdfpath, result[4]) #single_double.pth
            if pdf_content:
                # Link mode: result[6] is a Dropbox share URL for the door
                # schedule PDF; force direct download (dl=1) before fetching.
                link = urllib.parse.unquote(result[6].strip('"'))
                if link and ('http' in link or 'dropbox' in link):
                    if 'dl=0' in link:
                        link = link.replace('dl=0', 'dl=1')
                    elif 'www.dropbox.com' in link and '?dl=1' not in link:
                        link += '?dl=1'
                response = requests.get(link)
                pdf_contentSched = BytesIO(response.content)
                # pdf_contentSched = fitz.open(stream=pdf_contentSched, filetype="pdf")
                annotatedimgs, pdf_document , list1, repeated_labels , not_found, bax_pretty_xml, column_xml =Doors_Schedule.mainRun(pdf_contentSched, dataDoc, SearchArray,pdf_content,pdf_contentSched)
            else:
                sch_csv_pdf = False
                file_names = result[6] # ["7376....csv", "something.csv"] in CSV mode or PDF names in PDF mode
                if not file_names:
                    raise ValueError("No schedule files provided in result[6].")
                first_name = str(file_names[0]).lower()
                if first_name.endswith(".csv"):
                    # 🔹 CSV MODE
                    # Ensure folder exists
                    os.makedirs(CSV_UPLOAD_DIR, exist_ok=True)
                    # Grab uploaded CSV files from the request
                    # name="csvFilename" from your <input type="file">
                    uploaded_csvs = request.files.getlist("csvFilename")
                    saved_paths = []
                    for f in uploaded_csvs:
                        if not f.filename:
                            continue
                        safe_name = secure_filename(f.filename)
                        save_path = os.path.join(CSV_UPLOAD_DIR, safe_name)
                        f.save(save_path)
                        saved_paths.append(save_path)
                    annotatedimgs, pdf_document, list1, repeated_labels, not_found, bax_pretty_xml, column_xml = Doors_Schedule.mainRun(
                        saved_paths, # 👉 ["file1.csv", "file2.csv"]
                        dataDoc,
                        SearchArray,
                        sch_csv_pdf
                    )
                else:
                    # PDF mode: download each named schedule PDF from Dropbox.
                    dataDocDoorsSchedule = []
                    sch_csv_pdf = True
                    for r in result[6]:
                        pdfpathDoors,_=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= r)
                        dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
                        md, resDoors =dbxTeam.files_download(path=pdfpathDoors)
                        dataDocDoorsSchedule.append(resDoors.content)
                    annotatedimgs, pdf_document , list1, repeated_labels , not_found, bax_pretty_xml, column_xml =Doors_Schedule.mainRun(dataDocDoorsSchedule, dataDoc, SearchArray, sch_csv_pdf)
            dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
            pdflink= tsadropboxretrieval.uploadanyFile(doc=pdf_document,path=dbPath,pdfname="combined_output.pdf") #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/
            repeatedLabelsReturn=''
            NotFoundReturn=''
            if len(repeated_labels)>0:
                repeatedLabelsReturn=repeated_labels
            if len(not_found)>0:
                NotFoundReturn=not_found # , spreadsheetId, spreadsheet_url , list1.to_dict()]
            annotatedimgsBuffered=[]
            for b in annotatedimgs:
                _, buffer = cv2.imencode('.png', b)
                # Convert buffer (ndarray) to bytes and then base64 string
                b64_str = base64.b64encode(buffer).decode('utf-8')
                annotatedimgsBuffered.append(b64_str)
            bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii") #base64.b64encode(bax_pretty_xml).decode("ascii")
            xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")
            dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
            bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
            xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)
            arr = [
                annotatedimgsBuffered,
                pdflink,
                list1.to_dict(),
                str(repeatedLabelsReturn),
                str(NotFoundReturn),
                bax_b64,
                xml_b64,
                bax_link,
                xml_link
            ]
        ################################# -2.1- #################################
        elif section.startswith('2.1'): #section value - 2.8 floor finishes
            #vv[0] array of pdf names
            imgss=[]
            dpxlinks=[]
            legendLinks=[]
            listofmarkups=[]
            data_url = result[6] # "data:image/png;base64,..."
            header, b64 = data_url.split(',', 1)
            segmented_img_bytes = base64.b64decode(b64)
            #annotatedimg,pdf_document,spreadsheet_url, list1 , df_doors = Counting_Columns_2_1.mainfun(dataDoc, segmented_img_bytes)
            # dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
            # pdflink= tsadropboxretrieval.uploadanyFile(doc=pdf_document,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/
            # _, buffer = cv2.imencode('.png', annotatedimg)
            #arr=[base64.b64encode(buffer).decode('utf-8') ,pdflink,spreadsheet_url,list1.to_dict(), df_doors.to_dict()]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
            output_dict = Counting_Columns_2_1.mainfun(dataDoc, segmented_img_bytes)
            arr = output_dict
        ################################# -2.2- #################################
        # elif result[5][2].startswith('2.2'): #section value - 2.2 rc slabs
        #add here python code
        #
        #link (add this to ur code)
        # pdflink= db.dropbox_upload_file(doc=doc,pdfname=path,pdfpath=pdfpath)
        # gc,spreadsheet_service,spreadsheetId, spreadsheet_url , namepathArr= pilecaps_adr.legendGoogleSheets(df,plan ,pdfpath)
        #output img
        # _, buffer = cv2.imencode('.png', outputimg)
        #let first result to be the img
        #return results in arr=[base64.b64encode(buffer).decode('utf-8'),pdflink,spreadsheetId,spreadsheet_url] like the previous sections in the above lines
        # elif result[5][2].startswith('2.1'): #section value - 2.1 frames
        #     url = tameem2_1.mainfun( result[0], pdfpath)
        #     return jsonify([url])
        return arr
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### HELPER FUNCTIONS: MARTHE/OMAR ################################################################################################################################# | |
def drawonpdf(nameofpdf,coords):
    """Annotate page 0 of a Dropbox-hosted PDF with user-drawn shapes.

    Args:
        nameofpdf: PDF name used to resolve the Dropbox path.
        coords: list of shape dicts; each has a "coordinates" list of
            {"x": ..., "y": ...} points. Non-dict entries and shapes with
            fewer than two points are skipped.

    Returns:
        The derotated fitz.Point list of the LAST shape drawn (empty list
        when no shape qualified), or a (jsonify, 500) tuple on failure.
    """
    try:
        pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
        dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res =dbxTeam.files_download(path=pdfpath)
        data = res.content
        doc = fitz.open("pdf",data)
        page=doc[0]
        # BUGFIX: initialize before the loop — previously `points` was only
        # bound inside the loop, so an empty/all-invalid `coords` raised
        # UnboundLocalError at the return statement below.
        points = []
        for shape in coords:
            if not isinstance(shape, dict):
                continue
            points_list = shape.get("coordinates", [])
            if not isinstance(points_list, list) or len(points_list) < 2:
                continue
            # Extract coordinates
            vertices = [(p["x"], p["y"]) for p in points_list if "x" in p and "y" in p]
            # Convert to fitz.Point objects and apply derotation
            points = [fitz.Point(x, y) * page.derotation_matrix for x, y in vertices]
            # --- 🟢 Use polygon annotation ---
            if len(points) > 2:
                annot = page.add_polygon_annot(points)
            else:
                annot = page.add_polyline_annot(points)
            # Style annotation: red border, 1pt wide
            annot.set_colors(stroke=(1, 0, 0))
            annot.set_border(width=1)
            annot.update()
        # doc.save("output.pdf")
        return points
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
def callOmarLegend():
    """Run legend detection on an uploaded DXF against a Dropbox-hosted PDF and return the detected colors as JSON."""
    # -- text fields from the multipart form --
    nameofPDF = request.form.get("nameofPDF")
    print('nameofPDF',nameofPDF)
    groupedValues = json.loads(request.form.get("groupedValues"))
    print('groupedValues',groupedValues)
    # -- uploaded DXF: persist to a temp file for the detector --
    if "file" not in request.files:
        return jsonify({"error": "No file received"}), 400
    dxf_upload = request.files["file"]
    with tempfile.NamedTemporaryFile(suffix=".dxf", delete=False) as tmp:
        tmp.write(dxf_upload.read())
        dxf_path = tmp.name
    # -- fetch the matching PDF from Dropbox --
    pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofPDF)
    team = tsadropboxretrieval.ADR_Access_DropboxTeam("user")
    _meta, res = team.files_download(path=pdfpath)
    pdf_bytes = res.content
    # -- run legend detection --
    colorsArray = Legend_Detection.Legend_Detection(pdf_bytes, dxf_path, groupedValues)
    print('colorsArray',colorsArray)
    return jsonify(colorsArray)
| ######################################################### GUI: CANVAS ################################################################################################################################# | |
def pdftoimgCanvas(jsdata):
    """Render page 0 of the named PDF and return [base64 PNG, height, width] as JSON."""
    try:
        image = plan2img(jsdata)[0]
        _ok, png = cv2.imencode('.png', image)
        payload = [base64.b64encode(png).decode('utf-8'), image.shape[0], image.shape[1]]
        return jsonify(payload)
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
def pdftoimgCanvas2(jsdata):
    """Render page 0 of the named PDF via pdfium and return [base64 PNG, height, width] as JSON."""
    try:
        image = convert2img2_1(jsdata)[0]
        _ok, png = cv2.imencode('.png', image)
        payload = [base64.b64encode(png).decode('utf-8'), image.shape[0], image.shape[1]]
        return jsonify(payload)
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| #pdf to img | |
| ######################################################### HELPER FUNCTIONS: MARTHE ################################################################################################################################# | |
def plan2img(nameofpdf):
    """Download the named PDF from Dropbox and render page 0 as a BGR image.

    Returns:
        (img, data): OpenCV BGR ndarray of the first page and the raw PDF bytes.
    """
    try:
        pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
        team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        _meta, res = team.files_download(path=pdfpath)
        data = res.content
        document = fitz.open("pdf", data)
        first_page = document[0]
        pixmap = first_page.get_pixmap()  # render page to an image (default DPI)
        pil_img = Image.frombytes('RGB', [pixmap.width, pixmap.height], pixmap.samples)
        bgr = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
        return bgr, data
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### HELPER FUNCTIONS: TAMEEM ################################################################################################################################# | |
# Shared mutable state for the interactive HSV segmentation tool
# (used by convert2img2_1 / click / update_mask below).
img_cv2 = None        # last PDF page rendered to a BGR OpenCV image (set by convert2img2_1)
segmented_img = None  # most recent masked image produced by update_mask
# Current HSV threshold window; defaults span the full OpenCV HSV range
# (H in [0, 179], S/V in [0, 255]), i.e. no filtering.
current_hsv = {
    'h_min': 0, 'h_max': 179,
    's_min': 0, 's_max': 255,
    'v_min': 0, 'v_max': 255
}
def convert2img2_1(nameofpdf):
    """Download the named PDF from Dropbox, render page 0 via pdfium as BGR, and cache it in the global img_cv2.

    Returns:
        (img, data): OpenCV BGR ndarray of the first page and the raw PDF bytes.
    """
    try:
        global img_cv2
        pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
        team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        _meta, res = team.files_download(path=pdfpath)
        data = res.content
        document = pdfium.PdfDocument(data)
        first_page = document.get_page(0)
        rgb = np.array(first_page.render().to_pil())
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        img_cv2 = bgr  # cache for the click/update_mask endpoints
        return bgr, data
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
def convert2img(path):
    """Render page 0 of the PDF at `path` via pdfium and return it as an OpenCV BGR ndarray."""
    try:
        document = pdfium.PdfDocument(path)
        first_page = document.get_page(0)
        rgb = np.array(first_page.render().to_pil())
        return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| ######################################################### FRONTEND ROUTES: TAMEEM ################################################################################################################################# | |
def click():
    """Sample the pixel at the clicked (x, y) of the cached image and reset the HSV window to that single color."""
    try:
        global img_cv2, current_hsv
        if img_cv2 is None:
            return jsonify({'error': 'No image loaded'}), 400
        payload = request.json
        col, row = int(payload['x']), int(payload['y'])
        b, g, r = img_cv2[row, col]
        # Convert the single BGR pixel to HSV via a 1x1 image.
        h, s, v = cv2.cvtColor(np.uint8([[[b, g, r]]]), cv2.COLOR_BGR2HSV)[0][0].tolist()
        current_hsv = {
            'h_min': h, 'h_max': h,
            's_min': s, 's_max':s,
            'v_min': v, 'v_max': v
        }
        return jsonify({
            'info': f'RGB: ({r}, {g}, {b}) - HSV: ({h}, {s}, {v})',
            'hMin': current_hsv['h_min'], 'hMax': current_hsv['h_max'],
            'sMin': current_hsv['s_min'], 'sMax': current_hsv['s_max'],
            'vMin': current_hsv['v_min'], 'vMax': current_hsv['v_max']
        })
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| ######################################################### FRONTEND ROUTES: TAMEEM ################################################################################################################################# | |
def update_mask():
    """Apply the requested HSV window to the cached image and return the masked result as a base64 data URL."""
    try:
        global img_cv2, segmented_img, current_hsv
        if img_cv2 is None:
            return jsonify({'error': 'No image uploaded yet'}), 400
        payload = request.get_json(force=True) or {}
        # Persist the requested thresholds (defaults span the full HSV range).
        current_hsv = {
            'h_min': int(payload.get('hMin', 0)), 'h_max': int(payload.get('hMax', 179)),
            's_min': int(payload.get('sMin', 0)), 's_max': int(payload.get('sMax', 255)),
            'v_min': int(payload.get('vMin', 0)), 'v_max': int(payload.get('vMax', 255)),
        }
        hsv_img = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2HSV)
        lo = np.array([current_hsv['h_min'], current_hsv['s_min'], current_hsv['v_min']], dtype=np.uint8)
        hi = np.array([current_hsv['h_max'], current_hsv['s_max'], current_hsv['v_max']], dtype=np.uint8)
        in_range = cv2.inRange(hsv_img, lo, hi)
        segmented_img = cv2.bitwise_and(img_cv2, img_cv2, mask=in_range)
        # Encode segmented image to PNG -> base64 data URL
        ok, png = cv2.imencode('.png', segmented_img)
        if not ok:
            return jsonify({'error': 'PNG encode failed'}), 500
        data_url = 'data:image/png;base64,' + base64.b64encode(png).decode('utf-8')
        return jsonify({
            'image_data': data_url
        })
    except (ConnectionError, TimeoutError) as e:
        # Log inside an app context so log_error can use app facilities.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
        return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
        return jsonify({"error": error_msg}), 500
| ######################################################### FRONTEND ROUTES: TAMEEM ################################################################################################################################# | |
def img_to_click(blob):
    """Render page 0 of a PDF byte blob and return it as a base64 PNG string."""
    try:
        first_page = fitz.open("pdf", blob).get_page(0)
        # PyMuPDF renders RGB; OpenCV expects BGR before encoding.
        rgb = np.array(first_page.render().to_pil())
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        _, png = cv2.imencode('.png', bgr)
        return base64.b64encode(png).decode('utf-8')
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### FRONTEND ROUTES: MARTHE ################################################################################################################################# | |
| #User-made MC-T NAME - in the second part of the interface | |
def sendmctnametoLegend(jsdata):
    """Map a user-entered MC-T name onto the legend sheet.

    *jsdata* is a JSON-encoded payload; uses the module-level ``colorsused``
    and ``pdflink`` and returns the resulting summary id as a JSON list.
    """
    try:
        global pdflink
        parsed = json.loads(jsdata)
        print(parsed)
        summary_id = google_sheet_Legend.mapnametoLegend(parsed, colorsused, pdflink)
        return jsonify([summary_id])
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def getguessedNames():
    """Return every guessed legend name as JSON for the frontend dropdown."""
    try:
        return jsonify(google_sheet_Legend.getallguessednames())
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### HELPER FUNCTIONS: TAMEEM - NOT USED ################################################################################################################################# | |
| #NOT USED (pixel conversion instead) | |
def calcRef(img):
    """Measure a fixed white reference rectangle drawn on a black canvas.

    NOT USED — superseded by the pixel-conversion approach. Returns
    ``(area, perimeter, mask)`` for the rectangle's contour, where *mask*
    is the grayscale canvas the contour was found on.
    """
    try:
        canvas = np.zeros(img.shape, dtype=np.uint8)
        # Filled white rectangle at a fixed location (thickness -1 fills it).
        canvas = cv2.rectangle(canvas, (50, 100), (120, 200), (255, 255, 255), -1)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
        contours, _ = cv2.findContours(image=canvas, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
        for cnt in contours:
            if cv2.moments(cnt)['m00'] != 0.0:
                area = cv2.contourArea(cnt)
                perimeter = cv2.arcLength(cnt, True)
        # NOTE(review): if no contour had non-zero area, `area`/`perimeter`
        # are unbound here and the except branch below answers with a 500.
        return area, perimeter, canvas
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #NOT USED (pixel conversion instead) | |
def modifyingcalcRefDynamic(img):
    """Measure a reference rectangle sized at 10% of the image dimensions.

    NOT USED — superseded by the pixel-conversion approach. Draws the
    rectangle blue on a copy of *img* and white on a black canvas, then
    returns ``(area, perimeter, mask, preview)`` for its contour.
    """
    try:
        preview = img.copy()
        canvas = np.zeros(img.shape, dtype=np.uint8)
        # Anchor at (50, 100); extend by 10% of each image dimension.
        top_left = (50, 100)
        bottom_right = (50 + int(img.shape[1] * 0.10), 100 + int(img.shape[0] * 0.10))
        preview = cv2.rectangle(preview, top_left, bottom_right, (255, 0, 0), -1)    # filled blue (BGR)
        canvas = cv2.rectangle(canvas, top_left, bottom_right, (255, 255, 255), -1)  # filled white
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
        contours, _ = cv2.findContours(image=canvas, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
        for cnt in contours:
            if cv2.moments(cnt)['m00'] != 0.0:
                area = cv2.contourArea(cnt)
                perimeter = cv2.arcLength(cnt, True)
        # NOTE(review): `area`/`perimeter` are unbound if no contour qualified.
        return area, perimeter, canvas, preview
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### DROPBOX CODES ################################################################################################################################# | |
def downloadPDFfromLinkFunc():
    """Download a PDF from a share link given in the ``url`` query argument.

    Dropbox share links are rewritten to force a direct download
    (``dl=0`` -> ``dl=1``). Returns the PDF bytes as an attachment whose
    filename is derived from the URL, 400 on an invalid/missing link or a
    failed download.
    """
    try:
        encoded_url = request.args.get('url')
        link = urllib.parse.unquote(encoded_url) if encoded_url else ''
        # Bug fix: previously an invalid link fell through and the view
        # implicitly returned None; now it is an explicit 400.
        if not (link and ('http' in link or 'dropbox' in link)):
            return "Invalid or missing PDF link.", 400
        # Force direct download on Dropbox share links.
        if 'dl=0' in link:
            link = link.replace('dl=0', 'dl=1')
        elif 'www.dropbox.com' in link and '?dl=1' not in link:
            link += '?dl=1'
        try:
            res = requests.get(link)
            res.raise_for_status()
        except Exception as e:
            return f"Error downloading PDF from link: {e}", 400
        pdf_data = res.content
        # Bug fix: the derived filename was computed but never used — the
        # Content-Disposition header carried a stale literal instead.
        filename = link.split("/")[-1].split("?")[0] or "downloaded.pdf"
        response = make_response(pdf_data)  # no need to round-trip through BytesIO
        response.headers.set('Content-Type', 'application/pdf')
        response.headers.set('Content-Disposition', f'attachment; filename="{filename}"')
        return response
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #get pdf dropbox url after measurement is done | |
def calldropboxurl(jsdata):
    """Fetch the Dropbox share link for PDF *jsdata* and stream the PDF back.

    Resolves the share URL, forces a direct download, re-saves the document
    through PyMuPDF and returns it with an ``application/pdf`` content type.
    Returns 404 when no usable link is found.
    """
    try:
        print('jsdata', jsdata)
        pdfurl = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)[1]
        print('urll', pdfurl)
        # Bug fix: an unusable link previously fell off the end of the
        # function (implicit None -> Flask 500); answer explicitly instead.
        if not (pdfurl and ('http' in pdfurl or 'dropbox' in pdfurl)):
            return jsonify({"error": "No valid PDF link found."}), 404
        if 'dl=0' in pdfurl:
            pdfurl = pdfurl.replace('dl=0', 'dl=1')
        print('urll1', pdfurl)
        # Download the PDF content from the shareable link.
        response = requests.get(pdfurl)
        response.raise_for_status()  # bug fix: HTTP errors used to slip through silently
        # Bug fix: the old `BytesIO(...) is None` guard could never trigger —
        # a BytesIO instance is always truthy. Check the payload itself.
        if not response.content:
            raise ValueError("No valid PDF content found.")
        # Re-save through PyMuPDF so the output is a clean, standalone PDF.
        pdf_document = fitz.open(stream=BytesIO(response.content), filetype="pdf")
        pdf_bytes = BytesIO()
        pdf_document.save(pdf_bytes)
        return Response(pdf_bytes.getvalue(), content_type='application/pdf')
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def checkdropbox():
    """Sync the cached parquet index of '/TSA JOBS' with the live Dropbox listing.

    Diffs the fresh Dropbox listing against the cached parquet frame; when
    they differ, uploads the fresh listing as the new cache. Returns a short
    status string.
    """
    try:
        print('checkingggdf')
        dfFromDropbox = tsadropboxretrieval.DropboxItemstoDF("/TSA JOBS")[0]
        dfParquet = tsadropboxretrieval.GetParquetDF()
        dfParquet1 = dfParquet[['name', 'path_display', 'client_modified', 'server_modified']]
        # Rows appearing in exactly one of the two frames (symmetric difference).
        deletedrows = pd.concat([dfFromDropbox, dfParquet1]).drop_duplicates(keep=False)
        deletedrows = deletedrows.reset_index(drop=True)
        deletedrows.columns = ['name', 'path_display', 'client_modified', 'server_modified']
        differences = deletedrows[~deletedrows.isin(dfFromDropbox)].dropna()
        if len(differences) > 0:
            print(differences)
            tsadropboxretrieval.dropbox_upload_file(dfFromDropbox)
            stringReturned = 'Updated Sucessfully.'  # (sic) kept byte-for-byte for any callers matching it
        else:
            stringReturned = 'Nothing to update.'
        # Bug fix: previously returned the literal string 'stringReturned'
        # instead of the status variable.
        return stringReturned
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def refreshDropboxRetrievals(extractedPath):
    """Refresh the cached parquet index for everything under *extractedPath*.

    Drops every cached row whose path starts with *extractedPath*, appends
    the fresh Dropbox listing for that path, and re-uploads the parquet
    cache. Returns a short status string.
    """
    try:
        fresh = tsadropboxretrieval.DropboxItemstoDF(extractedPath)[0]
        cached = tsadropboxretrieval.GetParquetDF()
        cols = ['name', 'path_display', 'client_modified', 'server_modified']
        fresh = fresh[cols]
        cached = cached[cols]
        # Replace the refreshed subtree: drop stale rows, append the new listing.
        kept = cached[~cached['path_display'].str.startswith(extractedPath)]
        merged = pd.concat([kept, fresh], ignore_index=True)
        tsadropboxretrieval.dropbox_upload_file(merged)
        if len(fresh) > 0:
            print("Updated entries:", fresh)
            return 'Updated Successfully.'
        return 'Nothing to update.'
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def handle_path(encoded_path):
    """Decode a URL-encoded JSON path and refresh its Dropbox cache entries."""
    try:
        extracted = json.loads(urllib.parse.unquote(encoded_path))
        print('path to refresh', extracted)
        outcome = refreshDropboxRetrievals(extracted)
        print(outcome)
        return outcome
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### GET PATH OF PDF ################################################################################################################################# | |
def get_pdf(jsdata):
    """Download the PDF named *jsdata* from Dropbox and return it as an attachment."""
    try:
        print('pdfname', jsdata)
        # Resolve the Dropbox path, then pull the file via the team client.
        pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)
        print('pdfpath', pdfpath)
        dbx = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res = dbx.files_download(path=pdfpath)
        response = make_response(res.content)
        response.headers.set('Content-Type', 'application/pdf')
        response.headers.set('Content-Disposition', 'attachment', filename='filename.pdf')
        return response
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### PIXEL CONVERSION ################################################################################################################################# | |
def pixeltesting():
    """SSE endpoint: run pixel-scale conversion on a PDF fetched from a link.

    Reads a URL-encoded link from the ``url`` query argument, downloads the
    PDF, runs ``pixelconversion.drawisrotated`` on it, uploads the produced
    scale document to a fixed test path on Dropbox, and streams progress
    percentages plus the final JSON payload
    ``[areaPixel, perimeterPixel, dburl]`` as server-sent events.
    """
    try:
        encoded_url = request.args.get('url')
        # Strip surrounding quotes the frontend may include around the value.
        link = urllib.parse.unquote(encoded_url.strip('"'))
        if link and ('http' in link or 'dropbox' in link):
            # Force a direct download on Dropbox share links.
            if 'dl=0' in link:
                link = link.replace('dl=0', 'dl=1')
            elif 'www.dropbox.com' in link and '?dl=1' not in link:
                link += '?dl=1'
        pdf_path=link
        response = requests.get(pdf_path)
        pdf_content = BytesIO(response.content)
        # NOTE(review): a BytesIO object is always truthy, so this guard can
        # never fire — confirm whether `response.content` was intended.
        if not pdf_content:
            raise ValueError("No valid PDF content found.")
        progress_updates = []  # Shared list to store progress (never appended to in this route)
        def generate_progressPixel():
            # NOTE(review): this generator runs lazily while the Response is
            # streamed, so exceptions raised in here are NOT caught by the
            # outer try/except of pixeltesting.
            yield f"data: 10\n\n"  # Initial progress
            doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(pdf_content=pdf_content)  # .openDrawPDF(data)
            yield f"data: 20\n\n"
            # Fixed Dropbox test location for the generated scale document.
            dbPath='/TSA JOBS/ADR Test/'+'TestingLinks'+'/'+'Scale Document' +'/'
            dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str('testinglink') ,path=dbPath)  #PixelConv_
            # areaPixel,perimeterPixel= pixelconversion.getAreaPerimeter(dbPath, str(jsdata[3]))
            yield f"data: 40\n\n"
            outputs=[areaPixel,perimeterPixel , dburl]
            # Drain any queued progress values (none are produced in this route).
            while progress_updates:
                progress = progress_updates.pop(0)  # Get the next progress value
                yield f"data: {progress}\n\n"
            # Final progress ticks, then the JSON-encoded result payload.
            yield f"data: 80\n\n"
            yield f"data: 100\n\n"
            result = json.dumps(outputs)
            yield f"data: {result}\n\n"
        return Response(generate_progressPixel(), content_type='text/event-stream')
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #PIXEL CONVERSION METHOD -- SAVES DOC ON DROPBOX TO BE MEASURED BY USER | |
def getimg(jsdata):
    """SSE endpoint: build the pixel-scale document for a Dropbox-hosted PDF.

    *jsdata* is a string encoding a list — presumably
    ``[project, part, section, pdfname]`` (TODO confirm against the caller).
    The named PDF is downloaded from Dropbox, converted by
    ``pixelconversion.drawisrotated``, and the scale document is uploaded
    back; progress and the final ``[areaPixel, perimeterPixel, dburl]``
    payload are streamed as server-sent events.
    """
    try:
        progress_updates = []  # Shared list filled by the download progress callback
        # SECURITY: eval() on request-supplied data executes arbitrary Python —
        # prefer json.loads with a JSON-encoded payload.
        jsdata=eval(jsdata)
        print('piexxxeell',jsdata)
        def generate_progressPixel():
            # NOTE(review): runs lazily during response streaming, so errors
            # raised here bypass the outer try/except of getimg.
            yield f"data: 10\n\n"  # Initial progress
            # jsdata[3] is the PDF name; download progress lands in progress_updates.
            pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata[3], progress_callback=lambda p: progress_updates.append(p))
            dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
            md, res =dbxTeam.files_download(path=pdfpath)
            data = res.content
            # Sections labelled '1.0*' are rendered at 300 DPI.
            if str(jsdata[2]).startswith('1.0'):
                doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data=data,dpi=300)  # .openDrawPDF(data)
            else:
                doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data=data)  # .openDrawPDF(data)
            yield f"data: 20\n\n"
            # Upload target: /TSA JOBS/ADR Test/<project>/<part>/<section>/Scale Document/
            dbPath='/TSA JOBS/ADR Test/'+jsdata[0]+'/'+jsdata[1]+'/'+jsdata[2]+'/'+'Scale Document' +'/'
            dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str(jsdata[3]) ,path=dbPath)  #PixelConv_
            # areaPixel,perimeterPixel= pixelconversion.getAreaPerimeter(dbPath, str(jsdata[3]))
            yield f"data: 40\n\n"
            outputs=[areaPixel,perimeterPixel , dburl]
            # Flush progress values queued by the download callback.
            while progress_updates:
                progress = progress_updates.pop(0)  # Get the next progress value
                yield f"data: {progress}\n\n"
            # Final progress ticks, then the JSON-encoded result payload.
            yield f"data: 80\n\n"
            yield f"data: 100\n\n"
            result = json.dumps(outputs)
            yield f"data: {result}\n\n"
        return Response(generate_progressPixel(), content_type='text/event-stream')
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| ######################################################### DELETE MARKUPS ################################################################################################################################# | |
| #DELETE MARKUPS (for comparison) | |
def getnewlegend():
    """Delete the requested markups and return the removed legend rows.

    Expects a JSON body with ``dict1`` (markups to delete), ``path``,
    ``spreadsheetId`` and ``pdfpathpath`` (a string-encoded list of path
    segments). Responds with the deleted rows (as a dict) in a one-element
    JSON list.
    """
    try:
        pdfpth=''
        alljson = request.get_json()
        list1=alljson.get('dict1')
        print('list1',list1)
        # list1 = request.args.get('dict1')
        path=alljson.get('path')
        # path = request.args.get('path')
        spreadsheetId=alljson.get('spreadsheetId')  # NOTE(review): read but not used in this route
        # spreadsheetId =request.args.get('spreadsheetId')
        pdfpathpath=alljson.get('pdfpathpath')
        # pdfpathpath=request.args.get('pdfpathpath')
        print(pdfpathpath,type(pdfpathpath))
        pdfname=request.args.get('pdfname')  # NOTE(review): read but not used in this route
        # SECURITY: eval() on request data executes arbitrary Python; assumes
        # pdfpathpath is a string-encoded list of segments — prefer json.loads.
        for word in eval(pdfpathpath):
            pdfpth+='/' +word
        pdfpth+='/'
        # Markups live under .../<pdf path>/Measured Plan/ on Dropbox.
        dbPath='/TSA JOBS/ADR Test'+pdfpth+'Measured Plan/'
        print(pdfpth)
        deletedrows1=google_sheet_Legend.deletemarkups(list1=list1,dbPath=dbPath,path=path)
        arr1=[deletedrows1.to_dict()]
        print('arr,',arr1)
        return jsonify(arr1)
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| #--------------------------------------------------------------------------- | |
| #if user wishes to delete | |
def dltmarkupslegend():
    """Apply a confirmed markup deletion and regenerate the legend sheet.

    Expects a JSON body carrying the similar-area dictionary (``dict``), the
    rows the user confirmed for deletion (``deletedrows``), the sheet
    ``path``/``spreadsheetId``, per-row ``areaPermArr`` data, the ``section``
    label and ``pdfpathpath`` (string-encoded path segments). Dispatches to
    the section-specific deletion helper, then rewrites the Google-Sheets
    legend with one retry after 20s on failure.
    """
    try:
        print('IN deletemarkupsroute')
        pdfpth=''
        alljson = request.get_json()
        SimilarAreaDictionary=alljson.get('dict')
        # SimilarAreaDictionary = request.args.get('dict')
        deletedrows=alljson.get('deletedrows')
        print('deletedrowsssssssssssssssssssssssssssssss',deletedrows)
        # deletedrows = request.args.get('deletedrows')
        path=alljson.get('path')
        # path = request.args.get('path')
        spreadsheetId=alljson.get('spreadsheetId')
        # spreadsheetId =request.args.get('spreadsheetId')
        areaPermArr=alljson.get('areaPermArr')
        print('aaaaaaaaaaaaa',areaPermArr)
        # areaPermArr=request.args.get('areaPermArr')
        section=alljson.get('section')
        # section=request.args.get('section')
        pdfpathpath=alljson.get('pdfpathpath')
        # pdfpathpath=request.args.get('pdfpathpath')
        # SECURITY: eval() on request data executes arbitrary Python; assumes
        # pdfpathpath is a string-encoded list of segments — prefer json.loads.
        for word in eval(pdfpathpath):
            pdfpth+='/' +word
        pdfpth+='/'
        # myDict=eval(deletedrows)
        deletedrows=pd.DataFrame(deletedrows)
        print('deletedrows',deletedrows)
        # Section 2.7 substitutes the module-level cached hatched areas for
        # the posted array (hatched_areas2_7 is defined elsewhere in the file).
        if section.startswith('2.7'):
            areaPermArr=hatched_areas2_7
        # Dispatch to the deletion helper matching the section family.
        if section.startswith('1.0') or section.startswith('3.2') or section.startswith('3.3'):
            newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section,areaPermArr=areaPermArr)
        elif section.startswith('2.8') or section.startswith('2.6'):
            newlgnd=google_sheet_Legend.deletedoors(deletedrows,SimilarAreaDictionary)
            print('done wit 2.8 in deleting, didnt append yet ')
        else:
            newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section)
        # Rewrite the sheet; retry once after 20s (presumably for API rate
        # limits — TODO confirm and narrow the bare except below).
        try:
            newlgnd=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId ,pdfpath=pdfpth)  #new legend
        except:
            print("An exception occurred")
            time.sleep(20)
            newlgnd=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId,pdfpath=pdfpth)
        return jsonify('donee')
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### API ################################################################################################################################# | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def returnAPITables():
    """Return the three API tables (projects, parts, sections) as JSON dicts."""
    try:
        # API.update_df()
        tables = API.GenerateTables()
        return jsonify([tbl.to_dict() for tbl in tables])
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def checktables():
    """Diff the live API project/part/section tables against the module cache.

    Query args ``prjname``, ``prjpart`` and ``prjpartid`` select how deep the
    comparison goes (projects -> parts -> sections); only the first level
    with a difference is reported. On success the module-level
    ``cached_tables`` is replaced with the fresh tables and a JSON summary of
    added/removed names is returned.
    """
    try:
        projectname = request.args.get('prjname')
        projectpart = request.args.get('prjpart')  # only gates the sections branch below
        projectpartid = request.args.get('prjpartid')
        global cached_tables
        # ✅ Fix: check properly for missing/empty DataFrame
        if cached_tables["projects"] is None or cached_tables["projects"].empty:
            return jsonify(["No cached data available yet. Please fetch projects first."])
        # Get fresh values from API
        new_projects, new_parts, new_sections = API.GenerateTables()
        returnString = None
        # 1) Compare project names
        old_names = cached_tables["projects"]["ProjectName"].tolist()
        new_names = new_projects["ProjectName"].tolist()
        if set(old_names) != set(new_names):
            added = list(set(new_names) - set(old_names))
            removed = list(set(old_names) - set(new_names))
            returnString = ["Changes in Projects", "project", {"added": added, "removed": removed}]
        # 2) Compare parts (only reached when the project list itself is unchanged)
        elif projectname and cached_tables["parts"] is not None and not cached_tables["parts"].empty:
            # NOTE(review): .values[0] raises IndexError for an unknown
            # projectname; the generic except below turns that into a 500.
            prjid = new_projects.loc[new_projects['ProjectName'] == projectname, 'ProjectId'].values[0]
            old_parts = cached_tables["parts"].loc[cached_tables["parts"]["ProjectId"] == prjid, "ProjectPart"].tolist()
            new_parts_list = new_parts.loc[new_parts["ProjectId"] == prjid, "ProjectPart"].tolist()
            if set(old_parts) != set(new_parts_list):
                added = list(set(new_parts_list) - set(old_parts))
                removed = list(set(old_parts) - set(new_parts_list))
                returnString = ["Changes in Parts", "part", {"added": added, "removed": removed}]
        # 3) Compare sections (requires project name, part and part id)
        elif projectname and projectpart and projectpartid and cached_tables["sections"] is not None and not cached_tables["sections"].empty:
            prjid = new_projects.loc[new_projects['ProjectName'] == projectname, 'ProjectId'].values[0]
            old_sections = cached_tables["sections"]["ProjectSection"].tolist()
            new_sections_list = new_sections[
                (new_sections["ProjectId"] == prjid) &
                (new_sections["ProjectPartId"] == int(projectpartid))
            ]["ProjectSection"].tolist()
            if set(old_sections) != set(new_sections_list):
                added = list(set(new_sections_list) - set(old_sections))
                removed = list(set(old_sections) - set(new_sections_list))
                returnString = ["Changes in Sections", "section", {"added": added, "removed": removed}]
        if not returnString:
            returnString = ["No changes detected"]
        # ✅ update cache — done after the comparisons so the diff above saw the old values
        cached_tables["projects"] = new_projects
        cached_tables["parts"] = new_parts
        cached_tables["sections"] = new_sections
        return jsonify(returnString)
    except (ConnectionError, TimeoutError) as e:
        # Use app context when logging
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
def appendNewTables():
    """Push the freshly generated API tables to Google Sheets."""
    try:
        API.AppendtablestoSheets()
        return jsonify('appended')
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ######################################################### XML SUMMARY ################################################################################################################################# | |
def cvtSummarytoXML(jsdata):
    """Export the summary sheet as XML into the matching Dropbox folder.

    *jsdata* is a JSON-encoded pair: path segments first, document name
    second. Returns the created XML link as JSON.
    """
    try:
        segments, documentname = json.loads(jsdata)[0], json.loads(jsdata)[1]
        folder = '/TSA JOBS/ADR Test/' + ''.join(seg + '/' for seg in segments)
        print(folder)
        folder += 'XML/'
        xmllink = google_sheet_to_xml.create_xml(documentname=documentname, dbPath=folder)
        return jsonify(xmllink)
    except (ConnectionError, TimeoutError) as e:
        # Log connection failures through the app context.
        from flask import current_app
        with current_app.app_context():
            error_msg = log_error(str(e), issue_type="connection")
            return jsonify({"error": error_msg}), 500
    except Exception:
        # Anything else is a backend fault — log the full traceback.
        from flask import current_app
        with current_app.app_context():
            error_details = traceback.format_exc()
            error_msg = log_error(error_details, issue_type="backend")
            return jsonify({"error": error_msg}), 500
| ################################################################################################################################################################################################ | |
| ######################################################### MAIN ################################################################################################################################# | |
| ################################################################################################################################################################################################ | |
def runn():
    """Serve the Flask app on 0.0.0.0:7860 via gevent's WSGI server (blocks forever)."""
    from gevent.pywsgi import WSGIServer
    WSGIServer(("0.0.0.0", 7860), app).serve_forever()
def keep_alive():
    """Start the web server on a background thread so the scheduler loop can run."""
    Thread(target=runn).start()
# Module-level scheduler: start the web server on a background thread, then
# poll the clock forever, re-syncing the Dropbox cache every 12 hours.
dtn = datetime.datetime.now(datetime.timezone.utc)
print(dtn)
# First sync target: today at 21:00. NOTE(review): the datetime is naive, so
# .astimezone interprets it in the server's LOCAL timezone before converting
# to UTC — confirm this matches the intent in the original comment.
next_start = datetime.datetime(dtn.year, dtn.month, dtn.day, 21, 0, 0).astimezone(datetime.timezone.utc) #us - 2 = utc time (21 utc is 19:00 our time and 9 is 7 our time , it needs to run 9 utc time ____ )
print(next_start)
keep_alive()
# active_path = "/TSA Team Folder/ADR Test/Test/"
# NOTE(review): this loop runs at import time and never exits, so the
# __main__ guard below is effectively unreachable.
while 1:
    dtnNow = datetime.datetime.now(datetime.timezone.utc)
    print(dtnNow)
    if dtnNow >= next_start:
        next_start += datetime.timedelta(hours=12)  # twice per day
        print(next_start)
        checkdropbox()
    time.sleep(1800)  # poll every 30 minutes
if __name__ == "__main__":
    runn()