| | from __future__ import print_function |
| | from flask import Flask, render_template,request,flash , json, url_for,g , redirect , jsonify , send_file ,make_response |
| | import json |
| | import fitz |
| | from PIL import Image |
| | import cv2 |
| | import numpy as np |
| | import pilecaps_adr |
| | import base64 |
| | from db import dropbox_connect |
| | import cv2 |
| | import pandas as pd |
| | import time |
| | from io import BytesIO, StringIO |
| | import urllib |
| | import tempfile |
| | from flask import Flask, Response |
| | from werkzeug.wsgi import wrap_file |
| | import tameem3_2 |
| | import pypdfium2 as pdfium |
| | from urllib.parse import urlparse |
| | import re |
| | import pixelconversion |
| | import tameem2_1 |
| | import io |
| | from urllib.parse import unquote |
| | import API |
| | import MC_Templates_API |
| | import tsadropboxretrieval |
| | import doc_search |
| | import google_sheet_Legend |
| | import dxf__omar3_2 |
| | import requests |
| | import google_sheet_to_xml |
| | from threading import Thread |
| | import mainDBAlaa |
| | import datetime |
| | import doors_fasterrcnn |
| | import deploying_3_3 |
| | import Doors_Schedule |
| | import Code_2_7 |
| | import Counting_Columns_2_1 |
| | |
| | import ezdxf |
| | import Azure_api |
| | from flask import Flask, render_template, session, redirect, url_for |
| | from flask_session import Session |
# --- Flask application object and module-level state -----------------------
app = Flask(__name__)


# Google Sheets CSV-export links for the project / part / section lookup tabs.
prjnamesURL = 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=0'
prjpartsURL = 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=34865056'
prjsectionsURL = 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=1751466819'


# NOTE(review): `global` at module level is a no-op in Python; these lines only
# document that several route handlers later assign these names with their own
# `global` declarations (colorsused / pdflink / hatched_areas2_7 are a
# per-process cache shared between requests).
global colorsused
global pdflink

global hatched_areas2_7

# Per-process cache of the most recently retrieved lookup tables
# (pandas DataFrames), filled by the /getproject* routes.
cached_tables = {
    "projects": None,
    "parts": None,
    "sections": None
}


# Server-side sessions (flask_session) stored as local files.
app.config["SESSION_TYPE"] = "filesystem"
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_FILE_DIR"] = "./flask_session_files"
# NOTE(review): hard-coded secret key — anyone who knows it can forge session
# cookies; load it from an environment variable before deploying.
app.secret_key = "your_secret_key"


sess = Session()
sess.init_app(app)
| |
|
| | @app.route("/", methods=["GET", "POST"]) |
| | def getInfotoMeasure(): |
| | return render_template("gui2.html") |
| |
|
| | @app.route("/password", methods=["GET", "POST"]) |
| | def password_page(): |
| | return render_template("gui2.html") |
| |
|
| | @app.route("/check_password", methods=["POST"]) |
| | def check_password(): |
| | password = request.form.get("password") |
| | correct_password = "c900" |
| |
|
| | if password == correct_password: |
| | session["authenticated"] = True |
| | return jsonify({"authenticated": True}), 200 |
| | else: |
| | return jsonify({"authenticated": False}), 200 |
| |
|
| | @app.route("/mainGUI", methods=["GET", "POST"]) |
| | def main_gui(): |
| | if "authenticated" not in session or not session["authenticated"]: |
| | return redirect(url_for("password_page")) |
| | return render_template("proposed-GUI.html") |
| |
|
| |
|
| |
|
| | @app.route("/WordSearch",methods=["GET", "POST"]) |
| | def getInfo2toMeasure(): |
| | |
| | return render_template("wordSearch.html") |
| |
|
| |
|
| |
|
@app.route('/getprojectnames/', methods=['GET'])
def getprjnamesfromTestAPI():
    """SSE endpoint: list project names/ids while streaming progress events.

    Emits ``data: <percent>`` events, then a final event whose payload is the
    JSON-encoded ``[names, ids]`` pair. Also refreshes
    ``cached_tables['projects']`` as a side effect.
    """
    progress_updates = []

    def generate_progressProjnames():
        yield f"data: 10\n\n"
        # The callback only appends to progress_updates; those values are not
        # streamed until getPrjNames returns, so they arrive in one burst.
        prjnames, prjids = API.getPrjNames(
            progress_callback=lambda p: progress_updates.append(p)
        )

        global cached_tables
        cached_tables["projects"] = pd.DataFrame({
            "ProjectName": prjnames,
            "ProjectId": prjids
        })

        # Flush any progress values the callback accumulated.
        while progress_updates:
            progress = progress_updates.pop(0)
            yield f"data: {progress}\n\n"
        yield f"data: 80\n\n"
        yield f"data: 100\n\n"
        # Final event carries the JSON result rather than a progress number.
        result = json.dumps([prjnames, prjids])
        yield f"data: {result}\n\n"

    return Response(generate_progressProjnames(), content_type='text/event-stream')
| |
|
@app.route('/getprojectparts/<jsdata>', methods=['GET'])
def getprjpartsfromTestAPI(jsdata):
    """Fetch the parts of project ``jsdata`` (a project id) and cache them."""
    part_names, part_ids = API.getprjParts(jsdata)

    global cached_tables
    project_id = int(jsdata)
    cached_tables["parts"] = pd.DataFrame({
        "ProjectPart": part_names,
        "ProjectPartId": part_ids,
        "ProjectId": [project_id] * len(part_names),
    })

    return jsonify([part_names, part_ids])
| |
|
@app.route('/getprojectsections/<jsdata>', methods=['GET'])
def getprjSectionsfromTestAPI(jsdata):
    """Return the sections for a project/part pair (JSON list in the URL).

    Also caches the result in ``cached_tables['sections']``.
    """
    payload = json.loads(jsdata)

    project_id = payload[0]
    if project_id == 'testing':
        # Test-harness sentinel: call the API with no arguments.
        project_part_id = None
        sections = API.getprjSections()
    else:
        project_part_id = payload[1] if len(payload) > 1 else None
        sections = API.getprjSections(project_id, project_part_id)

    global cached_tables
    section_count = len(sections)
    cached_tables["sections"] = pd.DataFrame({
        "ProjectSection": sections,
        "ProjectId": [project_id] * section_count,
        "ProjectPartId": [project_part_id] * section_count,
    })

    return jsonify(sections)
| |
|
| |
|
@app.route('/getmethod/<jsdata>', methods=['GET'])
def get_javascript_data(jsdata):
    """SSE endpoint: retrieve a project's documents from Dropbox.

    Streams progress events, then a final event with the JSON-encoded
    ``[documentsToMeasure, RelevantDocuments, extracted_path]`` triple.
    """
    progress_updates = []

    def generate_progress():
        yield f"data: 5\n\n"
        yield f"data: 10\n\n"
        # The callback only appends; accumulated values are not streamed until
        # retrieveProjects returns, so intermediate progress arrives in a burst.
        documentsToMeasure, RelevantDocuments, extracted_path = tsadropboxretrieval.retrieveProjects(
            jsdata,
            progress_callback=lambda p: progress_updates.append(p)
        )

        while progress_updates:
            progress = progress_updates.pop(0)
            yield f"data: {progress}\n\n"

        yield f"data: 100\n\n"
        # Final event carries the JSON result rather than a progress number.
        result = json.dumps([documentsToMeasure, RelevantDocuments, extracted_path])
        yield f"data: {result}\n\n"

    return Response(generate_progress(), content_type='text/event-stream')
| |
|
| | |
@app.route('/WordSearch',methods=["GET", "POST"])
def searchDocument():
    """Word-search page.

    NOTE(review): the rule '/WordSearch' is also registered on
    getInfo2toMeasure above — one of the two registrations is redundant.
    """
    page = 'wordSearch.html'
    return render_template(page)
| |
|
@app.route('/tableDetection',methods=["GET", "POST"])
def TableDetection():
    """Table-detection page."""
    page = 'tableDetection.html'
    return render_template(page)
| |
|
| |
|
@app.route('/TestingMeasurement',methods=["GET", "POST"])
def TestingMeasurement():
    """Measurement-testing page."""
    page = 'TestingMeasurement.html'
    return render_template(page)
| |
|
| |
|
@app.route('/FindInitialMarkups',methods=["GET", "POST"])
def FindInitialMarkups():
    """Initial-markups page."""
    page = 'FindInitialMarkups.html'
    return render_template(page)
| |
|
| |
|
@app.route('/legends',methods=["GET", "POST"])
def legendDirectory():
    """Legend-directory page."""
    page = 'legendDirectory.html'
    return render_template(page)
| |
|
@app.route('/searchInDocs',methods=["GET", "POST"])
def getSearchinDocs():
    """Search a keyword across several projects' documents.

    Returns [results-table HTML, list of base64 PNG page images].
    """
    payload = request.get_json()
    keyword = payload.get('keyword')
    project_list = payload.get('listofprojs')
    print(keyword, project_list)
    results_df, images = doc_search.search_docs(keyword, project_list)
    encoded_images = [
        base64.b64encode(cv2.imencode('.png', image)[1]).decode('utf-8')
        for image in images
    ]
    return jsonify([results_df.to_html(index=False, escape=False), encoded_images])
| |
|
@app.route('/searchInFolder',methods=["GET", "POST"])
def getSearchinFolder():
    """Slow keyword search restricted to a single project's folder.

    Returns [results-table HTML, list of base64 PNG page images].
    """
    payload = request.get_json()
    keyword = payload.get('keyword')
    project_name = payload.get('ProjectName')
    results_df, images = doc_search.slow_search(keyword=keyword, project=project_name)
    encoded_images = [
        base64.b64encode(cv2.imencode('.png', image)[1]).decode('utf-8')
        for image in images
    ]
    return jsonify([results_df.to_html(index=False, escape=False), encoded_images])
| |
|
| |
|
| |
|
| | @app.route("/measurementConsole",methods=["GET", "POST"]) |
| | def measurementConsoleFn(): |
| | return render_template("proposed-GUI.html") |
| |
|
@app.route('/RetrieveMCTNames/',methods=['GET'])
def CallAPIforMCTNames():
    """Return the measurement-console template dictionary as JSON."""
    templates = MC_Templates_API.RetrieveMC_Templates_API()
    print('here')
    return jsonify(templates)
| |
|
| | |
| | |
| | |
def hexRGB(color):
    """Convert a '#RRGGBB' (or 'RRGGBB') hex string to an [R, G, B] int list."""
    hex_digits = color.lstrip('#')
    return [int(hex_digits[offset:offset + 2], 16) for offset in (0, 2, 4)]
| | |
| | |
@app.route('/updatepreviewimg/<jsdata>',methods=["GET", "POST"])
def getfromdropboxImg(jsdata):
    """Download a measured-plan PDF from Dropbox and return page 1 as base64 PNG.

    ``jsdata`` is a stringified Python list:
    ``[[project, part, section], pdfname]``.
    """
    import ast

    # Security fix: ast.literal_eval parses the same list literal as the old
    # eval() call but cannot execute arbitrary code embedded in the URL path.
    jsdata = ast.literal_eval(jsdata)
    print('pdfnameeee==', jsdata)

    # Rebuild the Dropbox path from the project/part/section components.
    dbPath = ('/TSA JOBS/ADR Test/' + jsdata[0][0] + '/' + jsdata[0][1] + '/'
              + jsdata[0][2] + '/Measured Plan/' + jsdata[1])
    print(dbPath)
    dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res = dbxTeam.files_download(path=dbPath)
    data = res.content

    # Render page 1 and hand it back as a base64-encoded PNG string.
    doc = fitz.open("pdf", data)
    page = doc[0]
    pix = page.get_pixmap()
    pl = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    img = np.array(pl)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    _, buffer = cv2.imencode('.png', img)
    return base64.b64encode(buffer).decode('utf-8')
| |
|
| |
|
@app.route('/savedetectedtables/',methods=["POST"])
def table():
    """Run Azure table detection over a linked PDF or several Dropbox PDFs.

    The ``answers`` form field is a stringified list whose first element is
    either a direct http(s) link or a list of PDF names. Responds with the
    generated .xlsx workbook, or HTTP 204 when nothing was detected.
    """
    import ast

    # Security fix: ast.literal_eval parses the same list literal as the old
    # eval() call but cannot execute arbitrary client-supplied code.
    tt = ast.literal_eval(request.form.get('answers'))
    print("Value of tt = ",tt)
    print("Value of tt[0] = ",tt[0])

    if tt[0].startswith('http'):
        # Single PDF fetched from a direct link.
        pdf_path = tt[0]
        if 'dl=0' in pdf_path:
            # Dropbox share links need dl=1 to return the file itself.
            pdf_path = pdf_path.replace('dl=0', 'dl=1')

        response = requests.get(pdf_path)
        # Bug fix: the old guard tested a BytesIO object, which is always
        # truthy; test the downloaded bytes instead.
        if not response.content:
            raise ValueError("No valid PDF content found.")

        excel_io = Azure_api.detect_tables([response.content], ['pdfname.pdf'])
    else:
        # One or more PDFs fetched from Dropbox by name.
        pdflist = []
        pdfnames = []
        for pdf_name in tt[0]:
            print("Value of tt[0][i] = ",pdf_name)
            pdfpath, _ = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=pdf_name)
            dbx = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
            _, res = dbx.files_download(path=pdfpath)
            pdflist.append(res.content)
            pdfnames.append(pdfpath)

        print("pdflist = ",pdflist)
        excel_io = Azure_api.detect_tables(pdflist, pdfnames)

    if excel_io is None:
        # Nothing detected: empty 204 so the front end can skip the download.
        return ('', 204)

    return send_file(
        excel_io,
        as_attachment=True,
        download_name='detected_tables.xlsx',
        mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
| |
|
@app.route('/savebase64Img/',methods=["POST"])
def stringToRGB():
    """Measurement entry point: dispatch to measureproject() by section/source.

    The ``answers`` form field is a stringified list: vv[0] is a direct
    http(s)/Dropbox link or a PDF name (or list of names); vv[5] is the section
    descriptor — a plain string in link mode, a [project, part, section] list
    in Dropbox mode. An uploaded ``file`` part carries the DXF needed by the
    3.2/3.3/2.7 pipelines. Returns measureproject()'s payload as JSON.
    """
    # NOTE(review): eval() on request data executes arbitrary client code;
    # ast.literal_eval would parse the same list literal safely.
    vv = eval(request.form.get('answers'))
    print(vv)
    if type(vv[0]) == list:
        vv[0] = vv[0][0]
    if vv[0].startswith('http'):
        # ---- direct-link mode: vv[5] is the section string itself ----
        if ( vv[5].startswith('3.2') or vv[5].startswith('3.3') or vv[5].startswith('2.7')) :
            print('3.2 section')

            # Normalise a Dropbox share link into a direct-download link.
            link = urllib.parse.unquote(vv[0].strip('"'))
            if link and ('http' in link or 'dropbox' in link):
                if 'dl=0' in link:
                    link = link.replace('dl=0', 'dl=1')
                elif 'www.dropbox.com' in link and '?dl=1' not in link:
                    link += '?dl=1'

            response = requests.get(link)

            pdf_content = BytesIO(response.content)
            # NOTE(review): a BytesIO object is always truthy, so this guard
            # never fires; checking response.content was probably intended.
            if not pdf_content:
                raise ValueError("No valid PDF content found.")
            if 'file' not in request.files:
                print('error, No file part in the request')
            else:
                file = request.files['file']
                print('file done, measuring')
                arr=measureproject(result=vv,dxffile=file,pdf_content=pdf_content)

            # NOTE(review): if the 'file' part was missing, arr is undefined
            # here and this line raises NameError.
            return jsonify(arr)

        if vv[5].startswith('2.8') or vv[5].startswith('2.6') or vv[5].startswith('2.1'):
            print(f"el mawgood fe vv[0]: {vv[0]}")

            # Same Dropbox link normalisation as above.
            link = urllib.parse.unquote(vv[0].strip('"'))
            if link and ('http' in link or 'dropbox' in link):
                if 'dl=0' in link:
                    link = link.replace('dl=0', 'dl=1')
                elif 'www.dropbox.com' in link and '?dl=1' not in link:
                    link += '?dl=1'

            response = requests.get(link)

            pdf_content = BytesIO(response.content)
            doc = fitz.open(stream=pdf_content, filetype="pdf")

            # Render page 1 at 300 dpi with rotation normalised.
            page=doc[0]
            if page.rotation!=0:
                page.set_rotation(0)
            pix = page.get_pixmap(dpi=300)
            pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples)

            arr=measureproject(result=vv,img=pl,pdf_content=pdf_content)

    else:
        # ---- Dropbox mode: vv[5] is [project, part, section] ----
        if ( vv[5][2].startswith('3.2') or vv[5][2].startswith('3.3') or vv[5][2].startswith('2.7')) :
            print('3.2 section')
            pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=vv[0])
            dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
            md, res =dbxTeam.files_download(path=pdfpath)
            dataDoc = res.content
            if 'file' not in request.files:
                print('error, No file part in the request')
            else:
                file = request.files['file']
                print('file done, measuring')
                arr=measureproject(vv,dataDoc,0,file)
            # NOTE(review): same undefined-arr risk as above when 'file' is
            # missing from the request.
            return jsonify(arr)

        if vv[5][2].startswith('1.0'):
            opencv_img,dataDoc = plan2img( str(vv[0]) )
            # 220 is a front-end sentinel: a cropped image was sent as base64
            # in vv[6]; use it instead of the freshly rendered page.
            if vv[1]==220:
                imgdata = base64.b64decode(vv[6])
                img=Image.open(io.BytesIO(imgdata))
                opencv_img= cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
            arr=measureproject(vv,dataDoc,opencv_img)

        if vv[5][2].startswith('2.8') or vv[5][2].startswith('2.6') or vv[5][2].startswith('2.1'):
            # Re-parse to restore vv[0] as the full list of plan names (it was
            # collapsed to its first element near the top of this function).
            vv = eval(request.form.get('answers'))
            print(f"el mawgood fe vv[0]: {vv[0]}")

            # Download and render every named plan; the pipeline receives the
            # raw bytes and the rendered pages as parallel lists.
            arr_s = []
            dataDocs = []
            pls = []
            for v in vv[0]:
                pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(v))
                dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
                md, res =dbxTeam.files_download(path=pdfpath)
                dataDoc = res.content
                dataDocs.append(dataDoc)
                doc = fitz.open("pdf",dataDoc)
                page=doc[0]
                if page.rotation!=0:
                    page.set_rotation(0)
                pix = page.get_pixmap(dpi=300)
                pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples)
                pls.append(pl)
            arr=measureproject(vv,dataDocs,pls)

    # NOTE(review): if no branch above matched (unknown section string), arr is
    # undefined here and this line raises NameError.
    return jsonify(arr)
| |
|
@app.route('/2.1Trial/',methods=["POST"])
def measure2_1():
    """Trial endpoint for the 2.1 pipeline: measure from a JSON payload."""
    payload = request.get_json()
    all_values = payload.get('allvalues')
    return measureproject(all_values)
| | |
| | |
def measureproject(result,dataDoc=0,img=0,dxffile=0,pdf_content=0):
    """Dispatch a measurement job to the pipeline matching its section.

    Parameters
    ----------
    result : list
        Front-end payload; ``result[0]`` is the PDF name or link and
        ``result[5]`` the section descriptor ('1.0', '2.x', '3.x', ...) —
        a plain string in link mode, a [project, part, section] list
        otherwise. Remaining slots are section-specific (colors, ratios,
        search terms, thickness, ...).
    dataDoc : bytes | list | int
        Raw PDF bytes (or a list of byte strings for the multi-plan
        2.8/2.6 flow); 0 when unused.
    img : object | int
        Rendered page image(s) (PIL image / numpy array, or a list of
        them); 0 when unused.
    dxffile : object | int
        Uploaded DXF file object for the 3.2/3.3/2.7 pipelines; 0 when
        unused.
    pdf_content : io.BytesIO | int
        PDF downloaded from a direct link ("testing link" mode); 0 (falsy)
        when the PDF is fetched from Dropbox by name instead.

    Returns
    -------
    list
        Section-specific payload for the front end (base64 PNG(s), legend
        dicts, spreadsheet ids/urls, uploaded-file links, ...); empty list
        if no section matched.
    """
    colorarr=[]
    global pdflink
    pdfpath='/'

    # Link mode writes into a fixed 'testinglink' folder; Dropbox mode
    # rebuilds the project/part/section folder path from result[5].
    if pdf_content:
        section=result[5]
        pdfpath+='testinglink/'
    else:
        section=result[5][2]
        for word in result[5]:
            pdfpath+=word +'/'

    arr=[]

    # --- Section 1.0: contour measurement by color ------------------------
    if section.startswith('1.0'):
        for item in result[2]:
            c=hexRGB(item)
            colorarr.append(c)
        print('RATIOS=',result[3], result[4])

        imgPerimeter1,image_new1,SimilarAreaDictionary , colorsUsed,spreadsheet_url, spreadsheetId,list1, pdflink, areas_Perimeters, namepathArr =pilecaps_adr.drawAllContours(dataDoc,img,result[1],colorarr, result[3], result[4], result[0],pdfpath)
        _, buffer = cv2.imencode('.png', image_new1)
        arr=[base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(),spreadsheet_url , spreadsheetId,colorsUsed,list1.to_dict(), pdflink, areas_Perimeters, namepathArr]

    # --- Sections 3.3 / 3.2: DXF-driven measurement -----------------------
    elif section.startswith('3.3') or section.startswith('3.2'):
        print('code of 3.3 and 3.2')
        # Persist the uploaded DXF to a temp file for the downstream parsers.
        dxfpath=dxffile.read()
        with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
            temp_file.write(dxfpath)
            temp_filename = temp_file.name
            print(temp_filename)

        print("result = ",result)
        SearchArray=result[6]

        if pdf_content:
            if section.startswith('3.3'):
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas , bax_pretty_xml, column_xml=deploying_3_3.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray, pdfpath,result[0],pdf_content)
            else:
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas, bax_pretty_xml, column_xml=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray, pdfpath,result[0],pdf_content)
        else:
            if section.startswith('3.3'):
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas , bax_pretty_xml, column_xml=deploying_3_3.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,pdfpath,result[0])
            else:
                doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas, bax_pretty_xml, column_xml=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] ,SearchArray,pdfpath,result[0])
        # Cache the legend colors for the later /mctnametoGoogleSheet call.
        global colorsused
        colorsused=list(SimilarAreaDictionary['Color'])
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        print(dbPath,result[0])
        # Upload the annotated PDF under the source file's own name.
        parsed_url = urlparse(result[0])
        filename = parsed_url.path.split('/')[-1]
        print(filename)
        nameofpdf=filename
        pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=nameofpdf)

        _, buffer = cv2.imencode('.png', outputimg)

        # Return the BAX/XML both inline (base64) and as uploaded Dropbox links.
        bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii")
        xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")

        bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
        xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)

        arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr ,bax_b64,xml_b64, bax_link,xml_link]

    # --- Section 2.7: DXF measurement with wall thickness -----------------
    elif section.startswith('2.7') :
        print('code of 2.7')
        dxfpath=dxffile.read()
        with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
            temp_file.write(dxfpath)
            temp_filename = temp_file.name
            print(temp_filename)

        SearchArray=result[6]
        Thickness=result[7]
        print(result)
        print("SearchArray = ",SearchArray)
        # Cache the measured areas for the later /deletemarkupsroute call.
        global hatched_areas2_7
        if pdf_content:
            doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas,bax_pretty_xml,column_xml=Code_2_7.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4],SearchArray,Thickness, pdfpath,result[0],pdf_content)
        else:
            doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas,bax_pretty_xml,column_xml=Code_2_7.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4],SearchArray,Thickness, pdfpath,result[0])

        hatched_areas2_7=hatched_areas
        colorsused=list(SimilarAreaDictionary['Color'])
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        print(dbPath,result[0])
        parsed_url = urlparse(result[0])
        filename = parsed_url.path.split('/')[-1]
        print(filename)
        nameofpdf=filename
        pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=nameofpdf)

        _, buffer = cv2.imencode('.png', outputimg)
        bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii")
        xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")

        bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
        xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)

        arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,[],namepathArr,bax_b64,xml_b64, bax_link,xml_link]

    # --- Sections 2.8 / 2.6: doors-schedule matching ----------------------
    elif section.startswith('2.8') or section.startswith('2.6'):
        imgss=[]
        dpxlinks=[]
        legendLinks=[]
        listofmarkups=[]
        SearchArray=result[7]
        print('searchhh:',SearchArray)

        if pdf_content:
            # The doors-schedule PDF is itself given as a link in result[6];
            # normalise it into a direct-download link.
            link = urllib.parse.unquote(result[6].strip('"'))
            if link and ('http' in link or 'dropbox' in link):
                if 'dl=0' in link:
                    link = link.replace('dl=0', 'dl=1')
                elif 'www.dropbox.com' in link and '?dl=1' not in link:
                    link += '?dl=1'

            response = requests.get(link)

            pdf_contentSched = BytesIO(response.content)

            annotatedimgs, pdf_document , list1, repeated_labels , not_found, bax_pretty_xml, column_xml =Doors_Schedule.mainRun(pdf_contentSched, dataDoc, SearchArray,pdf_content,pdf_contentSched)
        else:
            pdfpathDoors,_=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(result[6]))
            dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
            md, resDoors =dbxTeam.files_download(path=pdfpathDoors)
            dataDocDoorsSchedule = resDoors.content
            annotatedimgs, pdf_document , list1, repeated_labels , not_found, bax_pretty_xml, column_xml =Doors_Schedule.mainRun(dataDocDoorsSchedule, dataDoc, SearchArray)

        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        pdflink= tsadropboxretrieval.uploadanyFile(doc=pdf_document,path=dbPath,pdfname="combined_output.pdf")

        # Empty strings signal "nothing to report" to the front end.
        repeatedLabelsReturn=''
        NotFoundReturn=''
        if len(repeated_labels)>0:
            repeatedLabelsReturn=repeated_labels
        if len(not_found)>0:
            NotFoundReturn=not_found
        annotatedimgsBuffered=[]
        for b in annotatedimgs:
            _, buffer = cv2.imencode('.png', b)

            b64_str = base64.b64encode(buffer).decode('utf-8')
            annotatedimgsBuffered.append(b64_str)
        bax_b64 = base64.b64encode(bax_pretty_xml.encode("utf-8")).decode("ascii")
        xml_b64 = base64.b64encode(column_xml.encode("utf-8")).decode("ascii")
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        bax_link= tsadropboxretrieval.upload_string_file(content_str=bax_pretty_xml,filename="baxfile.bax",path=dbPath)
        xml_link= tsadropboxretrieval.upload_string_file(content_str=column_xml,filename="customCols.xml",path=dbPath)

        arr = [
            annotatedimgsBuffered,
            pdflink,
            list1.to_dict(),
            str(repeatedLabelsReturn),
            str(NotFoundReturn),
            bax_b64,
            xml_b64,
            bax_link,
            xml_link
        ]

    # --- Section 2.1: column counting -------------------------------------
    elif section.startswith('2.1'):
        imgss=[]
        dpxlinks=[]
        legendLinks=[]
        listofmarkups=[]

        annotatedimg,pdf_document,spreadsheet_url, list1 , df_doors=Counting_Columns_2_1.mainfun(dataDoc,pdfpath,result[0])

        # NOTE(review): `buffer` is never assigned in this branch (annotatedimg
        # is not cv2.imencode'd) and the global `pdflink` is only set if an
        # earlier request assigned it — as written this raises NameError or
        # reuses stale values from a previous request. annotatedimg should be
        # encoded here before building the response.
        arr=[base64.b64encode(buffer).decode('utf-8') ,pdflink,spreadsheet_url,list1.to_dict(), df_doors.to_dict()]

    return arr
| | |
| | |
| |
|
| | @app.route("/canvaspdftoimgBackground/<jsdata>",methods=["GET", "POST"]) |
| | def pdftoimgCanvas(jsdata): |
| | img=plan2img(jsdata)[0] |
| | _, buffer = cv2.imencode('.png', img) |
| | arr=[base64.b64encode(buffer).decode('utf-8') , img.shape[0],img.shape[1]] |
| | return jsonify(arr) |
| |
|
| |
|
| | |
def plan2img(nameofpdf):
    """Download `nameofpdf` from Dropbox and render page 1 as a BGR array.

    Returns (bgr_image, pdf_bytes).
    """
    pdf_path, _link = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
    dbx_team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    _meta, response = dbx_team.files_download(path=pdf_path)
    pdf_bytes = response.content

    document = fitz.open("pdf", pdf_bytes)
    first_page = document[0]
    if first_page.rotation != 0:
        # Normalise rotated pages before rasterising.
        first_page.set_rotation(0)
    pixmap = first_page.get_pixmap(dpi=300)
    pil_image = Image.frombytes('RGB', [pixmap.width, pixmap.height], pixmap.samples)
    bgr_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    return bgr_image, pdf_bytes
| | |
| | |
| |
|
| | |
def convert2img(path):
    """Render page 1 of the PDF at `path` (via pdfium) as a BGR numpy array."""
    document = pdfium.PdfDocument(path)
    first_page = document.get_page(0)
    rgb_array = np.array(first_page.render().to_pil())
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
| | |
| | |
| |
|
| | |
| | @app.route("/mctnametoGoogleSheet/<jsdata>",methods=["GET", "POST"]) |
| | def sendmctnametoLegend(jsdata): |
| | result = json.loads(jsdata) |
| | print(result) |
| | global pdflink |
| | summaryid=google_sheet_Legend.mapnametoLegend(result,colorsused,pdflink) |
| | allreturns=[summaryid] |
| | return jsonify(allreturns) |
| |
|
| | @app.route("/getguessednamepath/",methods=["GET", "POST"]) |
| | def getguessedNames(): |
| | guessednamesDrpdwn=google_sheet_Legend.getallguessednames() |
| | return jsonify(guessednamesDrpdwn) |
| | |
| | |
| | |
def calcRef(img):
    """Measure a fixed reference rectangle drawn on a black mask.

    Draws a filled white rectangle from (50, 100) to (120, 200) on a black
    image shaped like `img`, then returns its contour (area, perimeter, mask).
    """
    mask = np.zeros(img.shape, dtype="uint8")

    top_left = (50, 100)
    bottom_right = (120, 200)
    white = (255, 255, 255)
    filled = -1

    mask = cv2.rectangle(mask, top_left, bottom_right, white, filled)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    contours, _hierarchy = cv2.findContours(image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    # The mask holds a single rectangle, so this measures that one contour.
    for contour in contours:
        moments = cv2.moments(contour)
        if moments['m00'] != 0.0:
            area = cv2.contourArea(contour)
            perimeter = cv2.arcLength(contour, True)
    return area, perimeter, mask
| |
|
| | |
def modifyingcalcRefDynamic(img):
    """Draw a reference rectangle sized to 10% of the image and measure it.

    The rectangle starts at (50, 100) and spans 10% of the image width and
    height; it is drawn filled blue on a copy of `img` and white on a black
    mask. Returns (area, perimeter, mask, annotated_copy).
    """
    annotated = img.copy()
    mask = np.zeros(img.shape, dtype="uint8")

    origin_x, origin_y = 50, 100
    rect_width = int(img.shape[1] * 0.10)
    rect_height = int(img.shape[0] * 0.10)
    top_left = (origin_x, origin_y)
    bottom_right = (origin_x + rect_width, origin_y + rect_height)
    blue = (255, 0, 0)
    white = (255, 255, 255)
    filled = -1

    annotated = cv2.rectangle(annotated, top_left, bottom_right, blue, filled)
    mask = cv2.rectangle(mask, top_left, bottom_right, white, filled)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    contours, _hierarchy = cv2.findContours(image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    # The mask holds a single rectangle, so this measures that one contour.
    for contour in contours:
        moments = cv2.moments(contour)
        if moments['m00'] != 0.0:
            area = cv2.contourArea(contour)
            perimeter = cv2.arcLength(contour, True)
    return area, perimeter, mask, annotated
| | |
| | |
| |
|
| | |
@app.route('/downloadPDFfromLink')
def downloadPDFfromLinkFunc():
    """Proxy-download a PDF from a (possibly Dropbox) link as an attachment.

    Query param ``url`` is a percent-encoded link. Returns the PDF bytes with
    a Content-Disposition derived from the link, or a 400 with the error text.
    """
    encoded_url = request.args.get('url')
    link = urllib.parse.unquote(encoded_url)

    # Normalise Dropbox share links into direct-download links.
    if link and ('http' in link or 'dropbox' in link):
        if 'dl=0' in link:
            link = link.replace('dl=0', 'dl=1')
        elif 'www.dropbox.com' in link and '?dl=1' not in link:
            link += '?dl=1'

    try:
        res = requests.get(link)
        res.raise_for_status()
    except Exception as e:
        return f"Error downloading PDF from link: {e}", 400

    pdf_data = res.content
    filename = link.split("/")[-1].split("?")[0] or "downloaded.pdf"

    response = make_response(pdf_data)
    response.headers.set('Content-Type', 'application/pdf')
    # Bug fix: the computed `filename` was previously ignored and a literal
    # placeholder string was sent in the Content-Disposition header.
    response.headers.set('Content-Disposition', f'attachment; filename="{filename}"')
    return response
| |
|
@app.route('/get-pdf/<jsdata>')
def get_pdf(jsdata):
    """Stream the named PDF from Dropbox back to the browser as an attachment."""
    print('pdfname',jsdata)

    pdf_path, _link = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)
    print('pdfpath',pdf_path)
    dbx_team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    _meta, dropbox_response = dbx_team.files_download(path=pdf_path)
    pdf_bytes = dropbox_response.content

    http_response = make_response(pdf_bytes)
    http_response.headers.set('Content-Type', 'application/pdf')
    http_response.headers.set('Content-Disposition', 'attachment', filename='filename.pdf')
    return http_response
| |
|
| |
|
@app.route('/pixeltestingimg')
def pixeltesting():
    """SSE endpoint: compute the pixel scale of a PDF fetched from a link.

    Query param ``url`` is a percent-encoded Dropbox/HTTP link. Streams
    progress numbers, then a final event with the JSON-encoded
    ``[areaPixel, perimeterPixel, dropboxUrl]`` payload.
    """
    encoded_url = request.args.get('url')
    link = urllib.parse.unquote(encoded_url.strip('"'))
    # Normalise Dropbox share links into direct-download links.
    if link and ('http' in link or 'dropbox' in link):
        if 'dl=0' in link:
            link = link.replace('dl=0', 'dl=1')
        elif 'www.dropbox.com' in link and '?dl=1' not in link:
            link += '?dl=1'

    pdf_path=link
    response = requests.get(pdf_path)
    pdf_content = BytesIO(response.content)
    # NOTE(review): a BytesIO object is always truthy, so this guard never
    # fires; checking response.content was probably intended.
    if not pdf_content:
        raise ValueError("No valid PDF content found.")
    progress_updates = []
    def generate_progressPixel():

        yield f"data: 10\n\n"

        # Draw the scale rectangle on the document and measure it in pixels.
        doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(pdf_content=pdf_content)

        yield f"data: 20\n\n"
        dbPath='/TSA JOBS/ADR Test/'+'TestingLinks'+'/'+'Scale Document' +'/'
        dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str('testinglink') ,path=dbPath)

        yield f"data: 40\n\n"
        outputs=[areaPixel,perimeterPixel , dburl]
        while progress_updates:
            progress = progress_updates.pop(0)
            yield f"data: {progress}\n\n"

        yield f"data: 80\n\n"
        yield f"data: 100\n\n"
        # Final event carries the JSON result rather than a progress number.
        result = json.dumps(outputs)
        yield f"data: {result}\n\n"

    return Response(generate_progressPixel(), content_type='text/event-stream')
| | |
| | @app.route("/pixelimg/<jsdata>",methods=["GET", "POST"]) |
| | def getimg(jsdata): |
| | progress_updates = [] |
| | jsdata=eval(jsdata) |
| | print('piexxxeell',jsdata) |
| | def generate_progressPixel(): |
| | |
| | yield f"data: 10\n\n" |
| |
|
| | pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata[3], progress_callback=lambda p: progress_updates.append(p)) |
| |
|
| | dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user') |
| | md, res =dbxTeam.files_download(path=pdfpath) |
| | data = res.content |
| | |
| |
|
| | if str(jsdata[2]).startswith('1.0'): |
| | doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data=data,dpi=300) |
| | else: |
| | doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data=data) |
| | |
| | yield f"data: 20\n\n" |
| | dbPath='/TSA JOBS/ADR Test/'+jsdata[0]+'/'+jsdata[1]+'/'+jsdata[2]+'/'+'Scale Document' +'/' |
| | dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str(jsdata[3]) ,path=dbPath) |
| | |
| | yield f"data: 40\n\n" |
| | outputs=[areaPixel,perimeterPixel , dburl] |
| | while progress_updates: |
| | progress = progress_updates.pop(0) |
| | yield f"data: {progress}\n\n" |
| | |
| | yield f"data: 80\n\n" |
| | yield f"data: 100\n\n" |
| | result = json.dumps(outputs) |
| | yield f"data: {result}\n\n" |
| |
|
| | return Response(generate_progressPixel(), content_type='text/event-stream') |
| | |
| | |
| | |
@app.route('/getsunburst',methods=["GET", "POST"])
def getSunburst():
    """Build the document-tree sunburst structure and return it as JSON."""
    sunburst_tree = doc_search.prepare_sunburst()
    return jsonify(sunburst_tree.to_dict())
| | |
| | |
| | |
@app.route('/_submission',methods=["GET", "POST"])
def getnewlegend():
    """Delete the submitted markups from the legend sheet.

    JSON body: ``dict1`` (markup rows), ``path``, ``spreadsheetId``, and
    ``pdfpathpath`` (a stringified list of path components). Returns the
    deleted rows as a dict wrapped in a one-element list.
    """
    pdfpth=''
    alljson = request.get_json()
    list1=alljson.get('dict1')
    print('list1',list1)

    path=alljson.get('path')

    spreadsheetId=alljson.get('spreadsheetId')

    pdfpathpath=alljson.get('pdfpathpath')

    print(pdfpathpath,type(pdfpathpath))
    pdfname=request.args.get('pdfname')
    # NOTE(review): eval() on client-supplied text executes arbitrary code;
    # ast.literal_eval (or sending a real JSON list) would be safe.
    for word in eval(pdfpathpath):
        pdfpth+='/' +word
    pdfpth+='/'
    dbPath='/TSA JOBS/ADR Test'+pdfpth+'Measured Plan/'
    print(pdfpth)
    deletedrows1=google_sheet_Legend.deletemarkups(list1=list1,dbPath=dbPath,path=path)
    arr1=[deletedrows1.to_dict()]
    print('arr,',arr1)
    return jsonify(arr1)
| | |
| | |
@app.route('/deletemarkupsroute',methods=["GET", "POST"])
def dltmarkupslegend():
    """Delete markup rows from the legend and push the updated legend to Sheets.

    Expects a JSON body with: dict (similar-area dictionary), deletedrows,
    path, spreadsheetId, areaPermArr, section, and pdfpathpath (string-encoded
    list of Dropbox path segments). Dispatches to the section-specific delete
    helper, then rewrites the legend spreadsheet (with one retry on failure).
    """
    import ast

    print('IN deletemarkupsroute')
    alljson = request.get_json()
    SimilarAreaDictionary = alljson.get('dict')

    deletedrows = alljson.get('deletedrows')
    print('deletedrowsssssssssssssssssssssssssssssss', deletedrows)

    path = alljson.get('path')
    spreadsheetId = alljson.get('spreadsheetId')

    areaPermArr = alljson.get('areaPermArr')
    print('aaaaaaaaaaaaa', areaPermArr)

    section = alljson.get('section')
    pdfpathpath = alljson.get('pdfpathpath')

    # SECURITY FIX: literal_eval instead of eval() — pdfpathpath comes
    # straight from the request and must never be executed as code.
    pdfpth = ''.join('/' + word for word in ast.literal_eval(pdfpathpath)) + '/'

    deletedrows = pd.DataFrame(deletedrows)
    print('deletedrows', deletedrows)
    if section.startswith('2.7'):
        # 2.7 sections use the module-level hatched-area cache instead of the
        # client-sent array. NOTE(review): hatched_areas2_7 is only declared
        # global at the top of the file — confirm it is assigned before this
        # route can be hit, otherwise this raises NameError.
        areaPermArr = hatched_areas2_7
    if section.startswith(('1.0', '3.2', '3.3')):
        newlgnd = google_sheet_Legend.deletefromlegend(deletedrows=deletedrows, SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section, areaPermArr=areaPermArr)
    elif section.startswith(('2.8', '2.6')):
        newlgnd = google_sheet_Legend.deletedoors(deletedrows, SimilarAreaDictionary)
        print('done wit 2.8 in deleting, didnt append yet ')
    else:
        newlgnd = google_sheet_Legend.deletefromlegend(deletedrows=deletedrows, SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section)
    try:
        newlgnd = google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd, path=path, spreadsheetId=spreadsheetId, pdfpath=pdfpth)
    except Exception as e:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt
        # and SystemExit). Presumably a Sheets quota/rate error: wait, retry once.
        print("An exception occurred:", e)
        time.sleep(20)
        newlgnd = google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd, path=path, spreadsheetId=spreadsheetId, pdfpath=pdfpth)

    return jsonify('donee')
| |
|
| | |
| | |
| | |
@app.route('/getdropboxurl/<jsdata>',methods=["GET", "POST"])
def calldropboxurl(jsdata):
    """Resolve a PDF name to its Dropbox share URL, download it, and stream it back.

    jsdata is the PDF name; the route answers with the PDF bytes
    (application/pdf) or a 404 when no usable URL is found.
    """
    print('jsdata', jsdata)
    pdfurl = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)[1]
    print('urll', pdfurl)

    # BUG FIX: the original fell off the end of the function (implicitly
    # returning None, which Flask turns into a 500) whenever the URL was
    # missing or not a Dropbox/http link. Answer with an explicit 404 instead.
    if not pdfurl or ('http' not in pdfurl and 'dropbox' not in pdfurl):
        return Response('PDF not found.', status=404)

    # Force a direct download instead of the Dropbox preview page.
    if 'dl=0' in pdfurl:
        pdfurl = pdfurl.replace('dl=0', 'dl=1')
    print('urll1', pdfurl)

    response = requests.get(pdfurl)
    # Fail loudly on HTTP errors rather than feeding an error page to fitz.
    response.raise_for_status()
    pdf_content = BytesIO(response.content)

    # Re-save through PyMuPDF so the streamed bytes are a clean PDF.
    # (The old `if pdf_content is None` check was dead code: BytesIO() is
    # never None.)
    pdf_document = fitz.open(stream=pdf_content, filetype="pdf")
    pdf_bytes = BytesIO()
    pdf_document.save(pdf_bytes)
    return Response(pdf_bytes.getvalue(), content_type='application/pdf')
| |
|
| | |
| | |
| |
|
# Shared module state for the Find/annotate routes.
# pdf_content: declared global in receive_pdf_data — presumably holds the last
# annotated PDF bytes, though no visible code assigns it (TODO confirm).
pdf_content = None
# Page number where the keyword was found; used in the /view-pdf download
# name. NOTE(review): no visible writer updates it — verify against the rest
# of the file.
pageNumTextFound = 0
# Base URL of the hosted service (not referenced in the visible code).
BASE_URL = "https://marthee-nbslink.hf.space"
| |
|
| | @app.route("/", methods=["GET", "POST"]) |
| | def thismain(): |
| | print('ayhaga') |
| | return render_template("gui.html") |
| |
|
@app.route('/view-pdf', methods=['GET'])
def download_pdf():
    """Annotate a PDF with keyword highlights and return it inline.

    Query parameters (URL-encoded):
      - pdfLink: link to the source PDF
      - keyword: JSON array of keywords, or a single keyword string

    Returns the annotated PDF, or 400/404 with a message on bad input.
    """
    full_query_string = request.query_string.decode()
    parsed_params = urllib.parse.parse_qs(full_query_string)

    pdf_link = parsed_params.get('pdfLink', [None])[0]
    keyword = parsed_params.get('keyword', [None])[0]

    if not pdf_link or not keyword:
        return "Missing required parameters.", 400

    pdf_link = urllib.parse.unquote(pdf_link)
    keyword = urllib.parse.unquote(keyword)

    # keyword may be a JSON-encoded list; fall back to a single-keyword list.
    try:
        keyword = json.loads(keyword)
    except json.JSONDecodeError:
        keyword = [keyword]

    print("Extracted PDF Link:", pdf_link)
    print("Extracted Keywords:", keyword)

    # NOTE(review): Find_Hyperlinking_text is not among the visible imports of
    # this file — confirm it is imported elsewhere in the module.
    # (Removed the unused local `createDF` from the original.)
    pdf_content = Find_Hyperlinking_text.annotate_text_from_pdf([pdf_link], keyword)[0]
    if pdf_content is None:
        return "PDF content not found.", 404

    pdf_bytes = BytesIO(pdf_content)
    return send_file(
        pdf_bytes,
        mimetype='application/pdf',
        as_attachment=False,
        download_name=f"annotated_page_{pageNumTextFound}.pdf"
    )
| |
|
@app.route('/api/process-data', methods=['POST'])
def receive_pdf_data():
    """Annotate the configured PDF with keywords, upload the marked-up PDF and
    its markup-summary table to Dropbox, and return both share links as JSON.
    """
    global pdf_content, pageNumTextFound

    pdfLink, keyword = finddata()

    if not pdfLink or not keyword:
        return jsonify({"error": "Both 'pdfLink' and 'keyword' must be provided."}), 400

    try:
        print(pdfLink, keyword)

        # Annotate the source PDF and build the markup-summary table PDF.
        pdfbytes, pdf_document, df, tablepdfoutput = Find_Hyperlinking_text.annotate_text_from_pdf([pdfLink], keyword)
        dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')

        # Recover the original file name from the shared-link metadata.
        metadata = dbxTeam.sharing_get_shared_link_metadata(pdfLink)
        dbPath = '/TSA JOBS/ADR Test/FIND/'

        pdflink = tsadropboxretrieval.uploadanyFile(doc=pdf_document, path=dbPath, pdfname=metadata.name)
        print('LINKS0', pdflink)

        summary_name = metadata.name.rsplit(".pdf", 1)[0] + ' Markup Summary' + '.pdf'
        tablepdfLink = tsadropboxretrieval.uploadanyFile(doc=tablepdfoutput, path=dbPath, pdfname=summary_name)
        print(f"PDF successfully uploaded to Dropbox at")
        print('LINKS1', tablepdfLink)

        return jsonify({
            "message": "PDF processed successfully.",
            "PDF_MarkedUp": pdflink,
            'Table_PDF_Markup_Summary': tablepdfLink
        })

    except Exception as e:
        return jsonify({"error": str(e)}), 500
| |
|
def finddata():
    """Return the hard-coded (pdf_link, keywords) pair used by /api/process-data."""
    link = ('https://www.dropbox.com/scl/fi/hnp4mqigb51a5kp89kgfa/'
            '00801-ARC-20-ZZ-S-A-0002.pdf?rlkey=45abeoebzqw4qwnslnei6dkd6&st=m4yrcjm2&dl=1')
    terms = ['115 INTEGRATED MRI ROOM LININGS', '310 ACCURACY']
    return link, terms
| |
|
| |
|
| | |
| | |
| |
|
| | |
@app.route('/getdrivelinks/<jsdata>',methods=["GET", "POST"])
def getlinkscreated(jsdata):
    """List every generated legend spreadsheet with its Dropbox path, Drive
    timestamps, and spreadsheet id.

    Skips the bookkeeping sheets whose titles start with 'API' or 'Dropbox'.
    Returns a JSON list of [title, dropbox_path, drive_times, sheet_id].
    """
    spreadsheet_service, drive_service, gc = google_sheet_Legend.authorizeLegend()
    ids = gc.spreadsheet_ids()
    titles = gc.spreadsheet_titles()
    allpaths = []
    print('HEREEEEEEEEEE')

    # zip instead of range(len(...)); ids and titles are parallel lists.
    for sheet_id, title in zip(ids, titles):
        print('titles', title)
        if title.startswith(('API', 'Dropbox')):
            continue
        ws = gc.open(title)
        # Hoisted: the original fetched the developer metadata twice per sheet.
        path_metadata = ws.get_developer_metadata('path')
        print(path_metadata)
        file_times = drive_service.files().get(fileId=sheet_id, fields="createdTime, modifiedTime").execute()
        allpaths.append([title, path_metadata[0].value, file_times, sheet_id])

    return jsonify(allpaths)
| | |
| | |
@app.route('/getAPITables/',methods=["GET", "POST"])
def returnAPITables():
    """Generate the three API tables and return them as a JSON list of dicts."""
    projects, parts, sections = API.GenerateTables()
    return jsonify([frame.to_dict() for frame in (projects, parts, sections)])
| |
|
@app.route('/refreshAPI', methods=["GET", "POST"])
def checktables():
    """Diff freshly generated API tables against the module-level cache.

    Query params narrow the comparison: prjname (project), prjpart and
    prjpartid (part/section level). Reports the first difference found —
    projects first, then parts, then sections — and always refreshes the
    cache with the new tables before returning.

    Returns a JSON list: either a change report
    ["Changes in ...", level, {"added": [...], "removed": [...]}]
    or ["No changes detected"] / a "no cached data" message.
    """
    projectname = request.args.get('prjname')
    projectpart = request.args.get('prjpart')
    projectpartid = request.args.get('prjpartid')

    global cached_tables

    # Nothing to diff against until a first fetch has populated the cache.
    if cached_tables["projects"] is None or cached_tables["projects"].empty:
        return jsonify(["No cached data available yet. Please fetch projects first."])

    new_projects, new_parts, new_sections = API.GenerateTables()

    returnString = None

    # Level 1: project names (set comparison ignores order/duplicates).
    old_names = cached_tables["projects"]["ProjectName"].tolist()
    new_names = new_projects["ProjectName"].tolist()
    if set(old_names) != set(new_names):
        added = list(set(new_names) - set(old_names))
        removed = list(set(old_names) - set(new_names))
        returnString = ["Changes in Projects", "project", {"added": added, "removed": removed}]

    # Level 2: parts of the requested project (only when projects are unchanged).
    elif projectname and cached_tables["parts"] is not None and not cached_tables["parts"].empty:
        # NOTE(review): .values[0] raises IndexError if projectname is not in
        # the new table — confirm callers only pass known project names.
        prjid = new_projects.loc[new_projects['ProjectName'] == projectname, 'ProjectId'].values[0]
        old_parts = cached_tables["parts"].loc[cached_tables["parts"]["ProjectId"] == prjid, "ProjectPart"].tolist()
        new_parts_list = new_parts.loc[new_parts["ProjectId"] == prjid, "ProjectPart"].tolist()
        if set(old_parts) != set(new_parts_list):
            added = list(set(new_parts_list) - set(old_parts))
            removed = list(set(old_parts) - set(new_parts_list))
            returnString = ["Changes in Parts", "part", {"added": added, "removed": removed}]

    # Level 3: sections of the requested part. The cached side is NOT filtered
    # by project/part while the new side is — presumably the cache only ever
    # holds one part's sections; TODO confirm.
    elif projectname and projectpart and projectpartid and cached_tables["sections"] is not None and not cached_tables["sections"].empty:
        prjid = new_projects.loc[new_projects['ProjectName'] == projectname, 'ProjectId'].values[0]
        old_sections = cached_tables["sections"]["ProjectSection"].tolist()
        new_sections_list = new_sections[
            (new_sections["ProjectId"] == prjid) &
            (new_sections["ProjectPartId"] == int(projectpartid))
        ]["ProjectSection"].tolist()
        if set(old_sections) != set(new_sections_list):
            added = list(set(new_sections_list) - set(old_sections))
            removed = list(set(old_sections) - set(new_sections_list))
            returnString = ["Changes in Sections", "section", {"added": added, "removed": removed}]

    if not returnString:
        returnString = ["No changes detected"]

    # Refresh the cache regardless of whether anything changed.
    cached_tables["projects"] = new_projects
    cached_tables["parts"] = new_parts
    cached_tables["sections"] = new_sections

    return jsonify(returnString)
| |
|
@app.route('/refreshDropbox',methods=["GET", "POST"])
def checkdropbox():
    """Compare the live Dropbox listing against the cached parquet snapshot
    and re-upload the snapshot when they differ.

    Returns a short status string describing whether an update happened.
    """
    print('checkingggdf')
    dfFromDropbox = tsadropboxretrieval.DropboxItemstoDF("/TSA JOBS")[0]
    dfParquet = tsadropboxretrieval.GetParquetDF()

    dfParquet1 = dfParquet[['name', 'path_display', 'client_modified', 'server_modified']]

    # Rows present in exactly one of the two frames (symmetric difference).
    deletedrows = pd.concat([dfFromDropbox, dfParquet1]).drop_duplicates(keep=False)
    deletedrows = deletedrows.reset_index(drop=True)
    deletedrows.columns = ['name', 'path_display', 'client_modified', 'server_modified']
    differences = deletedrows[~deletedrows.isin(dfFromDropbox)].dropna()
    if len(differences) > 0:
        print(differences)
        # Upload return value was bound to an unused name before; ignore it.
        tsadropboxretrieval.dropbox_upload_file(dfFromDropbox)
        stringReturned = 'Updated Sucessfully.'
    else:
        stringReturned = 'Nothing to update.'
    # BUG FIX: the original returned the string literal 'stringReturned',
    # so callers always saw that literal instead of the actual status.
    return stringReturned
| |
|
def refreshDropboxRetrievals(extractedPath):
    """Refresh the parquet snapshot for one Dropbox subtree.

    Replaces every cached row whose path falls under ``extractedPath`` with a
    fresh listing of that subtree, then re-uploads the merged snapshot.
    Returns a short status string.
    """
    fresh_listing = tsadropboxretrieval.DropboxItemstoDF(extractedPath)[0]
    snapshot = tsadropboxretrieval.GetParquetDF()

    columns = ['name', 'path_display', 'client_modified', 'server_modified']
    snapshot = snapshot[columns]
    fresh_listing = fresh_listing[columns]

    # Drop the stale rows for this subtree, then append the fresh listing.
    kept_rows = snapshot[~snapshot['path_display'].str.startswith(extractedPath)]
    merged = pd.concat([kept_rows, fresh_listing], ignore_index=True)

    tsadropboxretrieval.dropbox_upload_file(merged)

    if len(fresh_listing) > 0:
        print("Updated entries:", fresh_listing)
        return 'Updated Successfully.'
    return 'Nothing to update.'
| |
|
| |
|
| |
|
| |
|
| |
|
@app.route('/postdropboxprojects/<path:encoded_path>')
def handle_path(encoded_path):
    """Decode a URL- and JSON-encoded Dropbox path, then refresh its cached listing."""
    extracted_path = json.loads(urllib.parse.unquote(encoded_path))
    print('path to refresh', extracted_path)
    status = refreshDropboxRetrievals(extracted_path)
    print(status)
    return status
| |
|
| |
|
| |
|
@app.route('/refreshAPIAppendNewTables',methods=["GET", "POST"])
def appendNewTables():
    """Append the freshly generated API tables to the Google Sheets store."""
    API.AppendtablestoSheets()
    return jsonify('appended')
| |
|
| |
|
@app.route('/summarytoXML/<jsdata>',methods=["GET", "POST"])
def cvtSummarytoXML(jsdata):
    """Convert a job's summary sheet into XML stored under its XML folder.

    jsdata is a JSON-encoded pair: [path_segments, document_name].
    Returns the link to the created XML file as JSON.
    """
    result = json.loads(jsdata)
    # result[0] holds the Dropbox path segments for the job folder.
    base_path = '/TSA JOBS/ADR Test/' + ''.join(segment + '/' for segment in result[0])
    print(base_path)
    xml_folder = base_path + 'XML/'

    xmllink = google_sheet_to_xml.create_xml(documentname=result[1], dbPath=xml_folder)
    return jsonify(xmllink)
| |
|
| |
|
| | |
| | |
| |
|
| | |
| | |
| |
|
| | |
| | |
def runn():
    """Serve the Flask app on 0.0.0.0:7860 via gevent (blocks forever)."""
    from gevent.pywsgi import WSGIServer

    server = WSGIServer(("0.0.0.0", 7860), app)
    server.serve_forever()
| | |
| |
|
| |
|
def keep_alive():
    """Start the web server on a background thread so the main thread stays free."""
    Thread(target=runn).start()
| |
|
# Module-level scheduler: start the server thread, then poll every 30 minutes
# and run the Dropbox refresh every 12 hours starting from 21:00.
dtn = datetime.datetime.now(datetime.timezone.utc)
print(dtn)
# NOTE(review): datetime(...) here is naive, so .astimezone() interprets 21:00
# as *local* server time before converting to UTC — confirm that is intended.
next_start = datetime.datetime(dtn.year, dtn.month, dtn.day, 21, 0, 0).astimezone(datetime.timezone.utc)
print(next_start)
keep_alive()

# Runs at import time and never exits; the server keeps serving on the
# keep_alive() thread while this loop ticks the schedule.
while 1:
    dtnNow = datetime.datetime.now(datetime.timezone.utc)
    print(dtnNow)
    if dtnNow >= next_start:
        # Advance the deadline first so a slow checkdropbox() can't double-fire.
        next_start += datetime.timedelta(hours=12)
        print(next_start)
        checkdropbox()

    time.sleep(1800)

# NOTE(review): unreachable — the while loop above never terminates, so this
# guard (and the second server it would start) never runs.
if __name__ == "__main__":
    runn()
| | |
| | |