from __future__ import print_function
# NOTE(review): several imports below are duplicated (Flask, cv2, base64; stdlib `json`
# also shadows the `json` imported from flask). Kept as-is; confirm before pruning.
from flask import Flask, render_template,request,flash , json, url_for,g , redirect , jsonify , send_file ,make_response
import os
import json  # stdlib json; shadows flask's `json` imported above
import fitz  # PyMuPDF -- used to open/render PDFs downloaded from Dropbox
from PIL import Image
import cv2
import numpy as np
import pilecaps_adr  # section 1.0 (substructure / pile caps) measurement engine
import base64
from db import dropbox_connect
import db
import cv2  # duplicate import (harmless)
import pandas as pd
import time
from io import BytesIO, StringIO
import tempfile
from flask import Flask, Response  # duplicate Flask import (harmless)
from werkzeug.wsgi import wrap_file
import tameem3_2
import pypdfium2 as pdfium
import pixelconversion  # scale/pixel-conversion helpers
import tameem2_1
import base64  # duplicate import (harmless)
import io
from urllib.parse import unquote
import API  # project/part/section tables (Google-Sheets backed)
import MC_Templates_API
import tsadropboxretrieval  # Dropbox team-folder access helpers
import doc_search
import google_sheet_Legend
import dxf__omar3_2
import google_sheet_to_xml
from threading import Thread
import mainDBAlaa
import datetime
import doors_fasterrcnn  # section 2.8 (doors) detector
import deploying_3_3  # sections 3.2 / 3.3 (floor finishes) engine
import Counting_Columns_2_1  # section 2.1 (columns) counter
import ezdxf

app = Flask(__name__)

# CSV-export URLs for the three Google-Sheets tabs backing the project name /
# part / section tables (same spreadsheet, different gid per tab).
prjnamesURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=0'
prjpartsURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=34865056'
prjsectionsURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=1751466819'

# NOTE(review): `global` at module level is a no-op; these lines only document the
# two module-level names that view functions below assign/read.
global colorsused
global pdflink


@app.route("/",methods=["GET", "POST"])
def getInfotoMeasure():
    """Landing page: push the latest API tables to the sheets, then serve the GUI."""
    API.AppendtablestoSheets()
    return render_template("proposed-GUI.html")


@app.route('/getprojectnames/',methods=['GET'])
def getprjnamesfromTestAPI():
    """Return [project names, project ids] for the first GUI dropdown."""
    prjnames,prjids=API.getPrjNames()
    # API.AppendtablestoSheets()
    return jsonify([prjnames,prjids])


# NOTE(review): the route rule has no <jsdata> converter although the view takes a
# `jsdata` argument -- presumably the client appends it to the URL and the rule is
# matched elsewhere; TODO confirm how this endpoint is actually invoked.
@app.route('/getprojectparts/',methods=['GET'])
def getprjpartsfromTestAPI(jsdata):
    """Return [parts, part ids] for the project identified by `jsdata`."""
    print('kkkk',jsdata)
    prjparts,partsIds=API.getprjParts(jsdata)
    return jsonify([prjparts,partsIds])


# NOTE(review): same missing-converter pattern as above -- TODO confirm.
@app.route('/getprojectsections/',methods=['GET'])
def getprjSectionsfromTestAPI(jsdata):
    """Return the sections for a (project, part) pair encoded as a JSON list."""
    data=json.loads(jsdata)
    sections=API.getprjSections(data[0],data[1])
    return jsonify(sections)


#retrieves projects
# NOTE(review): missing <jsdata> converter here as well -- TODO confirm.
@app.route('/getmethod/',methods=['GET','POST'])
def get_javascript_data(jsdata):
    """Look up measurable documents for the project chosen in the first dropdown."""
    #get project from the first dropdown
    # jsdata=eval(jsdata)
    print('tsa')
    documnetsToMeasure,RelevantDocuments=tsadropboxretrieval.retrieveProjects(jsdata)
    if RelevantDocuments:
        return jsonify ([documnetsToMeasure, RelevantDocuments])
    else:
        return ['No projects found']
#---------------------------------------------------------------


@app.route('/WordSearch',methods=["GET", "POST"])
def searchDocument():
    """Serve the word-search page."""
    return render_template('wordSearch.html')


@app.route('/legends',methods=["GET", "POST"])
def legendDirectory():
    """Serve the legend-directory page."""
    return render_template('legendDirectory.html')


@app.route('/searchInDocs',methods=["GET", "POST"])
def getSearchinDocs():
    """Search `keyword` across the given projects; return [results table HTML, base64 PNGs]."""
    arr=[]
    values = request.get_json()
    keyword=values.get('keyword')
    listpfProjs=values.get('listofprojs')
    print(keyword,listpfProjs)
    df,img_list=doc_search.search_docs(keyword,listpfProjs)
    for img in img_list:
        # PNG-encode each match preview so the browser can display it inline.
        _, buffer = cv2.imencode('.png', img)
        arr.append(base64.b64encode(buffer).decode('utf-8'))
    return jsonify([df.to_html(index=False, escape=False),arr])


@app.route('/searchInFolder',methods=["GET", "POST"])
def getSearchinFolder():
    """Slow per-folder search of `keyword` in one project; same return shape as above."""
    arr=[]
    values = request.get_json()
    keyword=values.get('keyword')
    projname=values.get('ProjectName')
    df,img_list=doc_search.slow_search(keyword=keyword,project=projname)
    for img in img_list:
        _, buffer = cv2.imencode('.png', img)
        arr.append(base64.b64encode(buffer).decode('utf-8'))
    return jsonify([df.to_html(index=False, escape=False),arr])


@app.route("/measurementConsole",methods=["GET", "POST"])
def measurementConsoleFn():
    """Serve the measurement-console GUI (same template as the landing page)."""
    return render_template("proposed-GUI.html")


@app.route('/RetrieveMCTNames/',methods=['GET'])
def CallAPIforMCTNames():
    """Return the dictionary of measurement-console templates."""
    # print(jsdata)
    DictionaryOfTemplates=MC_Templates_API.RetrieveMC_Templates_API()
    # jsdata=jsdata.replace('"', '')
    print('here')
    return jsonify(DictionaryOfTemplates) #[str(jsdata).lower()]
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#Hex value to RGB value
def hexRGB(color):
    """Convert a '#RRGGBB' (or 'RRGGBB') hex string to a [R, G, B] list of ints."""
    color=color.lstrip('#')
    color= tuple(int(color[i:i+2], 16) for i in (0, 2, 4)) #hex to rgb
    # NOTE(review): original comment said "rgb to bgr", but this only converts the
    # tuple to a list -- no channel reordering happens here.
    color=list(color)
    return color
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


@app.route('/updatepreviewimg/',methods=["GET", "POST"])
def getfromdropboxImg(jsdata):
    """Render page 0 of an already-measured plan stored on Dropbox as a base64 PNG.

    `jsdata` is eval'd into [[project, part, section], pdf_name].
    SECURITY NOTE(review): `eval` on request-supplied data is code injection --
    replace with `json.loads` once the client payload format is confirmed.
    """
    # vv = eval(request.form.get('pdflink'))
    pdfpath=''
    # jsdata=jsdata.replace('"', '')
    jsdata=eval(jsdata)
    print('pdfnameeee==',jsdata)
    dbPath='/TSA JOBS/ADR Test/'+jsdata[0][0]+'/'+jsdata[0][1]+'/'+jsdata[0][2]+'/Measured Plan/'+jsdata[1]
    print(dbPath)
    dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path=dbPath)
    data = res.content
    doc = fitz.open("pdf",data)
    page=doc[0]
    pix = page.get_pixmap() # render page to an image
    pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples)
    img=np.array(pl)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    _, buffer = cv2.imencode('.png', img)
    return base64.b64encode(buffer).decode('utf-8')


@app.route('/savebase64Img/',methods=["POST"])
def stringToRGB():
    """Entry point for running a measurement from the GUI.

    The form field 'answers' is eval'd (SECURITY NOTE(review): replace with
    json.loads) into a list `vv` where vv[0]=pdf name, vv[5]=[project, part,
    section], and the section prefix (1.0 / 2.1 / 2.8 / 3.2 / 3.3) selects how
    the source document is prepared before delegating to measureproject().
    """
    vv = eval(request.form.get('answers'))
    print(vv)
    if ( vv[5][2].startswith('3.2') or vv[5][2].startswith('3.3') ) :
        # 3.2 / 3.3: download the PDF and pass the uploaded DXF file through.
        print('3.2 section')
        pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=vv[0])
        dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res =dbxTeam.files_download(path=pdfpath)
        dataDoc = res.content
        if 'file' not in request.files:
            print('error, No file part in the request')
        else:
            file = request.files['file']
            print('file done, measuring')
            arr=measureproject(vv,dataDoc,0,file)
            return jsonify(arr)
    if vv[5][2].startswith('1.0'):
        # 1.0: render the plan to an OpenCV image; vv[1]==220 means the client
        # sent a (possibly user-edited) base64 image in vv[6] -- use it instead.
        opencv_img,dataDoc = plan2img( str(vv[0]) )
        if vv[1]==220:
            imgdata = base64.b64decode(vv[6])
            img=Image.open(io.BytesIO(imgdata))
            opencv_img= cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        arr=measureproject(vv,dataDoc,opencv_img)
    if vv[5][2].startswith('2.8') or vv[5][2].startswith('2.1'):
        # 2.8 / 2.1: download the PDF, normalize rotation, render at 300 dpi (PIL image).
        pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF= str(vv[0]))
        dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res =dbxTeam.files_download(path=pdfpath)
        dataDoc = res.content
        doc = fitz.open("pdf",dataDoc)
        page=doc[0]
        if page.rotation!=0:
            page.set_rotation(0)
        pix = page.get_pixmap(dpi=300) # render page to an image
        pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples)
        arr=measureproject(vv,dataDoc,pl)
    # NOTE(review): if no section branch matched, `arr` is unbound here (NameError).
    return jsonify(arr)


@app.route('/2.1Trial/',methods=["POST"])
def measure2_1():
    """Trial endpoint: run measureproject() directly on the posted 'allvalues'."""
    name = request.get_json()
    result=name.get('allvalues')
    arr=measureproject(result)
    return arr


#MAIN FUNCTION -- calls python code to measure the chosen plan from the interface
# @app.route('/projecttomeasure/',methods=["GET","POST"])
def measureproject(result,dataDoc=0,img=0,dxffile=0):
    """Dispatch a measurement run to the engine for the chosen section.

    result  -- list from the GUI: [0] pdf name, [1] flag, [2] hex colors,
               [3]/[4] scale ratios, [5] [project, part, section].
    dataDoc -- raw PDF bytes (0 when unused by the branch).
    img     -- rendered page image (OpenCV or PIL, branch-dependent; 0 when unused).
    dxffile -- uploaded DXF file object for 3.2/3.3 (0 otherwise).
    Returns a list `arr` whose layout depends on the section branch; mutates the
    module globals `pdflink` and (for 3.2/3.3) `colorsused`.
    """
    colorarr=[]
    global pdflink
    # result = json.loads(jsdata)
    pdfpath='/'
    ################################# -1.0- #################################
    # Build '/project/part/section/' from result[5] for Dropbox paths.
    for word in result[5]:
        pdfpath+=word +'/'
    arr=[]
    if result[5][2].startswith('1.0'): #section value - 1.0 substructure- pile caps
        for item in result[2]:
            # item1 ='#'+item
            c=hexRGB(item)
            colorarr.append(c)
        print('RATIOS=',result[3], result[4])
        # cv2.imwrite(img,'imgg.png')
        # global pdflink
        imgPerimeter1,image_new1,SimilarAreaDictionary , colorsUsed,spreadsheet_url, spreadsheetId,list1, pdflink, areas_Perimeters, namepathArr =pilecaps_adr.drawAllContours(dataDoc,img,result[1],colorarr, result[3], result[4], result[0],pdfpath)
        _, buffer = cv2.imencode('.png', image_new1)
        arr=[base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(),spreadsheet_url , spreadsheetId,colorsUsed,list1.to_dict(), pdflink, areas_Perimeters, namepathArr]
    ################################# -3.2- #################################
    # (older 3.2-only branch, superseded by the combined 3.3/3.2 branch below)
    # elif result[5][2].startswith('3.2'): #section value - 3.2 floor finishes
    #     print('IN HEREEEE 3.2')
    #     dxfpath=dxffile.read()
    #     with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
    #         temp_file.write(dxfpath)
    #         temp_filename = temp_file.name
    #     print(temp_filename)
    #     doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] , pdfpath,result[0])
    #     dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
    #     pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
    #     _, buffer = cv2.imencode('.png', outputimg)
    #     arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
    ################################# -3.3 or 3.2- #################################
    elif result[5][2].startswith('3.3') or result[5][2].startswith('3.2'): #section value - 3.2 floor finishes
        print('code of 3.3 and 3.2')
        # Spool the uploaded DXF to a temp file for the engine (delete=False: the
        # engine reads it by name after the `with` block; file is never cleaned up).
        dxfpath=dxffile.read()
        with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
            temp_file.write(dxfpath)
            temp_filename = temp_file.name
        print(temp_filename)
        doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas=deploying_3_3.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] , pdfpath,result[0])
        global colorsused
        colorsused=list(SimilarAreaDictionary['Color'])
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        print(dbPath,result[0])
        # NOTE(review): the `pdflink=` assignment is commented out, so the upload
        # result is discarded and `arr` below reuses whatever value the module
        # global `pdflink` held from an earlier request -- confirm this is intended.
        # pdflink=
        tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
        _, buffer = cv2.imencode('.png', outputimg)
        arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]#,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
    ################################# -2.8- #################################
    elif result[5][2].startswith('2.8'): #section value - 2.8 floor finishes
        #vv[0] array of pdf names
        imgss=[]
        dpxlinks=[]
        legendLinks=[]
        listofmarkups=[]
        annotatedimg,pdf_document,spreadsheet_url, list1 , df_doors=doors_fasterrcnn.main_run(img,dataDoc,'separated_classes.pth','separated_classes_all.pth',result[0],pdfpath, result[4]) #single_double.pth
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        pdflink= tsadropboxretrieval.uploadanyFile(doc=pdf_document,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/
        _, buffer = cv2.imencode('.png', annotatedimg)
        arr=[base64.b64encode(buffer).decode('utf-8') ,pdflink,spreadsheet_url,list1.to_dict(), df_doors.to_dict()]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
    ################################# -2.1- #################################
    elif result[5][2].startswith('2.1'): #section value - 2.8 floor finishes
        #vv[0] array of pdf names
        imgss=[]
        dpxlinks=[]
        legendLinks=[]
        listofmarkups=[]
        annotatedimg,pdf_document,spreadsheet_url, list1 , df_doors=Counting_Columns_2_1.mainfun(dataDoc)
        # dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        # pdflink= tsadropboxretrieval.uploadanyFile(doc=pdf_document,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/
        # _, buffer = cv2.imencode('.png', annotatedimg)
        # BUG(review): the imencode above is commented out, so `buffer` (and the
        # module global `pdflink`, on a fresh process) are undefined here -- this
        # branch raises NameError at runtime. Likely fix is to restore the
        # imencode line; left untouched pending confirmation.
        arr=[base64.b64encode(buffer).decode('utf-8') ,pdflink,spreadsheet_url,list1.to_dict(), df_doors.to_dict()]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
    ################################# -2.2- #################################
    # elif result[5][2].startswith('2.2'): #section value - 2.2 rc slabs
    #     #add here python code
    #     #link (add this to ur code)
    #     pdflink= db.dropbox_upload_file(doc=doc,pdfname=path,pdfpath=pdfpath)
    #     gc,spreadsheet_service,spreadsheetId, spreadsheet_url , namepathArr= pilecaps_adr.legendGoogleSheets(df,plan ,pdfpath)
    #     #output img
    #     _, buffer = cv2.imencode('.png', outputimg) #let first result to be the img
    #     #return results in arr=[base64.b64encode(buffer).decode('utf-8'),pdflink,spreadsheetId,spreadsheet_url] like the previous sections in the above lines
    # elif result[5][2].startswith('2.1'): #section value - 2.1 frames
    #     url = tameem2_1.mainfun( result[0], pdfpath)
    #     return jsonify([url])
    return arr
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


@app.route("/canvaspdftoimgBackground/",methods=["GET", "POST"])
def pdftoimgCanvas(jsdata):
    """Render a plan as a base64 PNG plus its (height, width) for the canvas background."""
    img=plan2img(jsdata)[0]
    _, buffer = cv2.imencode('.png', img)
    arr=[base64.b64encode(buffer).decode('utf-8') , img.shape[0],img.shape[1]]
    return jsonify(arr)


#pdf to img
def plan2img(nameofpdf):
    """Download `nameofpdf` from Dropbox and render page 0 at 300 dpi.

    Returns (BGR numpy image, raw PDF bytes). Rotation is normalized to 0 first.
    """
    pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
    dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path=pdfpath)
    data = res.content
    doc = fitz.open("pdf",data)
    page=doc[0]
    if page.rotation!=0:
        page.set_rotation(0)
    pix = page.get_pixmap(dpi=300) # render page to an image
    pl=Image.frombytes('RGB', [pix.width,pix.height],pix.samples)
    img=np.array(pl)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img ,data
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#not used in 1.0
def convert2img(path):
    """Render page 0 of the PDF at `path` to a BGR numpy image via pypdfium2."""
    pdf = pdfium.PdfDocument(path)
    page = pdf.get_page(0)
    pil_image = page.render().to_pil()
    pl1=np.array(pil_image)
    img = cv2.cvtColor(pl1, cv2.COLOR_RGB2BGR)
    return img
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#User-made MC-T NAME - in the second part of the interface
# NOTE(review): route rule has no <jsdata> converter although the view takes an
# argument -- TODO confirm how this endpoint is invoked. Also depends on the
# module globals `colorsused`/`pdflink` being set by an earlier measurement
# request; calling it first raises NameError.
@app.route("/mctnametoGoogleSheet/",methods=["GET", "POST"])
def sendmctnametoLegend(jsdata):
    """Map a user-chosen MC-template name onto the legend sheet; return [summary id]."""
    result = json.loads(jsdata)
    print(result)
    global pdflink
    summaryid=google_sheet_Legend.mapnametoLegend(result,colorsused,pdflink)
    allreturns=[summaryid]
    return jsonify(allreturns)


@app.route("/getguessednamepath/",methods=["GET", "POST"])
def getguessedNames():
    """Return all guessed name/path entries for the dropdown."""
    guessednamesDrpdwn=google_sheet_Legend.getallguessednames()
    return jsonify(guessednamesDrpdwn)
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


#NOT USED (pixel conversion instead)
def calcRef(img):
    """Draw a fixed white reference rectangle on a black mask and measure it.

    Returns (area, perimeter, mask). Superseded by the pixelconversion module.
    NOTE(review): loop-body grouping below was reconstructed; `area`/`perimeter`
    keep the values from the last contour iterated.
    """
    blk = np.ones(img.shape, dtype="uint8") * [[[np.uint8(0), np.uint8(0), np.uint8(0)]]]
    start_point = (50, 100)
    end_point = (120, 200)
    color = (255, 255, 255) # white BGR
    thickness = -1 # Thickness of -1 will fill the entire shape
    blk = cv2.rectangle(blk, start_point, end_point, color, thickness)
    blk = cv2.cvtColor(blk, cv2.COLOR_BGR2GRAY)
    contourzz, hierarchy = cv2.findContours(image=blk, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    for i, cnt3 in enumerate(contourzz):
        M = cv2.moments(cnt3)
        if M['m00'] != 0.0:
            x2 = int(M['m10']/M['m00'])
            y2 = int(M['m01']/M['m00'])
        area = cv2.contourArea(cnt3)
        perimeter = cv2.arcLength(cnt3, True)
    return area,perimeter , blk


#NOT USED (pixel conversion instead)
def modifyingcalcRefDynamic(img):
    """Like calcRef but the rectangle is sized at 10% of the image dimensions.

    Returns (area, perimeter, mask, image copy with the blue rectangle drawn).
    """
    imgcopy = img.copy()
    blk = np.ones(img.shape, dtype="uint8") * [[[np.uint8(0), np.uint8(0), np.uint8(0)]]]
    x = 50
    y = 100
    xD = int(img.shape[1] * 0.10)
    yD = int(img.shape[0] * 0.10)
    start_point = (x, y)
    end_point = (x+xD, y+yD)
    blue = (255, 0, 0) # BGR
    white = (255, 255, 255) # BGR
    thickness = -1 # Thickness of -1 will fill the entire shape
    imgcopy = cv2.rectangle(imgcopy, start_point, end_point, blue, thickness)
    blk = cv2.rectangle(blk, start_point, end_point, white, thickness)
    blk = cv2.cvtColor(blk, cv2.COLOR_BGR2GRAY)
    contourzz, hierarchy = cv2.findContours(image=blk, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    for i, cnt3 in enumerate(contourzz):
        M = cv2.moments(cnt3)
        if M['m00'] != 0.0:
            x2 = int(M['m10']/M['m00'])
            y2 = int(M['m01']/M['m00'])
        area = cv2.contourArea(cnt3)
        perimeter = cv2.arcLength(cnt3, True)
    return area, perimeter, blk , imgcopy
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


# NOTE(review): missing <jsdata> converter in the rule -- TODO confirm invocation.
@app.route('/get-pdf/')
def get_pdf(jsdata):
    """Download the named PDF from Dropbox and return it as an attachment response."""
    print('pdfname',jsdata)
    # Download PDF from Dropbox
    pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)
    print('pdfpath',pdfpath)
    dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path=pdfpath)
    doc = fitz.open("pdf",res.content)
    page=doc[0]
    # print('get pdf size::',page.mediabox)
    pdf_data = res.content # Ensure this is your correct PDF data from Dropbox
    response = make_response(io.BytesIO(pdf_data).getvalue())
    response.headers.set('Content-Type', 'application/pdf')
    response.headers.set('Content-Disposition', 'attachment', filename='filename.pdf') # Replace with your desired file name
    # return response
    return response
# PIXEL CONVERSION METHOD -- SAVES DOC ON DROPBOX TO BE MEASURED BY USER
# NOTE(review): missing <jsdata> converter in the rule -- TODO confirm invocation.
@app.route("/pixelimg/",methods=["GET", "POST"])
def getimg(jsdata):
    """Create the scale document for a plan and upload it to Dropbox.

    `jsdata` is eval'd (SECURITY NOTE(review): code injection -- prefer
    json.loads) into [project, part, section, pdf name]. Returns
    [areaPixel, perimeterPixel, dropbox url].
    """
    jsdata=eval(jsdata)
    pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata[3])
    dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path=pdfpath)
    data = res.content
    print('jsdata',jsdata)
    # Section 1.0 renders at 300 dpi; other sections use the default and also
    # receive the raw pdf bytes back (pdfdata is unused here).
    if str(jsdata[2]).startswith('1.0'):
        doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data,300) # .openDrawPDF(data)
    else:
        doc,areaPixel,perimeterPixel,pdfdata=pixelconversion.drawisrotated(data) # .openDrawPDF(data)
    dbPath='/TSA JOBS/ADR Test/'+jsdata[0]+'/'+jsdata[1]+'/'+jsdata[2]+'/'+'Scale Document' +'/'
    dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str(jsdata[3]) ,path=dbPath)
    # areaPixel,perimeterPixel= pixelconversion.getAreaPerimeter(dbPath, str(jsdata[3]))
    outputs=[areaPixel,perimeterPixel , dburl]
    # pdf_data = io.BytesIO(res.content)
    # pdf_data.seek(0)
    return jsonify(outputs) #, send_file(pdf_data, as_attachment=True, mimetype='application/pdf', download_name='downloaded_file.pdf')
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


#get sunburst from doc_search
@app.route('/getsunburst',methods=["GET", "POST"])
def getSunburst():
    """Return the document-tree sunburst data as a dict."""
    # df=tsadropboxretrieval.GetParquetDF()
    tree=doc_search.prepare_sunburst()
    return jsonify(tree.to_dict())
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


#DELETE MARKUPS (for comparison)
@app.route('/_submission',methods=["GET", "POST"])
def getnewlegend():
    """Delete markups from a measured plan and return the deleted rows as a dict."""
    pdfpth=''
    alljson = request.get_json()
    list1=alljson.get('dict1')
    print('list1',list1)
    # list1 = request.args.get('dict1')
    path=alljson.get('path')
    # path = request.args.get('path')
    spreadsheetId=alljson.get('spreadsheetId')
    # spreadsheetId =request.args.get('spreadsheetId')
    pdfpathpath=alljson.get('pdfpathpath')
    # pdfpathpath=request.args.get('pdfpathpath')
    print(pdfpathpath,type(pdfpathpath))
    pdfname=request.args.get('pdfname')
    # SECURITY NOTE(review): eval on a client-supplied string -- prefer json.loads.
    for word in eval(pdfpathpath):
        pdfpth+='/' +word
    pdfpth+='/'
    dbPath='/TSA JOBS/ADR Test'+pdfpth+'Measured Plan/'
    print(pdfpth)
    deletedrows1=google_sheet_Legend.deletemarkups(list1=list1,dbPath=dbPath,path=path)
    arr1=[deletedrows1.to_dict()]
    print('arr,',arr1)
    return jsonify(arr1)
#---------------------------------------------------------------------------


#if user wishes to delete
@app.route('/deletemarkupsroute',methods=["GET", "POST"])
def dltmarkupslegend():
    """Remove the deleted rows from the legend and rewrite the Google Sheet.

    Dispatches by section prefix (1.0/3.2/3.3 vs 2.8 vs default); retries the
    sheet rewrite once after 20 s on any failure (rate limiting, presumably).
    """
    print('IN deletemarkupsroute')
    pdfpth=''
    alljson = request.get_json()
    SimilarAreaDictionary=alljson.get('dict')
    # SimilarAreaDictionary = request.args.get('dict')
    deletedrows=alljson.get('deletedrows')
    print('deletedrowsssssssssssssssssssssssssssssss',deletedrows)
    # deletedrows = request.args.get('deletedrows')
    path=alljson.get('path')
    # path = request.args.get('path')
    spreadsheetId=alljson.get('spreadsheetId')
    # spreadsheetId =request.args.get('spreadsheetId')
    areaPermArr=alljson.get('areaPermArr')
    print('aaaaaaaaaaaaa',areaPermArr)
    # areaPermArr=request.args.get('areaPermArr')
    section=alljson.get('section')
    # section=request.args.get('section')
    pdfpathpath=alljson.get('pdfpathpath')
    # pdfpathpath=request.args.get('pdfpathpath')
    # SECURITY NOTE(review): eval on a client-supplied string -- prefer json.loads.
    for word in eval(pdfpathpath):
        pdfpth+='/' +word
    pdfpth+='/'
    # myDict=eval(deletedrows)
    deletedrows=pd.DataFrame(deletedrows)
    print('deletedrows',deletedrows)
    if section.startswith('1.0') or section.startswith('3.2') or section.startswith('3.3'):
        newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section,areaPermArr=areaPermArr)
    elif section.startswith('2.8'):
        newlgnd=google_sheet_Legend.deletedoors(deletedrows,SimilarAreaDictionary)
        print('done wit 2.8 in deleting, didnt append yet ')
    else:
        newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section)
    # NOTE(review): bare except swallows the original error; only the second
    # attempt's failure would surface.
    try:
        newlgnd=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId ,pdfpath=pdfpth) #new legend
    except:
        print("An exception occurred")
        time.sleep(20)
        newlgnd=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId,pdfpath=pdfpth)
    return jsonify('donee')
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


#get pdf dropbox url after measurement is done
# NOTE(review): missing <jsdata> converter in the rule -- TODO confirm invocation.
@app.route('/getdropboxurl/',methods=["GET", "POST"])
def calldropboxurl(jsdata):
    """Return the Dropbox share url for the named PDF."""
    print(jsdata)
    pdfurl=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)[1]
    print(pdfurl)
    return jsonify(pdfurl)
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


#Google sheet links
# NOTE(review): `jsdata` is accepted but unused, and the rule has no converter.
@app.route('/getdrivelinks/',methods=["GET", "POST"])
def getlinkscreated(jsdata):
    """List all non-API/non-Dropbox spreadsheets with path metadata, timestamps, and ids."""
    spreadsheet_service,drive_service,gc= google_sheet_Legend.authorizeLegend()
    ids=gc.spreadsheet_ids()
    titles=gc.spreadsheet_titles()
    allpaths=[]
    print('HEREEEEEEEEEE')
    # for title in titles:
    for i in range(0,len(titles)):
        print('titles',titles[i])
        if not (titles[i].startswith('API') or (titles[i].startswith('Dropbox')) ) :
            ws=gc.open(titles[i])
            path_metadata = ws.get_developer_metadata('path')
            print(path_metadata)
            allpaths.append([titles[i], ws.get_developer_metadata('path')[0].value , drive_service.files().get(fileId=ids[i],fields="createdTime, modifiedTime").execute() ,ids[i] ])
    return jsonify(allpaths)
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


@app.route('/getAPITables/',methods=["GET", "POST"])
def returnAPITables():
    """Return the three API tables (projects, parts, sections) as dicts."""
    # API.update_df()
    table1,table2,table3=API.GenerateTables()
    return jsonify([table1.to_dict(),table2.to_dict(),table3.to_dict()])


@app.route('/refreshAPI',methods=["GET", "POST"])
def checktables():
    """Diff the API tables against the live Google-Sheets CSVs.

    Depending on which of prjname/prjpart/prjpartid query args are present,
    reports project-, part-, or section-level changes. Returns '' when nothing
    changed, otherwise [message, scope, changed values, ...].
    """
    projectname = request.args.get('prjname')
    projectpart = request.args.get('prjpart')
    projectpartid = request.args.get('prjpartid')
    print('hgh',projectname , projectpart)
    table1,table2,table3=API.GenerateTables()
    # NOTE(review): the module URLs are already in export form, so these
    # replace() calls are no-ops here.
    url_1 = prjnamesURL.replace('/edit#gid=', '/export?format=csv&gid=')
    url_2 = prjpartsURL.replace('/edit#gid=', '/export?format=csv&gid=')
    url_3 = prjsectionsURL.replace('/edit#gid=', '/export?format=csv&gid=')
    table1N = pd.read_csv(url_1)
    table2N = pd.read_csv(url_2)
    table3N = pd.read_csv(url_3)
    # Symmetric differences: rows only in the sheet (Diff) vs only in the API (Diff1).
    t1Diff=table1N[~table1N.isin(table1)].dropna()
    t1Diff1=table1[~table1.isin(table1N)].dropna()
    t2Diff=pd.DataFrame()
    t2Diff1=pd.DataFrame()
    t3Diff=pd.DataFrame()
    t3Diff1=pd.DataFrame()
    if projectname:
        print('here')
        differentParts=[]
        a = table1N.to_numpy()
        row = np.where(a == projectname)
        print(projectname,row)
        returnString=''
        t2Diff1=table2[~table2.isin(table2N)].dropna()
        t2Diff=table2N[~table2N.isin(table2)].dropna()
    if projectpart and projectname:
        a1 = table1N.to_numpy()
        row1 = np.where(a1 == projectname)
        prjid=a1[row1[0]][0][0]
        t3Diff1=table3[~table3.isin(table3N)].dropna()
        t3Diff=table3N[~table3N.isin(table3)].dropna()
    returnString=''
    if not t1Diff.empty or not t1Diff1.empty :
        if not t1Diff.empty:
            newvalues= t1Diff['ProjectName'].values
        else:
            newvalues= t1Diff1['ProjectName'].values
        returnString=''
        returnString=['Changes have been made in Projects. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','project',(newvalues).tolist(),table2.to_dict()]
    if (not t2Diff.empty)or( not t2Diff1.empty) :
        if projectname:
            for i in range(len(t2Diff1['ProjectId'].values)):
                if int(t2Diff1['ProjectId'].values[i]) ==int(a[row[0]][0][0]):
                    differentParts.append(t2Diff1['ProjectPart'].iloc[i])
            if len(differentParts)>0:
                returnString=['Changes have been made in Parts. Would you like to retrieve them in the console?','part',differentParts]
    if (not t3Diff.empty) or (not t3Diff1.empty) :
        differentSections=[]
        if projectpart and projectname:
            roww=t3Diff1.iloc[np.where((t3Diff1['ProjectId']==int(prjid) ) & (t3Diff1['ProjectPartId']==int(projectpartid)) ) ]
            for i in range(len(roww['ProjectId'].values)):
                differentSections.append(str(roww['ProjectSection'].iloc[i]))
            if len(differentSections)>0:
                returnString=['Changes have been made in Sections. Would you like to retrieve them in the console?','section',differentSections]
    # (older combined-scope messages, superseded by the per-scope blocks above)
    # if not t2Diff.empty:
    #     newvalues= t2Diff['ProjectName'].values
    # else:
    #     newvalues= t2Diff1['ProjectName'].values
    #     returnString=''
    #     returnString=['Changes have been made in Parts. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','part']
    # elif (not t3Diff.empty) or (not t3Diff1.empty):
    #     returnString=['Changes have been made in Sections. ...','section']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t2Diff.empty) or ( not t2Diff1.empty)):
    #     returnString=['Changes have been made in Projects and Parts. ...','projectpart']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty) ):
    #     returnString=['Changes have been made in Projects and Sections. ...','projectsection']
    # elif ((not t2Diff.empty) or (not t2Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty)):
    #     returnString=['Changes have been made in Parts and Sections. ...','partsection']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t2Diff.empty) or (not t2Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty)):
    #     returnString=['Changes have been made in Projects, Parts and Sections. ...','project']
    print(returnString)
    # if sectionData!='noSectionYet':
    #     return jsonify([returnString,DictionaryOfTemplates])
    # else:
    # returnString=prjpartsRefreshAPItable2(table2,projectname)
    return jsonify(returnString)


@app.route('/refreshDropbox',methods=["GET", "POST"])
def checkdropbox():
    """Sync the cached parquet listing of '/TSA JOBS' with the live Dropbox tree.

    Re-uploads the fresh listing when any difference is found. Also called
    periodically by the scheduler loop at module bottom.
    """
    print('checkingggdf')
    dfFromDropbox=tsadropboxretrieval.DropboxItemstoDF( "/TSA JOBS")[0]
    dfParquet=tsadropboxretrieval.GetParquetDF()
    dfParquet1 = dfParquet[['name', 'path_display', 'client_modified', 'server_modified']]
    # Rows appearing in exactly one of the two listings (symmetric difference).
    deletedrows = pd.concat([dfFromDropbox, dfParquet1]).drop_duplicates(keep=False)
    deletedrows = deletedrows.reset_index(drop=True)
    deletedrows.columns = ['name', 'path_display', 'client_modified', 'server_modified']
    differences = deletedrows[~deletedrows.isin(dfFromDropbox)].dropna()
    if (len(differences)>0):
        print(differences)
        dbxTeam=tsadropboxretrieval.dropbox_upload_file(dfFromDropbox)
        stringReturned= 'Updated Sucessfully.'
    else:
        stringReturned= 'Nothing to update.'
    return stringReturned


@app.route('/postdropboxprojects',methods=["GET", "POST"])
def postDropboxprojectsinConsole():
    """Trigger a Dropbox sync from the console and return its status string."""
    stringReturned=checkdropbox()
    print(stringReturned)
    return stringReturned


@app.route('/refreshAPIAppendNewTables',methods=["GET", "POST"])
def appendNewTables():
    """Append the latest API tables to the sheets."""
    # value = request.args.get('keyword')
    # print(value)
    API.AppendtablestoSheets()
    return jsonify('appended')


# NOTE(review): missing <jsdata> converter in the rule -- TODO confirm invocation.
@app.route('/summarytoXML/',methods=["GET", "POST"])
def cvtSummarytoXML(jsdata):
    """Export the summary sheet of a document to XML under the project's XML folder."""
    path='/TSA JOBS/ADR Test/'
    result = json.loads(jsdata)
    for word in result[0]:
        path+=word +'/'
    print(path)
    path=path+'XML/'
    # print(result[0])
    # print(result[1])
    xmllink=google_sheet_to_xml.create_xml(documentname=result[1],dbPath=path)
    return jsonify(xmllink)
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________
#_________________________________________________________________________________________________________________________


def runn():
    """Serve the Flask app on 0.0.0.0:7860 via gevent (blocks forever)."""
    # app.run(host="127.0.0.1")
    from gevent.pywsgi import WSGIServer
    http_server = WSGIServer(("0.0.0.0", 7860), app)
    http_server.serve_forever()
    # serve(app, host="127.0.0.1", port=8080)


def keep_alive():
    """Run the web server in a background thread so the scheduler loop can run."""
    t=Thread(target=runn)
    t.start()


# Compute today's 21:00 as the first scheduled Dropbox-sync time (module-level,
# runs at import). NOTE(review): datetime(...) here is naive; astimezone()
# interprets it in the server's local zone before converting to UTC.
dtn = datetime.datetime.now(datetime.timezone.utc)
print(dtn)
next_start = datetime.datetime(dtn.year, dtn.month, dtn.day, 21, 0, 0).astimezone(datetime.timezone.utc) #us - 2 = utc time (21 utc is 19:00 our time and 9 is 7 our time , it needs to run 9 utc time ____ )
print(next_start)
keep_alive() # active_path = "/TSA Team Folder/ADR Test/Test/" while 1: dtnNow = datetime.datetime.now(datetime.timezone.utc) print(dtnNow) if dtnNow >= next_start: next_start += datetime.timedelta(hours=12) # 1 day print(next_start) checkdropbox() time.sleep(1800) if __name__ == "__main__": runn()