Spaces:
Sleeping
Sleeping
| from __future__ import print_function | |
| from flask import Flask, render_template,request,flash , json, url_for,g , redirect , jsonify , send_file | |
| import os | |
| import json | |
| import fitz | |
| from PIL import Image | |
| import cv2 | |
| import numpy as np | |
| import pilecaps_adr | |
| import base64 | |
| from db import dropbox_connect | |
| import db | |
| import cv2 | |
| import pandas as pd | |
| import time | |
| from io import BytesIO, StringIO | |
| import tempfile | |
| from flask import Flask, Response | |
| from werkzeug.wsgi import wrap_file | |
| import tameem3_2 | |
| import pypdfium2 as pdfium | |
| import pixelconversion | |
| import tameem2_1 | |
| import base64 | |
| import io | |
| from urllib.parse import unquote | |
| import API | |
| import MC_Templates_API | |
| import tsadropboxretrieval | |
| import doc_search | |
| import google_sheet_Legend | |
| import dxf__omar3_2 | |
| import google_sheet_to_xml | |
| from threading import Thread | |
| import mainDBAlaa | |
| import datetime | |
| import ezdxf | |
# Flask application instance.  No @app.route decorators are visible in this
# chunk; routes are presumably registered elsewhere — TODO confirm.
app = Flask(__name__)
# CSV-export URLs for three tabs (selected via gid) of one Google spreadsheet:
# project names, project parts, and project sections.
# NOTE(review): checktables() calls .replace('/edit#gid=', ...) on these, which
# is a no-op because they are already in /export?format=csv form — confirm intent.
prjnamesURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=0'
prjpartsURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=34865056'
prjsectionsURL= 'https://docs.google.com/spreadsheets/d/1nsIgi9o9VSBKQlNxbxihPzG_N7s4um0eNVfgL4gaGPc/export?format=csv&gid=1751466819'
def getInfotoMeasure():
    """Push the latest API tables to Google Sheets, then serve the main GUI."""
    API.AppendtablestoSheets()
    return render_template("proposed-GUI.html")
def getprjnamesfromTestAPI():
    """Return JSON ``[names, ids]`` for every project known to the test API."""
    names, ids = API.getPrjNames()
    return jsonify([names, ids])
def getprjpartsfromTestAPI(jsdata):
    """Return JSON ``[parts, part_ids]`` for the project identified by *jsdata*."""
    print('kkkk', jsdata)  # debug trace
    parts, part_ids = API.getprjParts(jsdata)
    return jsonify([parts, part_ids])
def getprjSectionsfromTestAPI(jsdata):
    """Decode *jsdata* (JSON ``[project, part]``) and return the part's sections."""
    payload = json.loads(jsdata)
    return jsonify(API.getprjSections(payload[0], payload[1]))
#retrieves projects
def get_javascript_data(jsdata):
    """Look up the documents for the project named in *jsdata* via Dropbox.

    Returns JSON ``[documents_to_measure, relevant_documents]`` or a plain
    one-element list when nothing matched.
    """
    print('tsa')  # debug trace
    to_measure, relevant = tsadropboxretrieval.retrieveProjects(jsdata)
    if not relevant:
        return ['No projects found']
    return jsonify([to_measure, relevant])
| #--------------------------------------------------------------- | |
def searchDocument():
    """Serve the keyword-search page."""
    return render_template('wordSearch.html')
def getSearchinDocs():
    """Search the indexed documents for a keyword.

    Expects a JSON body ``{'keyword': ..., 'listofprojs': ...}``.  Returns
    ``[results-table-HTML, [base64 PNG page images]]``.
    """
    payload = request.get_json()
    keyword = payload.get('keyword')
    projects = payload.get('listofprojs')
    print(keyword, projects)
    df, images = doc_search.search_docs(keyword, projects)
    encoded = []
    for image in images:
        _, png = cv2.imencode('.png', image)
        encoded.append(base64.b64encode(png).decode('utf-8'))
    return jsonify([df.to_html(index=False, escape=False), encoded])
def getSearchinFolder():
    """Slow (non-indexed) keyword search inside one project's folder.

    Expects JSON ``{'keyword': ..., 'ProjectName': ...}``; returns the same
    ``[HTML, images]`` payload shape as getSearchinDocs().
    """
    payload = request.get_json()
    df, images = doc_search.slow_search(keyword=payload.get('keyword'),
                                        project=payload.get('ProjectName'))
    encoded = [base64.b64encode(cv2.imencode('.png', image)[1]).decode('utf-8')
               for image in images]
    return jsonify([df.to_html(index=False, escape=False), encoded])
def measurementConsoleFn():
    """Serve the measurement-console GUI page."""
    return render_template("proposed-GUI.html")
def CallAPIforMCTNames():
    """Return the dictionary of measurement-console templates as JSON."""
    templates = MC_Templates_API.RetrieveMC_Templates_API()
    print('here')
    return jsonify(templates)
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#Hex value to RGB value
def hexRGB(color):
    """Convert a '#RRGGBB' (or bare 'RRGGBB') hex string to an [R, G, B] list."""
    digits = color.lstrip('#')
    # Two hex digits per channel, in R, G, B order.
    return [int(digits[i:i + 2], 16) for i in (0, 2, 4)]
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def stringToRGB():
    """Form-post entry point for measuring a plan; dispatches by section.

    NOTE(review): eval() on client-supplied form data is a code-injection
    risk — json.loads would be safer.  Payload layout inferred from use:
    vv[0]=pdf name, vv[1]=dpi?, vv[5]=[project, part, section],
    vv[6]=base64 image — TODO confirm.
    """
    vv = eval(request.form.get('answers'))
    if vv[5][2].startswith('3.2'):
        # Section 3.2 (floor finishes): fetch the raw PDF bytes from Dropbox.
        print('3.2 section')
        pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=vv[0])
        dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
        md, res =dbxTeam.files_download(path=pdfpath)
        dataDoc = res.content
    else:
        # Other sections: rasterise the first PDF page; a DXF upload must
        # accompany the request.
        opencv_img,dataDoc = plan2img( str(vv[0]) )
        if 'file' not in request.files:
            print('error, No file part in the request')
        else:
            file = request.files['file']
            print('file done, measuring')
            arr=measureproject(vv,dataDoc,0,file)
            return jsonify(arr)
    # Reached from the 3.2 branch or when no file part was posted.
    # NOTE(review): when vv[1] != 220 the function falls off the end and
    # returns None — confirm that is intended.
    if vv[1]==220:
        imgdata = base64.b64decode(vv[6])
        img=Image.open(io.BytesIO(imgdata))
        opencv_img= cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        arr=measureproject(vv,dataDoc,opencv_img)
        return jsonify(arr)
def measure2_1():
    """Route for section 2.1 measurement from a JSON body ('allvalues').

    NOTE(review): measureproject() requires a second positional ``dataDoc``
    argument, so this call raises TypeError as written — confirm what should
    be passed here (or whether this route is dead code).
    """
    name = request.get_json()
    result=name.get('allvalues')
    arr=measureproject(result)
    return arr
#MAIN FUNCTION -- calls python code to measure the chosen plan from the interface
# @app.route('/projecttomeasure/<jsdata>',methods=["GET","POST"])
def measureproject(result,dataDoc,img=0,dxffile=0):
    """Dispatch measurement of a plan to the section-specific pipeline.

    Parameters (layout inferred from use — TODO confirm):
      result  -- [0]=pdf name, [1]=dpi?, [2]=hex colour list, [3]/[4]=scale
                 ratios, [5]=[project, part, section] path components.
      dataDoc -- raw PDF bytes of the plan.
      img     -- OpenCV image of the plan (section 1.0), else 0.
      dxffile -- uploaded DXF file object (section 3.2), else 0.

    Returns a list [base64 PNG, legend dict, spreadsheet url/id, ...] whose
    exact layout differs per section; section 2.1 instead returns
    jsonify([url]); an unmatched section yields [].
    """
    colorarr=[]
    # result = json.loads(jsdata)
    pdfpath='/'
    ################################# -1.0- #################################
    # Build the '/project/part/section/' Dropbox folder path.
    for word in result[5]:
        pdfpath+=word +'/'
    arr=[]
    if result[5][2].startswith('1.0'): #section value - 1.0 substructure- pile caps
        # Convert the chosen hex colours to [R, G, B] lists for contour drawing.
        for item in result[2]:
            # item1 ='#'+item
            c=hexRGB(item)
            colorarr.append(c)
        print('RATIOS=',result[3], result[4])
        # cv2.imwrite(img,'imgg.png')
        imgPerimeter1,image_new1,SimilarAreaDictionary , colorsUsed,spreadsheet_url, spreadsheetId,list1, pdflink, areas_Perimeters, namepathArr =pilecaps_adr.drawAllContours(dataDoc,img,result[1],colorarr, result[3], result[4], result[0],pdfpath)
        _, buffer = cv2.imencode('.png', image_new1)
        arr=[base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(),spreadsheet_url , spreadsheetId,colorsUsed,list1.to_dict(), pdflink, areas_Perimeters, namepathArr]
    ################################# -3.2- #################################
    elif result[5][2].startswith('3.2'): #section value - 3.2 floor finishes
        print('IN HEREEEE 3.2')
        # Persist the uploaded DXF to disk so downstream code can open it by
        # filename.  NOTE(review): delete=False with no later cleanup leaks
        # one temp file per request.
        dxfpath=dxffile.read()
        with tempfile.NamedTemporaryFile(suffix='.dxf', delete=False) as temp_file:
            temp_file.write(dxfpath)
            temp_filename = temp_file.name
        print(temp_filename)
        doc,outputimg, SimilarAreaDictionary ,spreadsheetId, spreadsheet_url , namepathArr , list1,hatched_areas=dxf__omar3_2.mainFunctionDrawImgPdf(dataDoc,temp_filename,result[4] , pdfpath,result[0])
        print('done measuring')
        dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
        pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=result[0]) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
        _, buffer = cv2.imencode('.png', outputimg)
        arr=[ base64.b64encode(buffer).decode('utf-8'),SimilarAreaDictionary.to_dict(), spreadsheet_url,spreadsheetId,[],list1.to_dict(),pdflink,hatched_areas,namepathArr]# , spreadsheetId, spreadsheet_url , list1.to_dict()]
    ################################# -2.2- #################################
    # elif result[5][2].startswith('2.2'): #section value - 2.2 rc slabs
    #add here python code
    #
    #link (add this to ur code)
    # pdflink= db.dropbox_upload_file(doc=doc,pdfname=path,pdfpath=pdfpath)
    # gc,spreadsheet_service,spreadsheetId, spreadsheet_url , namepathArr= pilecaps_adr.legendGoogleSheets(df,plan ,pdfpath)
    #output img
    # _, buffer = cv2.imencode('.png', outputimg)
    #let first result to be the img
    #return results in arr=[base64.b64encode(buffer).decode('utf-8'),pdflink,spreadsheetId,spreadsheet_url] like the previous sections in the above lines
    elif result[5][2].startswith('2.1'): #section value - 2.1 frames
        url = tameem2_1.mainfun( result[0], pdfpath)
        return jsonify([url])
    return arr
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def pdftoimgCanvas(jsdata):
    """Render page 1 of the named PDF; return JSON [base64 PNG, height, width]."""
    image = plan2img(jsdata)[0]
    _, png = cv2.imencode('.png', image)
    encoded = base64.b64encode(png).decode('utf-8')
    return jsonify([encoded, image.shape[0], image.shape[1]])
#pdf to img
def plan2img(nameofpdf):
    """Download *nameofpdf* from Dropbox and rasterise its first page.

    Returns ``(bgr_image, raw_pdf_bytes)``.
    """
    pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=nameofpdf)
    team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res = team.files_download(path=pdfpath)
    pdf_bytes = res.content
    document = fitz.open("pdf", pdf_bytes)
    first = document[0]
    if first.rotation != 0:
        first.set_rotation(0)  # normalise rotated pages before rendering
    pix = first.get_pixmap(dpi=300)  # render page to an image
    pil_img = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    bgr = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
    return bgr, pdf_bytes
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#not used in 1.0
def convert2img(path):
    """Render the first page of the PDF at *path* to a BGR OpenCV image."""
    document = pdfium.PdfDocument(path)
    first_page = document.get_page(0)
    rgb = np.array(first_page.render().to_pil())
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#User-made MC-T NAME - in the second part of the interface
def sendmctnametoLegend(jsdata):
    """Map a user-made MC-T name onto the legend sheet; return JSON [summary id]."""
    payload = json.loads(jsdata)
    print(payload)
    summary_id = google_sheet_Legend.mapnametoLegend(payload)
    return jsonify([summary_id])
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#NOT USED (pixel conversion instead)
def calcRef(img):
    """Draw a fixed 70x100 white reference rectangle on a black canvas and
    return its (contour area, perimeter, mask).  Superseded by pixelconversion.
    """
    # Black canvas with the same shape as img.
    blk = np.ones(img.shape, dtype="uint8") * [[[np.uint8(0), np.uint8(0), np.uint8(0)]]]
    start_point = (50, 100)
    end_point = (120, 200)
    color = (255, 255, 255) # white BGR
    thickness = -1 # Thickness of -1 will fill the entire shape
    blk = cv2.rectangle(blk, start_point, end_point, color, thickness)
    blk = cv2.cvtColor(blk, cv2.COLOR_BGR2GRAY)
    contourzz, hierarchy = cv2.findContours(image=blk, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    # Only the reference rectangle is on the canvas, so this loop runs once.
    for i, cnt3 in enumerate(contourzz):
        M = cv2.moments(cnt3)
        if M['m00'] != 0.0:
            x2 = int(M['m10']/M['m00'])  # centroid x (unused)
            y2 = int(M['m01']/M['m00'])  # centroid y (unused)
            area = cv2.contourArea(cnt3)
            perimeter = cv2.arcLength(cnt3, True)
    # NOTE(review): if no contour had nonzero m00, area/perimeter would be
    # unbound here (NameError) — safe only because the rectangle always exists.
    return area,perimeter , blk
#NOT USED (pixel conversion instead)
def modifyingcalcRefDynamic(img):
    """Variant of calcRef() with the rectangle sized to 10% of the image.

    Returns (area, perimeter, mask, annotated copy of img).  Superseded by
    pixelconversion.
    """
    imgcopy = img.copy()
    # Black canvas with the same shape as img.
    blk = np.ones(img.shape, dtype="uint8") * [[[np.uint8(0), np.uint8(0), np.uint8(0)]]]
    x = 50
    y = 100
    xD = int(img.shape[1] * 0.10)  # 10% of image width
    yD = int(img.shape[0] * 0.10)  # 10% of image height
    start_point = (x, y)
    end_point = (x+xD, y+yD)
    blue = (255, 0, 0) # BGR
    white = (255, 255, 255) # BGR
    thickness = -1 # Thickness of -1 will fill the entire shape
    imgcopy = cv2.rectangle(imgcopy, start_point, end_point, blue, thickness)
    blk = cv2.rectangle(blk, start_point, end_point, white, thickness)
    blk = cv2.cvtColor(blk, cv2.COLOR_BGR2GRAY)
    contourzz, hierarchy = cv2.findContours(image=blk, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    # Only the reference rectangle is on the canvas, so this loop runs once.
    for i, cnt3 in enumerate(contourzz):
        M = cv2.moments(cnt3)
        if M['m00'] != 0.0:
            x2 = int(M['m10']/M['m00'])  # centroid x (unused)
            y2 = int(M['m01']/M['m00'])  # centroid y (unused)
            area = cv2.contourArea(cnt3)
            perimeter = cv2.arcLength(cnt3, True)
    return area, perimeter, blk , imgcopy
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def get_pdf(jsdata):
    """Stream the named PDF from Dropbox back to the client as an attachment."""
    # Download the PDF bytes from Dropbox.
    pdfpath, pdflink = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)
    team = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res = team.files_download(path=pdfpath)
    # Wrap the bytes in an in-memory stream and rewind it for send_file.
    stream = io.BytesIO(res.content)
    stream.seek(0)
    return send_file(
        stream,
        as_attachment=True,
        mimetype='application/pdf',
        download_name='downloaded_file.pdf',
    )
#PIXEL CONVERSION METHOD -- SAVES DOC ON DROPBOX TO BE MEASURED BY USER
def getimg(jsdata):
    """Create and upload the scale/pixel-conversion document for a plan.

    *jsdata* is a Python-literal list: [project, part, section, pdf name].
    Returns JSON [areaPixel, perimeterPixel, dropbox_url].

    NOTE(review): eval() on client-supplied data is a code-injection risk —
    prefer json.loads.  Also the two drawisrotated() calls unpack a different
    number of values (3 vs 4) — confirm both call signatures.
    """
    jsdata=eval(jsdata)
    pdfpath,pdflink=tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata[3])
    dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path=pdfpath)
    data = res.content
    print('jsdata',jsdata)
    if str(jsdata[2]).startswith('1.0'):
        # Section 1.0 renders at a fixed 300 dpi.
        doc,areaPixel,perimeterPixel=pixelconversion.drawisrotated(data,300) # .openDrawPDF(data)
    else:
        doc,areaPixel,perimeterPixel,pdfdata=pixelconversion.drawisrotated(data) # .openDrawPDF(data)
    # Upload the scale document next to the plan on Dropbox.
    dbPath='/TSA JOBS/ADR Test/'+jsdata[0]+'/'+jsdata[1]+'/'+jsdata[2]+'/'+'Scale Document' +'/'
    dburl=tsadropboxretrieval.uploadanyFile(doc=doc,pdfname=str(jsdata[3]) ,path=dbPath)
    # areaPixel,perimeterPixel= pixelconversion.getAreaPerimeter(dbPath, str(jsdata[3]))
    outputs=[areaPixel,perimeterPixel , dburl]
    # pdf_data = io.BytesIO(res.content)
    # pdf_data.seek(0)
    return jsonify(outputs) #, send_file(pdf_data, as_attachment=True, mimetype='application/pdf', download_name='downloaded_file.pdf')
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#get sunburst from doc_search
def getSunburst():
    """Return the document-tree sunburst data as JSON."""
    tree = doc_search.prepare_sunburst()
    return jsonify(tree.to_dict())
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#DELETE MARKUPS (for comparison)
def getnewlegend():
    """Compare the measured PDF on Dropbox with the legend and delete stale markups.

    JSON body keys: dict1 (legend rows), path, spreadsheetId, pdfpathpath
    (stringified list of path components).  Returns JSON [deleted-rows dict].

    NOTE(review): eval() on client-supplied pdfpathpath is a code-injection
    risk; 'pdfname' is read from query args while the rest comes from the JSON
    body — confirm the mix is intended (pdfname and spreadsheetId are unused).
    """
    pdfpth=''
    alljson = request.get_json()
    list1=alljson.get('dict1')
    # list1 = request.args.get('dict1')
    path=alljson.get('path')
    # path = request.args.get('path')
    spreadsheetId=alljson.get('spreadsheetId')
    # spreadsheetId =request.args.get('spreadsheetId')
    pdfpathpath=alljson.get('pdfpathpath')
    # pdfpathpath=request.args.get('pdfpathpath')
    print(pdfpathpath,type(pdfpathpath))
    pdfname=request.args.get('pdfname')
    # Rebuild '/project/part/section/' from the component list.
    for word in eval(pdfpathpath):
        pdfpth+='/' +word
    pdfpth+='/'
    dbPath='/TSA JOBS/ADR Test'+pdfpth+'Measured Plan/'
    deletedrows1=pilecaps_adr.deletemarkups(list1=list1,dbPath=dbPath,path=path)
    arr1=[deletedrows1.to_dict()]
    print('arr,',arr1)
    return jsonify(arr1)
| #--------------------------------------------------------------------------- | |
#if user wishes to delete
def dltmarkupslegend():
    """Remove deleted markup rows from the legend and rewrite the Google Sheet.

    JSON body keys: dict (legend data), deletedrows, path, spreadsheetId,
    areaPermArr, section, and pdfpathpath (stringified list of path
    components).  Returns JSON 'donee' when the sheet has been rewritten.

    NOTE(review): eval() on client-supplied pdfpathpath is a code-injection
    risk — json.loads would be safer; left unchanged so callers sending
    Python-literal strings keep working.
    """
    print('IN deletemarkupsroute')
    pdfpth=''
    alljson = request.get_json()
    SimilarAreaDictionary=alljson.get('dict')
    # SimilarAreaDictionary = request.args.get('dict')
    deletedrows=alljson.get('deletedrows')
    print('deletedrowsssssssssssssssssssssssssssssss',deletedrows)
    # deletedrows = request.args.get('deletedrows')
    path=alljson.get('path')
    # path = request.args.get('path')
    spreadsheetId=alljson.get('spreadsheetId')
    # spreadsheetId =request.args.get('spreadsheetId')
    areaPermArr=alljson.get('areaPermArr')
    print('aaaaaaaaaaaaa',areaPermArr)
    # areaPermArr=request.args.get('areaPermArr')
    section=alljson.get('section')
    # section=request.args.get('section')
    pdfpathpath=alljson.get('pdfpathpath')
    # pdfpathpath=request.args.get('pdfpathpath')
    # Rebuild '/project/part/section/' from the component list.
    for word in eval(pdfpathpath):
        pdfpth+='/' +word
    pdfpth+='/'
    deletedrows=pd.DataFrame(deletedrows)
    print('deletedrows',deletedrows)
    # Section 1.0 additionally needs the area/perimeter pairs to rebuild rows.
    if section.startswith('1.0'):
        newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section,areaPermArr=areaPermArr)
    else:
        newlgnd=google_sheet_Legend.deletefromlegend(deletedrows=deletedrows,SimilarAreaDictionarycopy=SimilarAreaDictionary, section=section)
    try:
        newlgnd1=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId ,pdfpath=pdfpth) #new legend
    except Exception as exc:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed, and the actual error is now logged.  The failure is
        # most likely a Sheets API rate limit, so wait and retry once.
        print("An exception occurred", exc)
        time.sleep(10)
        newlgnd1=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary=newlgnd,path=path,spreadsheetId=spreadsheetId,pdfpath=pdfpth)
    return jsonify('donee')
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#get pdf dropbox url after measurement is done
def calldropboxurl(jsdata):
    """Return the Dropbox share link for the named PDF as JSON."""
    print(jsdata)
    link = tsadropboxretrieval.getPathtoPDF_File(nameofPDF=jsdata)[1]
    print(link)
    return jsonify(link)
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
#Google sheet links
def getlinkscreated(jsdata):
    """List created legend spreadsheets as [title, path, timestamps, id].

    *jsdata* is accepted for route compatibility but not used.  Sheets whose
    title starts with 'API' or 'Dropbox' are internal and skipped.
    """
    spreadsheet_service, drive_service, gc = google_sheet_Legend.authorizeLegend()
    ids = gc.spreadsheet_ids()
    titles = gc.spreadsheet_titles()
    results = []
    for idx, title in enumerate(titles):
        if title.startswith(('API', 'Dropbox')):
            continue  # internal bookkeeping sheets
        sheet = gc.open(title)
        meta = drive_service.files().get(fileId=ids[idx], fields="createdTime, modifiedTime").execute()
        results.append([title, sheet.get_developer_metadata('path')[0].value, meta, ids[idx]])
    return jsonify(results)
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def returnAPITables():
    """Return the three API tables (projects, parts, sections) as JSON dicts."""
    # API.update_df()
    tables = API.GenerateTables()
    return jsonify([table.to_dict() for table in tables])
def checktables():
    """Compare the cached API tables against the live Google-Sheet CSVs.

    Query args: prjname, prjpart, prjpartid (all optional; they narrow which
    level of change is reported).  Returns JSON: either '' (no changes) or
    [message, scope, changed-values, ...].

    NOTE(review): the .replace('/edit#gid=', ...) calls are no-ops because the
    module-level URLs are already in /export?format=csv form — confirm intent.
    """
    projectname = request.args.get('prjname')
    projectpart = request.args.get('prjpart')
    projectpartid = request.args.get('prjpartid')
    print('hgh',projectname , projectpart)
    table1,table2,table3=API.GenerateTables()
    url_1 = prjnamesURL.replace('/edit#gid=', '/export?format=csv&gid=')
    url_2 = prjpartsURL.replace('/edit#gid=', '/export?format=csv&gid=')
    url_3 = prjsectionsURL.replace('/edit#gid=', '/export?format=csv&gid=')
    table1N = pd.read_csv(url_1)
    table2N = pd.read_csv(url_2)
    table3N = pd.read_csv(url_3)
    # Diff = rows only in the live sheet; Diff1 = rows only in the cache.
    t1Diff=table1N[~table1N.isin(table1)].dropna()
    t1Diff1=table1[~table1.isin(table1N)].dropna()
    t2Diff=pd.DataFrame()
    t2Diff1=pd.DataFrame()
    t3Diff=pd.DataFrame()
    t3Diff1=pd.DataFrame()
    if projectname:
        print('here')
        differentParts=[]
        a = table1N.to_numpy()
        row = np.where(a == projectname)  # locate the selected project's row
        print(projectname,row)
        returnString=''
        t2Diff1=table2[~table2.isin(table2N)].dropna()
        t2Diff=table2N[~table2N.isin(table2)].dropna()
    if projectpart and projectname:
        a1 = table1N.to_numpy()
        row1 = np.where(a1 == projectname)
        prjid=a1[row1[0]][0][0]  # project id taken from the first matching row
        # a2 = table2N.to_numpy()
        # row2 = np.where(a2 == projectpart)
        # partid=a2[row2[0]][0][1]
        t3Diff1=table3[~table3.isin(table3N)].dropna()
        t3Diff=table3N[~table3N.isin(table3)].dropna()
    returnString=''
    # Project-level message first; part/section messages below overwrite it
    # when they apply (later checks take precedence).
    if not t1Diff.empty or not t1Diff1.empty :
        if not t1Diff.empty:
            newvalues= t1Diff['ProjectName'].values
        else:
            newvalues= t1Diff1['ProjectName'].values
        returnString=''
        returnString=['Changes have been made in Projects. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','project',(newvalues).tolist(),table2.to_dict()]
    if (not t2Diff.empty)or( not t2Diff1.empty) :
        if projectname:
            # Only report parts that belong to the selected project.
            for i in range(len(t2Diff1['ProjectId'].values)):
                if int(t2Diff1['ProjectId'].values[i]) ==int(a[row[0]][0][0]):
                    differentParts.append(t2Diff1['ProjectPart'].iloc[i])
            if len(differentParts)>0:
                returnString=['Changes have been made in Parts. Would you like to retrieve them in the console?','part',differentParts]
    if (not t3Diff.empty) or (not t3Diff1.empty) :
        differentSections=[]
        if projectpart and projectname:
            # Only sections for the selected (project, part) pair.
            roww=t3Diff1.iloc[np.where((t3Diff1['ProjectId']==int(prjid) ) & (t3Diff1['ProjectPartId']==int(projectpartid)) ) ]
            for i in range(len(roww['ProjectId'].values)):
                differentSections.append(str(roww['ProjectSection'].iloc[i]))
            if len(differentSections)>0:
                returnString=['Changes have been made in Sections. Would you like to retrieve them in the console?','section',differentSections]
    # (superseded message-combination logic kept for reference)
    # if not t2Diff.empty:
    # newvalues= t2Diff['ProjectName'].values
    # else:
    # newvalues= t2Diff1['ProjectName'].values
    # returnString=''
    # returnString=['Changes have been made in Parts. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','part']
    # elif (not t3Diff.empty) or (not t3Diff1.empty):
    # returnString=''
    # returnString=['Changes have been made in Sections. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','section']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t2Diff.empty) or ( not t2Diff1.empty)):
    # returnString=''
    # returnString=['Changes have been made in Projects and Parts. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','projectpart']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty) ):
    # returnString=''
    # returnString=['Changes have been made in Projects and Sections. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','projectsection']
    # elif ((not t2Diff.empty) or (not t2Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty)):
    # returnString=''
    # returnString=['Changes have been made in Parts and Sections. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','partsection']
    # elif ((not t1Diff.empty) or (not t1Diff1.empty)) and ((not t2Diff.empty) or (not t2Diff1.empty)) and ((not t3Diff.empty) or (not t3Diff1.empty)):
    # returnString=''
    # returnString=['Changes have been made in Projects, Parts and Sections. Would you like to retrieve them in the console? By saying yes, you may need to repeat the website related processes(if any).','project']
    print(returnString)
    # if sectionData!='noSectionYet':
    # return jsonify([returnString,DictionaryOfTemplates])
    # else:
    # returnString=prjpartsRefreshAPItable2(table2,projectname)
    return jsonify(returnString)
def checkdropbox():
    """Re-scan the Dropbox '/TSA JOBS' tree and refresh the cached listing.

    Returns a human-readable status string.  Called from the console route and
    from the background scheduler loop at the bottom of this module.
    """
    print('checkingggdf')
    dfFromDropbox=tsadropboxretrieval.DropboxItemstoDF( "/TSA JOBS")[0]
    dfParquet=tsadropboxretrieval.GetParquetDF()
    dfParquet1 = dfParquet[['name', 'path_display', 'client_modified', 'server_modified']]
    # Rows present in exactly one of the two frames (symmetric difference).
    deletedrows = pd.concat([dfFromDropbox, dfParquet1]).drop_duplicates(keep=False)
    deletedrows = deletedrows.reset_index(drop=True)
    deletedrows.columns = ['name', 'path_display', 'client_modified', 'server_modified']
    differences = deletedrows[~deletedrows.isin(dfFromDropbox)].dropna()
    if (len(differences)>0):
        print(differences)
        # The cache is stale: upload the freshly-scanned listing.
        dbxTeam=tsadropboxretrieval.dropbox_upload_file(dfFromDropbox)
        stringReturned= 'Updated Sucessfully.'
    else:
        stringReturned= 'Nothing to update.'
    return stringReturned
def postDropboxprojectsinConsole():
    """Trigger a Dropbox refresh check and return its status string."""
    status = checkdropbox()
    print(status)
    return status
def appendNewTables():
    """Append the latest API tables to the Google Sheets backing store."""
    API.AppendtablestoSheets()
    return jsonify('appended')
def cvtSummarytoXML(jsdata):
    """Export a measured document's summary sheet as XML on Dropbox.

    *jsdata* is a JSON string ``[path_components, document_name]``; the XML
    is written under '<project path>/XML/' and its link returned as JSON.
    """
    result = json.loads(jsdata)
    base = '/TSA JOBS/ADR Test/'
    for component in result[0]:
        base += component + '/'
    print(base)
    xml_link = google_sheet_to_xml.create_xml(documentname=result[1], dbPath=base + 'XML/')
    return jsonify(xml_link)
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
| #_________________________________________________________________________________________________________________________ | |
def runn():
    """Serve the Flask app on 0.0.0.0:7860 via gevent (blocks forever)."""
    # app.run(host="127.0.0.1")
    from gevent.pywsgi import WSGIServer
    http_server = WSGIServer(("0.0.0.0", 7860), app)
    http_server.serve_forever()
# serve(app, host="127.0.0.1", port=8080)
def keep_alive():
    """Start the web server on a background thread so the scheduler below can run."""
    server_thread = Thread(target=runn)
    server_thread.start()
# --- module-level scheduler: start the server, then periodically re-sync the
# Dropbox cache.  NOTE(review): datetime(...) is naive and .astimezone()
# attaches the host's local zone before converting — confirm the intended
# trigger time survives deployment to a different timezone.
dtn = datetime.datetime.now(datetime.timezone.utc)
print(dtn)
next_start = datetime.datetime(dtn.year, dtn.month, dtn.day, 21, 0, 0).astimezone(datetime.timezone.utc) #us - 2 = utc time (21 utc is 19:00 our time and 9 is 7 our time , it needs to run 9 utc time ____ )
print(next_start)
keep_alive()
# active_path = "/TSA Team Folder/ADR Test/Test/"
while 1:
    dtnNow = datetime.datetime.now(datetime.timezone.utc)
    print(dtnNow)
    if dtnNow >= next_start:
        next_start += datetime.timedelta(hours=12) # 1 day
        print(next_start)
        checkdropbox()
    time.sleep(1800)  # poll every 30 minutes
# NOTE(review): unreachable as written — the infinite scheduler loop above
# never returns, so this guard is dead code; confirm whether the loop should
# live inside it instead.
if __name__ == "__main__":
    runn()