Marthee committed on
Commit
fb5d442
·
verified ·
1 Parent(s): bd20ee3

Update Doors_Schedule.py

Browse files
Files changed (1) hide show
  1. Doors_Schedule.py +29 -11
Doors_Schedule.py CHANGED
@@ -1009,7 +1009,7 @@ def get_similar_colors_all(selected_columns_new):
1009
  def col_template():
1010
  d = {
1011
  'values': [],
1012
- 'color': None
1013
  }
1014
  for field in clmns_fields:
1015
  d[field] = []
@@ -1024,7 +1024,7 @@ def get_similar_colors_all(selected_columns_new):
1024
  for field in clmns_fields:
1025
  col_dict[key][field].append(row.get(field, None))
1026
 
1027
- col_dict[key]['color'] = key_colors[key]
1028
 
1029
  return dict(col_dict)
1030
 
@@ -1040,7 +1040,7 @@ def get_flattened_tuples_list_all(col_dict):
1040
  n_rows = len(values_dict[list_fields[0]]) if list_fields else 0
1041
 
1042
  for i in range(n_rows):
1043
- tuple_row = tuple(values_dict[field][i] for field in list_fields) + (values_dict['color'],)
1044
  flattened_list.append(tuple_row)
1045
 
1046
  return flattened_list
@@ -1549,11 +1549,11 @@ def get_widths_bb_format_st_op(cleaned_width, kelma):
1549
  # New for new dictionary logic
1550
  def create_bb_bax_new(df_points, CountStyles, page_number, height_plan):
1551
  bax_annotations = []
1552
- exclude = {"location", "color"}
1553
  for _, row in df_points.iterrows():
1554
  rw = row
1555
  customDta = row.drop(labels=exclude, errors="ignore").to_dict()
1556
- r,g,b = rw['color']
1557
  R = str(float(r/255))
1558
  G = str(float(g/255))
1559
  B = str(float(b/255))
@@ -1692,7 +1692,11 @@ def mainRun(schedule, plan, searcharray, sch_csv_pdf):
1692
  if sch_csv_pdf:
1693
  print("shcedule type is PDF")
1694
  no_tables_normal, not_found_any_plan_normal = pick_approach(schedule, plan, searcharray, 1)
1695
- no_tables_model, not_found_any_plan_model = pick_approach(schedule, plan, searcharray, 2)
 
 
 
 
1696
  pick_normal = False
1697
  pick_model = False
1698
  if no_tables_model:
@@ -1812,6 +1816,9 @@ def mainRun(schedule, plan, searcharray, sch_csv_pdf):
1812
  else:
1813
  if secondary_presence:
1814
  main_info = main_info + [""]
 
 
 
1815
  flattened_list = get_flattened_tuples_list_no_doortype(selected_columns_combined)
1816
 
1817
 
@@ -1832,15 +1839,22 @@ def mainRun(schedule, plan, searcharray, sch_csv_pdf):
1832
  df_points = grouped_to_dataframe_dynamic(col_dict,
1833
  drop_empty_locations=True,
1834
  explode_locations=True)
1835
- df_points.columns = df_points.columns.str.strip().str.replace(r"\s+", "_", regex=True)
 
 
 
 
 
 
 
1836
  print(f"col_dict: {col_dict}")
1837
  print(f"selected_columns_combined: {selected_columns_combined}")
1838
  print(f"df: {df_points}")
1839
  if df_points.empty:
1840
- continue
1841
  # handling no door type in the new dictionary logic
1842
- if 'color' not in df_points:
1843
- df_points['color'] = (0, 0, 255)
1844
  dupes = df_points['door_id'].value_counts()
1845
  repeated_ids = dupes[dupes > 1].index.to_list()
1846
  repeated_labels_list.append(repeated_ids)
@@ -1857,6 +1871,10 @@ def mainRun(schedule, plan, searcharray, sch_csv_pdf):
1857
  df_points['width'] = widths # e.g., a list/Series/np.array or a scalar
1858
  df_points['height'] = heights
1859
  else:
 
 
 
 
1860
  lst_width = df_points["width"].tolist()
1861
  lst_height = df_points["height"].tolist()
1862
  clean_widths, clean_height = get_width_clean_width_height(lst_width, lst_height)
@@ -1876,7 +1894,7 @@ def mainRun(schedule, plan, searcharray, sch_csv_pdf):
1876
  #widht_count, height_count = generate_separate_dimensions(widths)
1877
  #bax = create_bb_bax_secondary(new_data3, widht_count, height_count, secondary_tobeprinted, CountStyles, user_input, page_number, page)
1878
  #bax_annotations_all_inputs.append(bax)
1879
- print(f"colors: {df_points['color']}")
1880
  print(f"df: {df_points}")
1881
  bax, customDta = create_bb_bax_new(df_points, CountStyles, page_number, page)
1882
  bax_annotations_all_inputs.append(bax)
 
1009
  def col_template():
1010
  d = {
1011
  'values': [],
1012
+ 'color_annot': None
1013
  }
1014
  for field in clmns_fields:
1015
  d[field] = []
 
1024
  for field in clmns_fields:
1025
  col_dict[key][field].append(row.get(field, None))
1026
 
1027
+ col_dict[key]['color_annot'] = key_colors[key]
1028
 
1029
  return dict(col_dict)
1030
 
 
1040
  n_rows = len(values_dict[list_fields[0]]) if list_fields else 0
1041
 
1042
  for i in range(n_rows):
1043
+ tuple_row = tuple(values_dict[field][i] for field in list_fields) + (values_dict['color_annot'],)
1044
  flattened_list.append(tuple_row)
1045
 
1046
  return flattened_list
 
1549
  # New for new dictionary logic
1550
  def create_bb_bax_new(df_points, CountStyles, page_number, height_plan):
1551
  bax_annotations = []
1552
+ exclude = {"location", "color_annot"}
1553
  for _, row in df_points.iterrows():
1554
  rw = row
1555
  customDta = row.drop(labels=exclude, errors="ignore").to_dict()
1556
+ r,g,b = rw['color_annot']
1557
  R = str(float(r/255))
1558
  G = str(float(g/255))
1559
  B = str(float(b/255))
 
1692
  if sch_csv_pdf:
1693
  print("shcedule type is PDF")
1694
  no_tables_normal, not_found_any_plan_normal = pick_approach(schedule, plan, searcharray, 1)
1695
+ try:
1696
+ no_tables_model, not_found_any_plan_model = pick_approach(schedule, plan, searcharray, 2)
1697
+ except:
1698
+ print("Model detection has issue of file too large")
1699
+ #no_tables_model = True
1700
  pick_normal = False
1701
  pick_model = False
1702
  if no_tables_model:
 
1816
  else:
1817
  if secondary_presence:
1818
  main_info = main_info + [""]
1819
+ # new logic can handle it
1820
+ #col_dict = get_similar_colors_all(selected_columns_combined)
1821
+
1822
  flattened_list = get_flattened_tuples_list_no_doortype(selected_columns_combined)
1823
 
1824
 
 
1839
  df_points = grouped_to_dataframe_dynamic(col_dict,
1840
  drop_empty_locations=True,
1841
  explode_locations=True)
1842
+ #df_points.columns = df_points.columns.str.strip().str.replace(r"\s+", "_", regex=True)
1843
+ # Clean column names
1844
+ df_points.columns = (df_points.columns
1845
+ .str.strip()
1846
+ .str.replace(r"[^\w-]+", "_", regex=True)
1847
+ .str.replace(r"_+", "_", regex=True)
1848
+ .str.strip("_"))
1849
+
1850
  print(f"col_dict: {col_dict}")
1851
  print(f"selected_columns_combined: {selected_columns_combined}")
1852
  print(f"df: {df_points}")
1853
  if df_points.empty:
1854
+ continue # to the next user input
1855
  # handling no door type in the new dictionary logic
1856
+ if 'color_annot' not in df_points:
1857
+ df_points['color_annot'] = (0, 0, 255)
1858
  dupes = df_points['door_id'].value_counts()
1859
  repeated_ids = dupes[dupes > 1].index.to_list()
1860
  repeated_labels_list.append(repeated_ids)
 
1871
  df_points['width'] = widths # e.g., a list/Series/np.array or a scalar
1872
  df_points['height'] = heights
1873
  else:
1874
+ # make sure they are strings first to keep the flow of get_width_clean_width_height function
1875
+ df_points['width'] = df_points['width'].astype('string')
1876
+ df_points['height'] = df_points['height'].astype('string')
1877
+
1878
  lst_width = df_points["width"].tolist()
1879
  lst_height = df_points["height"].tolist()
1880
  clean_widths, clean_height = get_width_clean_width_height(lst_width, lst_height)
 
1894
  #widht_count, height_count = generate_separate_dimensions(widths)
1895
  #bax = create_bb_bax_secondary(new_data3, widht_count, height_count, secondary_tobeprinted, CountStyles, user_input, page_number, page)
1896
  #bax_annotations_all_inputs.append(bax)
1897
+ print(f"color_annot: {df_points['color_annot']}")
1898
  print(f"df: {df_points}")
1899
  bax, customDta = create_bb_bax_new(df_points, CountStyles, page_number, page)
1900
  bax_annotations_all_inputs.append(bax)