f64 committed on
Commit
8d5e34d
·
1 Parent(s): 48db71d
my_static_methods.py CHANGED
@@ -78,7 +78,7 @@ def load_dataframes_from_hf(repo: HfRepo, lstCsvFiles: list[str] = []) -> {str,
78
  if os.path.exists(file_loaded):
79
  compress = "zip" if file_loaded.lower().endswith("zip") else None
80
  df_loaded = pd.read_csv(file_loaded, sep=";", encoding = "utf-8", compression=compress)
81
- dict_res[fl_name] = df_process_v_column(df_loaded)
82
  return dict_res
83
 
84
  ### список CSV и ZIP файлов (c уровнем вложенности) в репозитории
 
78
  if os.path.exists(file_loaded):
79
  compress = "zip" if file_loaded.lower().endswith("zip") else None
80
  df_loaded = pd.read_csv(file_loaded, sep=";", encoding = "utf-8", compression=compress)
81
+ dict_res[fl_name] = df_loaded # df_Vproc = df_process_v_column(df_loaded)
82
  return dict_res
83
 
84
  ### список CSV и ZIP файлов (c уровнем вложенности) в репозитории
pages/4_Просмотр_CSV.py CHANGED
@@ -4,7 +4,6 @@ import my_static_methods as my_stm
4
  st.html(my_stm.STYLE_CORRECTION)
5
 
6
  #st.sidebar.title("⚜️")
7
- #st.sidebar.info("три CSV")
8
 
9
  #col1.metric("Temperature", "70 °F", "1.2 °F") #col2.metric("Wind", "9 mph", "-8%") #col3.metric("Humidity", "86%", "4%")
10
 
@@ -12,8 +11,7 @@ st.html(my_stm.STYLE_CORRECTION)
12
  REPO = my_stm.HfRepo("f64k/gaziev", "dataset", st.secrets["HF_WRITE"])
13
  st.sidebar.info("🧊💽 сохраненные\n таблицы CSV")
14
 
15
- lstTestFiles = my_stm.list_files_hf(REPO)
16
- #dictXYZV = my_stm.load_gaziev_from_hf(REPO, lstTestFiles)
17
 
18
  cols = st.columns(2)
19
 
@@ -23,11 +21,12 @@ if one_file_selected:
23
  dict_ONE_XYZV = my_stm.load_dataframes_from_hf(REPO, [one_file_selected])
24
  if len(dict_ONE_XYZV) > 0:
25
  df_xyz = list(dict_ONE_XYZV.values())[0] #df_xyz = dictXYZV[key_xyz]
26
- fig = my_stm.plotly_xyzv_scatter_gray(df_xyz)
 
27
  cols[0].plotly_chart(fig) # st.plotly_chart(fig) #fig.show()
28
  lstValues = sorted(set(df_xyz.notna().sum()), reverse=True)
29
- cols[1].info(f"Всего записей : {lstValues[0]}; Значений V : {lstValues[1]}")
30
- cols[1].dataframe(df_xyz, height=700)
31
 
32
 
33
 
 
4
  st.html(my_stm.STYLE_CORRECTION)
5
 
6
  #st.sidebar.title("⚜️")
 
7
 
8
  #col1.metric("Temperature", "70 °F", "1.2 °F") #col2.metric("Wind", "9 mph", "-8%") #col3.metric("Humidity", "86%", "4%")
9
 
 
11
  REPO = my_stm.HfRepo("f64k/gaziev", "dataset", st.secrets["HF_WRITE"])
12
  st.sidebar.info("🧊💽 сохраненные\n таблицы CSV")
13
 
14
+ lstTestFiles = [f for f in my_stm.list_files_hf(REPO) if not f.upper().startswith("ID_XYZ/")]
 
15
 
16
  cols = st.columns(2)
17
 
 
21
  dict_ONE_XYZV = my_stm.load_dataframes_from_hf(REPO, [one_file_selected])
22
  if len(dict_ONE_XYZV) > 0:
23
  df_xyz = list(dict_ONE_XYZV.values())[0] #df_xyz = dictXYZV[key_xyz]
24
+ df_xyz_vproc = my_stm.df_process_v_column(df_xyz)
25
+ fig = my_stm.plotly_xyzv_scatter_gray(df_xyz_vproc)
26
  cols[0].plotly_chart(fig) # st.plotly_chart(fig) #fig.show()
27
  lstValues = sorted(set(df_xyz.notna().sum()), reverse=True)
28
+ cols[1].info(f"Bceгo записей : {lstValues[0]}; Значений V : {lstValues[1]}")
29
+ cols[1].dataframe(df_xyz_vproc, height=700)
30
 
31
 
32
 
pages/6_TECT_7_ID.py CHANGED
@@ -2,7 +2,7 @@ import os, re, sys, time, math, shutil, urllib, string, random, pickle, zipfile,
2
  import streamlit as st, pandas as pd, numpy as np
3
  import my_static_methods as my_stm
4
 
5
- st.html(my_stm.STYLE_CORRECTION)
6
  st.sidebar.markdown("🧊 проверка по пакетам XYZ")
7
 
8
  def ReRun():
@@ -23,7 +23,7 @@ def save_dataframe_nodialog_idxyz(new_filename, dfToSave):
23
 
24
  REPO = my_stm.HfRepo("f64k/gaziev", "dataset", st.secrets["HF_WRITE"])
25
  lstRepoFiles = my_stm.list_files_hf(REPO)
26
- lstTestFilesIdXyz = [f.upper().replace("ID_XYZ/","") for f in lstRepoFiles if f.upper().startswith("ID_XYZ/")]
27
 
28
  col1, col2 = st.columns([1,1])
29
  with col1.container():
@@ -46,10 +46,12 @@ with col1.container():
46
  st.error(uploaded_file.type)
47
  # dataframe ready. try to upload to HF
48
  if not dfLoaded is None:
49
- dfToUpload = dfLoaded.query("ID!='ID'")
 
 
50
  #col2.dataframe(df)
51
  colnames = "".join(dfToUpload.columns)
52
- if colnames.lower().startswith("idxyz"):
53
  dgID = dfToUpload.groupby("ID")
54
  dictGroupID = dict(list(dgID))
55
  lstGroupIDs = list(dictGroupID.keys())
@@ -64,8 +66,20 @@ with col1.container():
64
  save_dataframe_nodialog_idxyz(fileXYZ, dfToUpload)
65
  else:
66
  st.error(f"Разные размеры пакетов для разных ID, варианты : {lst_len}")
 
 
67
  # список уже имеющихся файлов
68
- st.write(lstTestFilesIdXyz)
69
- #with col1.container():
 
 
 
 
 
 
 
 
 
 
70
 
71
 
 
2
  import streamlit as st, pandas as pd, numpy as np
3
  import my_static_methods as my_stm
4
 
5
+ # st.html(my_stm.STYLE_CORRECTION)
6
  st.sidebar.markdown("🧊 проверка по пакетам XYZ")
7
 
8
  def ReRun():
 
23
 
24
  REPO = my_stm.HfRepo("f64k/gaziev", "dataset", st.secrets["HF_WRITE"])
25
  lstRepoFiles = my_stm.list_files_hf(REPO)
26
+ dictTestFilesIdXyz = {f.upper().replace("ID_XYZ/",""): f.upper() for f in lstRepoFiles if f.upper().startswith("ID_XYZ/")}
27
 
28
  col1, col2 = st.columns([1,1])
29
  with col1.container():
 
46
  st.error(uploaded_file.type)
47
  # dataframe ready. try to upload to HF
48
  if not dfLoaded is None:
49
+ dfToUpload = dfLoaded
50
+ if "ID" in dfToUpload.columns:
51
+ dfToUpload = dfLoaded.query("ID!='ID'")
52
  #col2.dataframe(df)
53
  colnames = "".join(dfToUpload.columns)
54
+ if colnames.upper().startswith("IDXYZ"):
55
  dgID = dfToUpload.groupby("ID")
56
  dictGroupID = dict(list(dgID))
57
  lstGroupIDs = list(dictGroupID.keys())
 
66
  save_dataframe_nodialog_idxyz(fileXYZ, dfToUpload)
67
  else:
68
  st.error(f"Разные размеры пакетов для разных ID, варианты : {lst_len}")
69
+ else:
70
+ st.error(f"Столбцы не ID;X;Y;Z ! Наблюдаем столбцы : {colnames}")
71
  # список уже имеющихся файлов
72
+ selectedFile = st.radio("# загруженные тестовые пакеты", dictTestFilesIdXyz.keys(), index=None)
73
+ if selectedFile is not None:
74
+ dict_ONE_IDXYZ = my_stm.load_dataframes_from_hf(REPO, [dictTestFilesIdXyz[selectedFile]])
75
+ if len(dict_ONE_IDXYZ) > 0:
76
+ df_idxyz = list(dict_ONE_IDXYZ.values())[0]
77
+ dfShow = df_idxyz
78
+ dgID = df_idxyz.groupby("ID")
79
+ dictGroupID = dict(list(dgID))
80
+ dfShow = dgID.apply(len) #.reset_index()
81
+ col2.dataframe(dfShow, height=700)
82
+
83
+
84
 
85
 
static/test.ipynb CHANGED
@@ -2,7 +2,7 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 2,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
@@ -16,7 +16,7 @@
16
  },
17
  {
18
  "cell_type": "code",
19
- "execution_count": 3,
20
  "metadata": {},
21
  "outputs": [
22
  {
@@ -25,7 +25,7 @@
25
  "text": [
26
  "<>:1: SyntaxWarning: invalid escape sequence '\\M'\n",
27
  "<>:1: SyntaxWarning: invalid escape sequence '\\M'\n",
28
- "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_38792\\1255343956.py:1: SyntaxWarning: invalid escape sequence '\\M'\n",
29
  " path1 = \"N:\\Makarov\\Development\\Python\\Jupiter Notebooks\\Gaziev CSV\\TestData_1504_AB_soloV_gaziev.zip\"\n"
30
  ]
31
  },
@@ -228,7 +228,7 @@
228
  "[12010 rows x 10 columns]"
229
  ]
230
  },
231
- "execution_count": 3,
232
  "metadata": {},
233
  "output_type": "execute_result"
234
  }
@@ -249,7 +249,7 @@
249
  },
250
  {
251
  "cell_type": "code",
252
- "execution_count": 4,
253
  "metadata": {},
254
  "outputs": [
255
  {
@@ -270,7 +270,7 @@
270
  },
271
  {
272
  "cell_type": "code",
273
- "execution_count": 14,
274
  "metadata": {},
275
  "outputs": [
276
  {
@@ -437,7 +437,7 @@
437
  "18 AAA011113 21 392 -205"
438
  ]
439
  },
440
- "execution_count": 14,
441
  "metadata": {},
442
  "output_type": "execute_result"
443
  }
@@ -456,7 +456,7 @@
456
  },
457
  {
458
  "cell_type": "code",
459
- "execution_count": 6,
460
  "metadata": {},
461
  "outputs": [
462
  {
@@ -470,40 +470,167 @@
470
  "name": "stderr",
471
  "output_type": "stream",
472
  "text": [
473
- "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_38792\\1806989190.py:1: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
474
  " print(f\"{set(df2.groupby(\"ID\").apply(len))=}\")\n",
475
- "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_38792\\1806989190.py:4: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
476
- " dgID.apply(len)\n"
477
  ]
478
  },
479
  {
480
  "data": {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
481
  "text/plain": [
482
- "ID\n",
483
- "AAA011111 7\n",
484
- "AAA011112 7\n",
485
- "AAA011113 7\n",
486
- "AAA011114 7\n",
487
- "AAA011115 7\n",
488
- "AAA011116 7\n",
489
- "AAA011117 7\n",
490
- "AAA011118 7\n",
491
- "BBB011111 7\n",
492
- "BBB011112 7\n",
493
- "BBB011113 7\n",
494
- "BBB011114 7\n",
495
- "BBB011115 7\n",
496
- "BBB011116 7\n",
497
- "BBB011117 7\n",
498
- "CCC011111 7\n",
499
- "CCC011112 7\n",
500
- "DDD011111 7\n",
501
- "DDD011112 7\n",
502
- "DDD011113 7\n",
503
- "dtype: int64"
504
  ]
505
  },
506
- "execution_count": 6,
507
  "metadata": {},
508
  "output_type": "execute_result"
509
  }
@@ -512,12 +639,12 @@
512
  "print(f\"{set(df2.groupby(\"ID\").apply(len))=}\")\n",
513
  "dgID = df2.groupby(\"ID\")\n",
514
  "dictGroupID = dict(list(dgID))\n",
515
- "dgID.apply(len)"
516
  ]
517
  },
518
  {
519
  "cell_type": "code",
520
- "execution_count": 30,
521
  "metadata": {},
522
  "outputs": [
523
  {
@@ -620,7 +747,7 @@
620
  "79 BBB011112 -210 529 -943"
621
  ]
622
  },
623
- "execution_count": 30,
624
  "metadata": {},
625
  "output_type": "execute_result"
626
  }
@@ -637,14 +764,14 @@
637
  },
638
  {
639
  "cell_type": "code",
640
- "execution_count": 34,
641
  "metadata": {},
642
  "outputs": [
643
  {
644
  "name": "stderr",
645
  "output_type": "stream",
646
  "text": [
647
- "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_38792\\572365488.py:1: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
648
  " dgID.apply(lambda df: df[xyz].values.reshape(1,21)).reset_index()\n"
649
  ]
650
  },
@@ -802,7 +929,7 @@
802
  "19 DDD011113 [[-220, 487, 741, 22, 516, 433, -123, 478, 234..."
803
  ]
804
  },
805
- "execution_count": 34,
806
  "metadata": {},
807
  "output_type": "execute_result"
808
  }
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
 
16
  },
17
  {
18
  "cell_type": "code",
19
+ "execution_count": 2,
20
  "metadata": {},
21
  "outputs": [
22
  {
 
25
  "text": [
26
  "<>:1: SyntaxWarning: invalid escape sequence '\\M'\n",
27
  "<>:1: SyntaxWarning: invalid escape sequence '\\M'\n",
28
+ "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_14236\\1255343956.py:1: SyntaxWarning: invalid escape sequence '\\M'\n",
29
  " path1 = \"N:\\Makarov\\Development\\Python\\Jupiter Notebooks\\Gaziev CSV\\TestData_1504_AB_soloV_gaziev.zip\"\n"
30
  ]
31
  },
 
228
  "[12010 rows x 10 columns]"
229
  ]
230
  },
231
+ "execution_count": 2,
232
  "metadata": {},
233
  "output_type": "execute_result"
234
  }
 
249
  },
250
  {
251
  "cell_type": "code",
252
+ "execution_count": 3,
253
  "metadata": {},
254
  "outputs": [
255
  {
 
270
  },
271
  {
272
  "cell_type": "code",
273
+ "execution_count": 4,
274
  "metadata": {},
275
  "outputs": [
276
  {
 
437
  "18 AAA011113 21 392 -205"
438
  ]
439
  },
440
+ "execution_count": 4,
441
  "metadata": {},
442
  "output_type": "execute_result"
443
  }
 
456
  },
457
  {
458
  "cell_type": "code",
459
+ "execution_count": 10,
460
  "metadata": {},
461
  "outputs": [
462
  {
 
470
  "name": "stderr",
471
  "output_type": "stream",
472
  "text": [
473
+ "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_14236\\117446433.py:1: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
474
  " print(f\"{set(df2.groupby(\"ID\").apply(len))=}\")\n",
475
+ "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_14236\\117446433.py:4: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
476
+ " dgID.apply(len).reset_index()\n"
477
  ]
478
  },
479
  {
480
  "data": {
481
+ "text/html": [
482
+ "<div>\n",
483
+ "<style scoped>\n",
484
+ " .dataframe tbody tr th:only-of-type {\n",
485
+ " vertical-align: middle;\n",
486
+ " }\n",
487
+ "\n",
488
+ " .dataframe tbody tr th {\n",
489
+ " vertical-align: top;\n",
490
+ " }\n",
491
+ "\n",
492
+ " .dataframe thead th {\n",
493
+ " text-align: right;\n",
494
+ " }\n",
495
+ "</style>\n",
496
+ "<table border=\"1\" class=\"dataframe\">\n",
497
+ " <thead>\n",
498
+ " <tr style=\"text-align: right;\">\n",
499
+ " <th></th>\n",
500
+ " <th>ID</th>\n",
501
+ " <th>0</th>\n",
502
+ " </tr>\n",
503
+ " </thead>\n",
504
+ " <tbody>\n",
505
+ " <tr>\n",
506
+ " <th>0</th>\n",
507
+ " <td>AAA011111</td>\n",
508
+ " <td>7</td>\n",
509
+ " </tr>\n",
510
+ " <tr>\n",
511
+ " <th>1</th>\n",
512
+ " <td>AAA011112</td>\n",
513
+ " <td>7</td>\n",
514
+ " </tr>\n",
515
+ " <tr>\n",
516
+ " <th>2</th>\n",
517
+ " <td>AAA011113</td>\n",
518
+ " <td>7</td>\n",
519
+ " </tr>\n",
520
+ " <tr>\n",
521
+ " <th>3</th>\n",
522
+ " <td>AAA011114</td>\n",
523
+ " <td>7</td>\n",
524
+ " </tr>\n",
525
+ " <tr>\n",
526
+ " <th>4</th>\n",
527
+ " <td>AAA011115</td>\n",
528
+ " <td>7</td>\n",
529
+ " </tr>\n",
530
+ " <tr>\n",
531
+ " <th>5</th>\n",
532
+ " <td>AAA011116</td>\n",
533
+ " <td>7</td>\n",
534
+ " </tr>\n",
535
+ " <tr>\n",
536
+ " <th>6</th>\n",
537
+ " <td>AAA011117</td>\n",
538
+ " <td>7</td>\n",
539
+ " </tr>\n",
540
+ " <tr>\n",
541
+ " <th>7</th>\n",
542
+ " <td>AAA011118</td>\n",
543
+ " <td>7</td>\n",
544
+ " </tr>\n",
545
+ " <tr>\n",
546
+ " <th>8</th>\n",
547
+ " <td>BBB011111</td>\n",
548
+ " <td>7</td>\n",
549
+ " </tr>\n",
550
+ " <tr>\n",
551
+ " <th>9</th>\n",
552
+ " <td>BBB011112</td>\n",
553
+ " <td>7</td>\n",
554
+ " </tr>\n",
555
+ " <tr>\n",
556
+ " <th>10</th>\n",
557
+ " <td>BBB011113</td>\n",
558
+ " <td>7</td>\n",
559
+ " </tr>\n",
560
+ " <tr>\n",
561
+ " <th>11</th>\n",
562
+ " <td>BBB011114</td>\n",
563
+ " <td>7</td>\n",
564
+ " </tr>\n",
565
+ " <tr>\n",
566
+ " <th>12</th>\n",
567
+ " <td>BBB011115</td>\n",
568
+ " <td>7</td>\n",
569
+ " </tr>\n",
570
+ " <tr>\n",
571
+ " <th>13</th>\n",
572
+ " <td>BBB011116</td>\n",
573
+ " <td>7</td>\n",
574
+ " </tr>\n",
575
+ " <tr>\n",
576
+ " <th>14</th>\n",
577
+ " <td>BBB011117</td>\n",
578
+ " <td>7</td>\n",
579
+ " </tr>\n",
580
+ " <tr>\n",
581
+ " <th>15</th>\n",
582
+ " <td>CCC011111</td>\n",
583
+ " <td>7</td>\n",
584
+ " </tr>\n",
585
+ " <tr>\n",
586
+ " <th>16</th>\n",
587
+ " <td>CCC011112</td>\n",
588
+ " <td>7</td>\n",
589
+ " </tr>\n",
590
+ " <tr>\n",
591
+ " <th>17</th>\n",
592
+ " <td>DDD011111</td>\n",
593
+ " <td>7</td>\n",
594
+ " </tr>\n",
595
+ " <tr>\n",
596
+ " <th>18</th>\n",
597
+ " <td>DDD011112</td>\n",
598
+ " <td>7</td>\n",
599
+ " </tr>\n",
600
+ " <tr>\n",
601
+ " <th>19</th>\n",
602
+ " <td>DDD011113</td>\n",
603
+ " <td>7</td>\n",
604
+ " </tr>\n",
605
+ " </tbody>\n",
606
+ "</table>\n",
607
+ "</div>"
608
+ ],
609
  "text/plain": [
610
+ " ID 0\n",
611
+ "0 AAA011111 7\n",
612
+ "1 AAA011112 7\n",
613
+ "2 AAA011113 7\n",
614
+ "3 AAA011114 7\n",
615
+ "4 AAA011115 7\n",
616
+ "5 AAA011116 7\n",
617
+ "6 AAA011117 7\n",
618
+ "7 AAA011118 7\n",
619
+ "8 BBB011111 7\n",
620
+ "9 BBB011112 7\n",
621
+ "10 BBB011113 7\n",
622
+ "11 BBB011114 7\n",
623
+ "12 BBB011115 7\n",
624
+ "13 BBB011116 7\n",
625
+ "14 BBB011117 7\n",
626
+ "15 CCC011111 7\n",
627
+ "16 CCC011112 7\n",
628
+ "17 DDD011111 7\n",
629
+ "18 DDD011112 7\n",
630
+ "19 DDD011113 7"
 
631
  ]
632
  },
633
+ "execution_count": 10,
634
  "metadata": {},
635
  "output_type": "execute_result"
636
  }
 
639
  "print(f\"{set(df2.groupby(\"ID\").apply(len))=}\")\n",
640
  "dgID = df2.groupby(\"ID\")\n",
641
  "dictGroupID = dict(list(dgID))\n",
642
+ "dgID.apply(len).reset_index()"
643
  ]
644
  },
645
  {
646
  "cell_type": "code",
647
+ "execution_count": 6,
648
  "metadata": {},
649
  "outputs": [
650
  {
 
747
  "79 BBB011112 -210 529 -943"
748
  ]
749
  },
750
+ "execution_count": 6,
751
  "metadata": {},
752
  "output_type": "execute_result"
753
  }
 
764
  },
765
  {
766
  "cell_type": "code",
767
+ "execution_count": 7,
768
  "metadata": {},
769
  "outputs": [
770
  {
771
  "name": "stderr",
772
  "output_type": "stream",
773
  "text": [
774
+ "C:\\Users\\f64\\AppData\\Local\\Temp\\ipykernel_14236\\572365488.py:1: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n",
775
  " dgID.apply(lambda df: df[xyz].values.reshape(1,21)).reset_index()\n"
776
  ]
777
  },
 
929
  "19 DDD011113 [[-220, 487, 741, 22, 516, 433, -123, 478, 234..."
930
  ]
931
  },
932
+ "execution_count": 7,
933
  "metadata": {},
934
  "output_type": "execute_result"
935
  }