Wuhuwill committed on
Commit
fa1cc41
·
verified ·
1 Parent(s): 2842611

Upload ProDiff/preprocess_data.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. ProDiff/preprocess_data.py +92 -0
ProDiff/preprocess_data.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import h5py
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ import os
6
+
7
def create_h5_from_csv(csv_path, train_h5_path, test_h5_path, test_split_ratio=0.1):
    """
    Read trajectory data from a CSV file, split users into train/test sets,
    and save each split into an HDF5 file structured for the TrajectoryDataset.

    Each HDF5 file contains one group per user (group name is the user id as a
    string), holding three float64 datasets:
      - 'hours':      Unix timestamps in seconds (absolute time; the legacy
                      dataset name 'hours' is kept for compatibility)
      - 'latitudes':  per-record latitude values
      - 'longitudes': per-record longitude values

    Parameters
    ----------
    csv_path : str
        Path to the input CSV. Must contain columns 'userid', 'datetime',
        'lat', 'lng', with 'datetime' parseable by pandas.
    train_h5_path : str
        Output path for the training-split HDF5 file.
    test_h5_path : str
        Output path for the test-split HDF5 file.
    test_split_ratio : float, optional
        Fraction of *users* (not rows) randomly assigned to the test set
        (default 0.1). Uses numpy's global RNG; seed externally via
        np.random.seed for reproducible splits.

    Returns
    -------
    None. On CSV read/parse failure the error is printed and the function
    returns early without writing any files.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        # Best-effort: report and bail out rather than crash the caller.
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)

    all_user_ids = df['userid'].unique()
    test_user_count = int(len(all_user_ids) * test_split_ratio)
    test_user_ids = set(np.random.choice(all_user_ids, size=test_user_count, replace=False))
    # BUG FIX: the original computed `all_user_ids - test_user_ids`, i.e.
    # numpy ndarray minus Python set, which raises TypeError at runtime.
    # Convert the array to a set first so the set difference is well defined.
    train_user_ids = set(all_user_ids) - test_user_ids

    print(f"Total users: {len(all_user_ids)}")
    print(f"Training users: {len(all_user_ids) - test_user_count}")
    print(f"Test users: {test_user_count}")

    # Process for both train and test sets with the same logic.
    for h5_path, user_ids, set_name in [(train_h5_path, train_user_ids, "train"),
                                        (test_h5_path, test_user_ids, "test")]:

        # Empty split (e.g. ratio 0 or 1, or a tiny user base): skip cleanly.
        if not user_ids:
            print(f"No users for {set_name} set, skipping.")
            continue

        print(f"\nCreating {set_name} HDF5 file at {h5_path}...")
        with h5py.File(h5_path, 'w') as h5f:
            # Restrict to this split's users, then group rows by userid.
            grouped = df[df['userid'].isin(user_ids)].groupby('userid')

            for user_id, user_df in tqdm(grouped, desc=f"Processing {set_name} users"):
                # Defensive per-user re-sort: df is already globally sorted
                # and groupby preserves row order, but this guards against
                # the global sort ever being removed.
                user_df = user_df.sort_values('datetime')

                # Convert datetime to Unix timestamps (seconds) for 'hours'.
                timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = user_df['lat'].values
                longitudes = user_df['lng'].values

                # One group per user, keyed by the stringified user id.
                user_group = h5f.create_group(str(user_id))

                user_group.create_dataset('hours', data=timestamps, dtype='float64')
                user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                user_group.create_dataset('longitudes', data=longitudes, dtype='float64')

        print(f"{set_name.capitalize()} data processing complete. File saved to {h5_path}")
65
+
66
+
67
if __name__ == '__main__':
    # --- Configuration ---------------------------------------------------
    CSV_DATA_PATH = 'data/May_trajectory_data.csv'

    # All outputs live under the 'data' directory; create it if missing.
    output_dir = 'data'
    os.makedirs(output_dir, exist_ok=True)
    TRAIN_H5_PATH = os.path.join(output_dir, 'train.h5')
    TEST_H5_PATH = os.path.join(output_dir, 'test.h5')

    # --- Conversion -------------------------------------------------------
    create_h5_from_csv(CSV_DATA_PATH, TRAIN_H5_PATH, TEST_H5_PATH)

    # --- Optional sanity check: inspect one user's group in the train file.
    print("\nVerifying HDF5 file structure...")
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            user_keys = list(h5f.keys())
            if not user_keys:
                print("Train HDF5 file is empty.")
            else:
                sample_user_id = user_keys[0]
                print(f"Sample user '{sample_user_id}' in {TRAIN_H5_PATH}:")
                for dset, dataset in h5f[sample_user_id].items():
                    print(f"  - Dataset: {dset}, Shape: {dataset.shape}")
    except Exception as e:
        print(f"Could not verify HDF5 file: {e}")