Datasets:
ArXiv:
DOI:
License:
fix dataset generation error
#7
by
chapsticklover
- opened
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- .gitattributes +1 -1
- README.md +60 -34
- events.csv +0 -3
- events_test.csv +0 -3
- events_train.csv +0 -3
- example.py +0 -54
- merge_hdf5.py +0 -9
- models/phasenet_picks.csv +0 -3
- models/phasenet_plus_events.csv +0 -3
- models/phasenet_plus_picks.csv +0 -3
- models/phasenet_pt_picks.csv +0 -3
- picks.csv +0 -3
- picks_test.csv +0 -3
- picks_train.csv +0 -3
- quakeflow_nc.py +167 -108
- waveform.h5 +0 -3
- waveform_h5/1987.h5 +0 -3
- waveform_h5/1988.h5 +0 -3
- waveform_h5/1989.h5 +0 -3
- waveform_h5/1990.h5 +0 -3
- waveform_h5/1991.h5 +0 -3
- waveform_h5/1992.h5 +0 -3
- waveform_h5/1993.h5 +0 -3
- waveform_h5/1994.h5 +0 -3
- waveform_h5/1995.h5 +0 -3
- waveform_h5/1996.h5 +0 -3
- waveform_h5/1997.h5 +0 -3
- waveform_h5/1998.h5 +0 -3
- waveform_h5/1999.h5 +0 -3
- waveform_h5/2000.h5 +0 -3
- waveform_h5/2001.h5 +0 -3
- waveform_h5/2002.h5 +0 -3
- waveform_h5/2003.h5 +0 -3
- waveform_h5/2004.h5 +0 -3
- waveform_h5/2005.h5 +0 -3
- waveform_h5/2006.h5 +0 -3
- waveform_h5/2007.h5 +0 -3
- waveform_h5/2008.h5 +0 -3
- waveform_h5/2009.h5 +0 -3
- waveform_h5/2010.h5 +0 -3
- waveform_h5/2011.h5 +0 -3
- waveform_h5/2012.h5 +0 -3
- waveform_h5/2013.h5 +0 -3
- waveform_h5/2014.h5 +0 -3
- waveform_h5/2015.h5 +0 -3
- waveform_h5/2016.h5 +0 -3
- waveform_h5/2017.h5 +0 -3
- waveform_h5/2018.h5 +0 -3
- waveform_h5/2019.h5 +0 -3
- waveform_h5/2020.h5 +0 -3
.gitattributes
CHANGED
|
@@ -52,4 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 55 |
-
|
|
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
ncedc_eventid.h5 filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
|
@@ -66,7 +66,7 @@ Waveform data, metadata, or data products for this study were accessed through t
|
|
| 66 |
- datasets
|
| 67 |
- h5py
|
| 68 |
- fsspec
|
| 69 |
-
-
|
| 70 |
|
| 71 |
### Usage
|
| 72 |
Import the necessary packages:
|
|
@@ -74,6 +74,7 @@ Import the necessary packages:
|
|
| 74 |
import h5py
|
| 75 |
import numpy as np
|
| 76 |
import torch
|
|
|
|
| 77 |
from datasets import load_dataset
|
| 78 |
```
|
| 79 |
We have 6 configurations for the dataset:
|
|
@@ -88,28 +89,16 @@ We have 6 configurations for the dataset:
|
|
| 88 |
|
| 89 |
The sample of `station` is a dictionary with the following keys:
|
| 90 |
- `data`: the waveform with shape `(3, nt)`, the default time length is 8192
|
| 91 |
-
- `
|
| 92 |
-
- `
|
| 93 |
-
- `phase_time`: the phase arrival time
|
| 94 |
-
- `phase_index`: the time point index of the phase arrival time
|
| 95 |
-
- `phase_type`: the phase type
|
| 96 |
-
- `phase_polarity`: the phase polarity in ('U', 'D', 'N')
|
| 97 |
-
- `event_time`: the event time
|
| 98 |
-
- `event_time_index`: the time point index of the event time
|
| 99 |
-
- `event_location`: the event location with shape `(3,)`, including latitude, longitude, depth
|
| 100 |
- `station_location`: the station location with shape `(3,)`, including latitude, longitude and depth
|
| 101 |
|
| 102 |
The sample of `event` is a dictionary with the following keys:
|
| 103 |
- `data`: the waveform with shape `(n_station, 3, nt)`, the default time length is 8192
|
| 104 |
-
- `
|
| 105 |
-
- `
|
| 106 |
-
- `
|
| 107 |
-
- `
|
| 108 |
-
- `phase_type`: the phase type with shape `(n_station,)`
|
| 109 |
-
- `phase_polarity`: the phase polarity in ('U', 'D', 'N') with shape `(n_station,)`
|
| 110 |
-
- `event_time`: the event time
|
| 111 |
-
- `event_time_index`: the time point index of the event time
|
| 112 |
-
- `event_location`: the space-time coordinates of the event with shape `(n_staion, 3)`
|
| 113 |
- `station_location`: the space coordinates of the station with shape `(n_station, 3)`, including latitude, longitude and depth
|
| 114 |
|
| 115 |
The default configuration is `station_test`. You can specify the configuration by argument `name`. For example:
|
|
@@ -128,33 +117,70 @@ quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="t
|
|
| 128 |
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="event", split="train")
|
| 129 |
```
|
| 130 |
|
| 131 |
-
####
|
|
|
|
| 132 |
```python
|
| 133 |
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
|
| 135 |
# print the first sample of the iterable dataset
|
| 136 |
for example in quakeflow_nc:
|
| 137 |
print("\nIterable test\n")
|
| 138 |
print(example.keys())
|
| 139 |
for key in example.keys():
|
| 140 |
-
|
| 141 |
-
print(key, np.array(example[key]).shape)
|
| 142 |
-
else:
|
| 143 |
-
print(key, example[key])
|
| 144 |
break
|
| 145 |
|
| 146 |
-
|
| 147 |
-
quakeflow_nc = quakeflow_nc.with_format("torch")
|
| 148 |
-
dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)
|
| 149 |
|
| 150 |
for batch in dataloader:
|
| 151 |
print("\nDataloader test\n")
|
| 152 |
-
print(
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 159 |
break
|
| 160 |
```
|
|
|
|
| 66 |
- datasets
|
| 67 |
- h5py
|
| 68 |
- fsspec
|
| 69 |
+
- torch (for PyTorch)
|
| 70 |
|
| 71 |
### Usage
|
| 72 |
Import the necessary packages:
|
|
|
|
| 74 |
import h5py
|
| 75 |
import numpy as np
|
| 76 |
import torch
|
| 77 |
+
from torch.utils.data import Dataset, IterableDataset, DataLoader
|
| 78 |
from datasets import load_dataset
|
| 79 |
```
|
| 80 |
We have 6 configurations for the dataset:
|
|
|
|
| 89 |
|
| 90 |
The sample of `station` is a dictionary with the following keys:
|
| 91 |
- `data`: the waveform with shape `(3, nt)`, the default time length is 8192
|
| 92 |
+
- `phase_pick`: the probability of the phase pick with shape `(3, nt)`, the first dimension is noise, P and S
|
| 93 |
+
- `event_location`: the event location with shape `(4,)`, including latitude, longitude, depth and time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
- `station_location`: the station location with shape `(3,)`, including latitude, longitude and depth
|
| 95 |
|
| 96 |
The sample of `event` is a dictionary with the following keys:
|
| 97 |
- `data`: the waveform with shape `(n_station, 3, nt)`, the default time length is 8192
|
| 98 |
+
- `phase_pick`: the probability of the phase pick with shape `(n_station, 3, nt)`, the first dimension is noise, P and S
|
| 99 |
+
- `event_center`: the probability of the event time with shape `(n_station, feature_nt)`, default feature time length is 512
|
| 100 |
+
- `event_location`: the space-time coordinates of the event with shape `(n_staion, 4, feature_nt)`
|
| 101 |
+
- `event_location_mask`: the probability mask of the event time with shape `(n_station, feature_nt)`
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
- `station_location`: the space coordinates of the station with shape `(n_station, 3)`, including latitude, longitude and depth
|
| 103 |
|
| 104 |
The default configuration is `station_test`. You can specify the configuration by argument `name`. For example:
|
|
|
|
| 117 |
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="event", split="train")
|
| 118 |
```
|
| 119 |
|
| 120 |
+
#### Usage for `station`
|
| 121 |
+
Then you can change the dataset into PyTorch format iterable dataset, and view the first sample:
|
| 122 |
```python
|
| 123 |
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
|
| 124 |
+
# for PyTorch DataLoader, we need to divide the dataset into several shards
|
| 125 |
+
num_workers=4
|
| 126 |
+
quakeflow_nc = quakeflow_nc.to_iterable_dataset(num_shards=num_workers)
|
| 127 |
+
# because add examples formatting to get tensors when using the "torch" format
|
| 128 |
+
# has not been implemented yet, we need to manually add the formatting when using iterable dataset
|
| 129 |
+
# if you want to use dataset directly, just use
|
| 130 |
+
# quakeflow_nc.with_format("torch")
|
| 131 |
+
quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
|
| 132 |
+
try:
|
| 133 |
+
isinstance(quakeflow_nc, torch.utils.data.IterableDataset)
|
| 134 |
+
except:
|
| 135 |
+
raise Exception("quakeflow_nc is not an IterableDataset")
|
| 136 |
|
| 137 |
# print the first sample of the iterable dataset
|
| 138 |
for example in quakeflow_nc:
|
| 139 |
print("\nIterable test\n")
|
| 140 |
print(example.keys())
|
| 141 |
for key in example.keys():
|
| 142 |
+
print(key, example[key].shape, example[key].dtype)
|
|
|
|
|
|
|
|
|
|
| 143 |
break
|
| 144 |
|
| 145 |
+
dataloader = DataLoader(quakeflow_nc, batch_size=4, num_workers=num_workers)
|
|
|
|
|
|
|
| 146 |
|
| 147 |
for batch in dataloader:
|
| 148 |
print("\nDataloader test\n")
|
| 149 |
+
print(batch.keys())
|
| 150 |
+
for key in batch.keys():
|
| 151 |
+
print(key, batch[key].shape, batch[key].dtype)
|
| 152 |
+
break
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
#### Usage for `event`
|
| 156 |
+
|
| 157 |
+
Then you can change the dataset into PyTorch format dataset, and view the first sample (Don't forget to reorder the keys):
|
| 158 |
+
```python
|
| 159 |
+
quakeflow_nc = datasets.load_dataset("AI4EPS/quakeflow_nc", split="test", name="event_test")
|
| 160 |
+
|
| 161 |
+
# for PyTorch DataLoader, we need to divide the dataset into several shards
|
| 162 |
+
num_workers=4
|
| 163 |
+
quakeflow_nc = quakeflow_nc.to_iterable_dataset(num_shards=num_workers)
|
| 164 |
+
quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
|
| 165 |
+
try:
|
| 166 |
+
isinstance(quakeflow_nc, torch.utils.data.IterableDataset)
|
| 167 |
+
except:
|
| 168 |
+
raise Exception("quakeflow_nc is not an IterableDataset")
|
| 169 |
+
|
| 170 |
+
# print the first sample of the iterable dataset
|
| 171 |
+
for example in quakeflow_nc:
|
| 172 |
+
print("\nIterable test\n")
|
| 173 |
+
print(example.keys())
|
| 174 |
+
for key in example.keys():
|
| 175 |
+
print(key, example[key].shape, example[key].dtype)
|
| 176 |
+
break
|
| 177 |
+
|
| 178 |
+
dataloader = DataLoader(quakeflow_nc, batch_size=1, num_workers=num_workers)
|
| 179 |
+
|
| 180 |
+
for batch in dataloader:
|
| 181 |
+
print("\nDataloader test\n")
|
| 182 |
+
print(batch.keys())
|
| 183 |
+
for key in batch.keys():
|
| 184 |
+
print(key, batch[key].shape, batch[key].dtype)
|
| 185 |
break
|
| 186 |
```
|
events.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:84166f6a0be6a02caeb8d11ed3495e5256db698c795dbb3db4d45d8b863313d8
|
| 3 |
-
size 46863258
|
|
|
|
|
|
|
|
|
|
|
|
events_test.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:74b5bf132e23763f851035717a1baa92ab8fb73253138b640103390dce33e154
|
| 3 |
-
size 1602217
|
|
|
|
|
|
|
|
|
|
|
|
events_train.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:ef579400d9354ecaf142bdc7023291c952dbfc20d6bafab4715dff1774b3f7a5
|
| 3 |
-
size 45261178
|
|
|
|
|
|
|
|
|
|
|
|
example.py
DELETED
|
@@ -1,54 +0,0 @@
|
|
| 1 |
-
# %%
|
| 2 |
-
import datasets
|
| 3 |
-
import numpy as np
|
| 4 |
-
from torch.utils.data import DataLoader
|
| 5 |
-
|
| 6 |
-
quakeflow_nc = datasets.load_dataset(
|
| 7 |
-
"AI4EPS/quakeflow_nc",
|
| 8 |
-
name="station",
|
| 9 |
-
split="train",
|
| 10 |
-
# name="station_test",
|
| 11 |
-
# split="test",
|
| 12 |
-
# download_mode="force_redownload",
|
| 13 |
-
trust_remote_code=True,
|
| 14 |
-
num_proc=36,
|
| 15 |
-
)
|
| 16 |
-
# quakeflow_nc = datasets.load_dataset(
|
| 17 |
-
# "./quakeflow_nc.py",
|
| 18 |
-
# name="station",
|
| 19 |
-
# split="train",
|
| 20 |
-
# # name="statoin_test",
|
| 21 |
-
# # split="test",
|
| 22 |
-
# num_proc=36,
|
| 23 |
-
# )
|
| 24 |
-
|
| 25 |
-
print(quakeflow_nc)
|
| 26 |
-
|
| 27 |
-
# print the first sample of the iterable dataset
|
| 28 |
-
for example in quakeflow_nc:
|
| 29 |
-
print("\nIterable dataset\n")
|
| 30 |
-
print(example)
|
| 31 |
-
print(example.keys())
|
| 32 |
-
for key in example.keys():
|
| 33 |
-
if key == "waveform":
|
| 34 |
-
print(key, np.array(example[key]).shape)
|
| 35 |
-
else:
|
| 36 |
-
print(key, example[key])
|
| 37 |
-
break
|
| 38 |
-
|
| 39 |
-
# %%
|
| 40 |
-
quakeflow_nc = quakeflow_nc.with_format("torch")
|
| 41 |
-
dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)
|
| 42 |
-
|
| 43 |
-
for batch in dataloader:
|
| 44 |
-
print("\nDataloader dataset\n")
|
| 45 |
-
print(f"Batch size: {len(batch)}")
|
| 46 |
-
print(batch[0].keys())
|
| 47 |
-
for key in batch[0].keys():
|
| 48 |
-
if key == "waveform":
|
| 49 |
-
print(key, np.array(batch[0][key]).shape)
|
| 50 |
-
else:
|
| 51 |
-
print(key, batch[0][key])
|
| 52 |
-
break
|
| 53 |
-
|
| 54 |
-
# %%
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
merge_hdf5.py
CHANGED
|
@@ -11,18 +11,9 @@ h5_out = "waveform.h5"
|
|
| 11 |
h5_train = "waveform_train.h5"
|
| 12 |
h5_test = "waveform_test.h5"
|
| 13 |
|
| 14 |
-
# # %%
|
| 15 |
-
# h5_dir = "waveform_h5"
|
| 16 |
-
# h5_out = "waveform.h5"
|
| 17 |
-
# h5_train = "waveform_train.h5"
|
| 18 |
-
# h5_test = "waveform_test.h5"
|
| 19 |
-
|
| 20 |
h5_files = sorted(os.listdir(h5_dir))
|
| 21 |
train_files = h5_files[:-1]
|
| 22 |
test_files = h5_files[-1:]
|
| 23 |
-
# train_files = h5_files
|
| 24 |
-
# train_files = [x for x in train_files if (x != "2014.h5") and (x not in [])]
|
| 25 |
-
# test_files = []
|
| 26 |
print(f"train files: {train_files}")
|
| 27 |
print(f"test files: {test_files}")
|
| 28 |
|
|
|
|
| 11 |
h5_train = "waveform_train.h5"
|
| 12 |
h5_test = "waveform_test.h5"
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
h5_files = sorted(os.listdir(h5_dir))
|
| 15 |
train_files = h5_files[:-1]
|
| 16 |
test_files = h5_files[-1:]
|
|
|
|
|
|
|
|
|
|
| 17 |
print(f"train files: {train_files}")
|
| 18 |
print(f"test files: {test_files}")
|
| 19 |
|
models/phasenet_picks.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:b51df5987a2a05e44e0949b42d00a28692109da521911c55d2692ebfad0c54d7
|
| 3 |
-
size 9355127
|
|
|
|
|
|
|
|
|
|
|
|
models/phasenet_plus_events.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:f686ebf8da632b71a947e4ee884c76f30a313ae0e9d6e32d1f675828884a95f7
|
| 3 |
-
size 7381331
|
|
|
|
|
|
|
|
|
|
|
|
models/phasenet_plus_picks.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:83d241a54477f722cd032efe8368a653bba170e1abebf3d9097d7756cfd54b23
|
| 3 |
-
size 9987053
|
|
|
|
|
|
|
|
|
|
|
|
models/phasenet_pt_picks.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:bb7ea98484b5e6e1c4c79ea5eb1e38bce43e87b546fc6d29c72d187a6d8b1d00
|
| 3 |
-
size 8715799
|
|
|
|
|
|
|
|
|
|
|
|
picks.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:52f077ae9f94481d4b80f37c9f15038ee1e3636d5da2da3b1d4aaa2991879cc3
|
| 3 |
-
size 422247029
|
|
|
|
|
|
|
|
|
|
|
|
picks_test.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:bb09f0ac169bf451cfcfb4547359756cb1a53828bf4074971d9160a3aa171f38
|
| 3 |
-
size 21850235
|
|
|
|
|
|
|
|
|
|
|
|
picks_train.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d22c5d5eb1c27a723525c657c1308a3b643d6f3e716eb1c43e064b7a87bb0819
|
| 3 |
-
size 400397230
|
|
|
|
|
|
|
|
|
|
|
|
quakeflow_nc.py
CHANGED
|
@@ -104,10 +104,14 @@ class BatchBuilderConfig(datasets.BuilderConfig):
|
|
| 104 |
"""
|
| 105 |
yield a batch of event-based sample, so the number of sample stations can vary among batches
|
| 106 |
Batch Config for QuakeFlow_NC
|
|
|
|
|
|
|
| 107 |
"""
|
| 108 |
|
| 109 |
-
def __init__(self, **kwargs):
|
| 110 |
super().__init__(**kwargs)
|
|
|
|
|
|
|
| 111 |
|
| 112 |
|
| 113 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
|
@@ -116,7 +120,11 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
|
|
| 116 |
|
| 117 |
VERSION = datasets.Version("1.1.0")
|
| 118 |
|
|
|
|
| 119 |
nt = 8192
|
|
|
|
|
|
|
|
|
|
| 120 |
|
| 121 |
# This is an example of a dataset with multiple configurations.
|
| 122 |
# If you don't want/need to define several sub-sets in your dataset,
|
|
@@ -165,44 +173,30 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
|
|
| 165 |
or (self.config.name == "station_train")
|
| 166 |
or (self.config.name == "station_test")
|
| 167 |
):
|
| 168 |
-
features
|
| 169 |
{
|
| 170 |
-
"
|
| 171 |
-
"
|
| 172 |
-
"station_id": datasets.Value("string"),
|
| 173 |
-
"waveform": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
|
| 174 |
-
"phase_time": datasets.Sequence(datasets.Value("string")),
|
| 175 |
-
"phase_index": datasets.Sequence(datasets.Value("int32")),
|
| 176 |
-
"phase_type": datasets.Sequence(datasets.Value("string")),
|
| 177 |
-
"phase_polarity": datasets.Sequence(datasets.Value("string")),
|
| 178 |
-
"begin_time": datasets.Value("string"),
|
| 179 |
-
"end_time": datasets.Value("string"),
|
| 180 |
-
"event_time": datasets.Value("string"),
|
| 181 |
-
"event_time_index": datasets.Value("int32"),
|
| 182 |
"event_location": datasets.Sequence(datasets.Value("float32")),
|
| 183 |
"station_location": datasets.Sequence(datasets.Value("float32")),
|
| 184 |
-
}
|
| 185 |
-
|
| 186 |
-
elif (
|
| 187 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
{
|
| 189 |
-
"
|
| 190 |
-
"
|
| 191 |
-
"
|
| 192 |
-
"
|
| 193 |
-
"
|
| 194 |
-
"
|
| 195 |
-
|
| 196 |
-
"end_time": datasets.Value("string"),
|
| 197 |
-
"event_time": datasets.Value("string"),
|
| 198 |
-
"event_time_index": datasets.Value("int32"),
|
| 199 |
-
"event_location": datasets.Sequence(datasets.Value("float32")),
|
| 200 |
-
"station_location": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
|
| 201 |
-
},
|
| 202 |
)
|
| 203 |
-
|
| 204 |
-
raise ValueError(f"config.name = {self.config.name} is not in BUILDER_CONFIGS")
|
| 205 |
-
|
| 206 |
return datasets.DatasetInfo(
|
| 207 |
# This is the description that will appear on the datasets page.
|
| 208 |
description=_DESCRIPTION,
|
|
@@ -228,20 +222,18 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
|
|
| 228 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
| 229 |
urls = _URLS[self.config.name]
|
| 230 |
# files = dl_manager.download(urls)
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
else:
|
| 234 |
-
files = [f"{self.storage_options['bucket']}/{x}" for x in _FILES]
|
| 235 |
-
# files = [f"/nfs/quakeflow_dataset/NC/quakeflow_nc/waveform_h5/{x}" for x in _FILES][-3:]
|
| 236 |
-
print("Files:\n", "\n".join(sorted(files)))
|
| 237 |
-
print(self.storage_options)
|
| 238 |
|
| 239 |
if self.config.name == "station" or self.config.name == "event":
|
| 240 |
return [
|
| 241 |
datasets.SplitGenerator(
|
| 242 |
name=datasets.Split.TRAIN,
|
| 243 |
# These kwargs will be passed to _generate_examples
|
| 244 |
-
gen_kwargs={
|
|
|
|
|
|
|
|
|
|
| 245 |
),
|
| 246 |
datasets.SplitGenerator(
|
| 247 |
name=datasets.Split.TEST,
|
|
@@ -252,7 +244,10 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
|
|
| 252 |
return [
|
| 253 |
datasets.SplitGenerator(
|
| 254 |
name=datasets.Split.TRAIN,
|
| 255 |
-
gen_kwargs={
|
|
|
|
|
|
|
|
|
|
| 256 |
),
|
| 257 |
]
|
| 258 |
elif self.config.name == "station_test" or self.config.name == "event_test":
|
|
@@ -271,92 +266,156 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
|
|
| 271 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
| 272 |
|
| 273 |
for file in filepath:
|
| 274 |
-
print(f"\nReading {file}")
|
| 275 |
with fsspec.open(file, "rb") as fs:
|
| 276 |
with h5py.File(fs, "r") as fp:
|
|
|
|
| 277 |
event_ids = list(fp.keys())
|
| 278 |
for event_id in event_ids:
|
| 279 |
event = fp[event_id]
|
| 280 |
-
event_attrs = event.attrs
|
| 281 |
-
begin_time = event_attrs["begin_time"]
|
| 282 |
-
end_time = event_attrs["end_time"]
|
| 283 |
-
event_location = [
|
| 284 |
-
event_attrs["longitude"],
|
| 285 |
-
event_attrs["latitude"],
|
| 286 |
-
event_attrs["depth_km"],
|
| 287 |
-
]
|
| 288 |
-
event_time = event_attrs["event_time"]
|
| 289 |
-
event_time_index = event_attrs["event_time_index"]
|
| 290 |
station_ids = list(event.keys())
|
| 291 |
-
if len(station_ids) == 0:
|
| 292 |
-
continue
|
| 293 |
if (
|
| 294 |
(self.config.name == "station")
|
| 295 |
or (self.config.name == "station_train")
|
| 296 |
or (self.config.name == "station_test")
|
| 297 |
):
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
attrs
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 307 |
station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
|
| 308 |
|
| 309 |
-
yield f"{event_id}/{
|
| 310 |
-
"
|
| 311 |
-
"
|
| 312 |
-
"
|
| 313 |
-
"
|
| 314 |
-
"phase_time": phase_time,
|
| 315 |
-
"phase_index": phase_index,
|
| 316 |
-
"phase_type": phase_type,
|
| 317 |
-
"phase_polarity": phase_polarity,
|
| 318 |
-
"begin_time": begin_time,
|
| 319 |
-
"end_time": end_time,
|
| 320 |
-
"event_time": event_time,
|
| 321 |
-
"event_time_index": event_time_index,
|
| 322 |
-
"event_location": event_location,
|
| 323 |
-
"station_location": station_location,
|
| 324 |
}
|
| 325 |
|
|
|
|
| 326 |
elif (
|
| 327 |
(self.config.name == "event")
|
| 328 |
or (self.config.name == "event_train")
|
| 329 |
or (self.config.name == "event_test")
|
| 330 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 331 |
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 348 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 349 |
yield event_id, {
|
| 350 |
-
"
|
| 351 |
-
"
|
| 352 |
-
"
|
| 353 |
-
"
|
| 354 |
-
"
|
| 355 |
-
"
|
| 356 |
-
"begin_time": begin_time,
|
| 357 |
-
"end_time": end_time,
|
| 358 |
-
"event_time": event_time,
|
| 359 |
-
"event_time_index": event_time_index,
|
| 360 |
-
"event_location": event_location,
|
| 361 |
-
"station_location": station_location,
|
| 362 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
"""
|
| 105 |
yield a batch of event-based sample, so the number of sample stations can vary among batches
|
| 106 |
Batch Config for QuakeFlow_NC
|
| 107 |
+
:param batch_size: number of samples in a batch
|
| 108 |
+
:param num_stations_list: possible number of stations in a batch
|
| 109 |
"""
|
| 110 |
|
| 111 |
+
def __init__(self, batch_size: int, num_stations_list: List, **kwargs):
|
| 112 |
super().__init__(**kwargs)
|
| 113 |
+
self.batch_size = batch_size
|
| 114 |
+
self.num_stations_list = num_stations_list
|
| 115 |
|
| 116 |
|
| 117 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
|
|
|
| 120 |
|
| 121 |
VERSION = datasets.Version("1.1.0")
|
| 122 |
|
| 123 |
+
degree2km = 111.32
|
| 124 |
nt = 8192
|
| 125 |
+
feature_nt = 512
|
| 126 |
+
feature_scale = int(nt / feature_nt)
|
| 127 |
+
sampling_rate = 100.0
|
| 128 |
|
| 129 |
# This is an example of a dataset with multiple configurations.
|
| 130 |
# If you don't want/need to define several sub-sets in your dataset,
|
|
|
|
| 173 |
or (self.config.name == "station_train")
|
| 174 |
or (self.config.name == "station_test")
|
| 175 |
):
|
| 176 |
+
features=datasets.Features(
|
| 177 |
{
|
| 178 |
+
"data": datasets.Array2D(shape=(3, self.nt), dtype='float32'),
|
| 179 |
+
"phase_pick": datasets.Array2D(shape=(3, self.nt), dtype='float32'),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
"event_location": datasets.Sequence(datasets.Value("float32")),
|
| 181 |
"station_location": datasets.Sequence(datasets.Value("float32")),
|
| 182 |
+
})
|
| 183 |
+
|
| 184 |
+
elif (
|
| 185 |
+
(self.config.name == "event")
|
| 186 |
+
or (self.config.name == "event_train")
|
| 187 |
+
or (self.config.name == "event_test")
|
| 188 |
+
):
|
| 189 |
+
features=datasets.Features(
|
| 190 |
{
|
| 191 |
+
"data": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
|
| 192 |
+
"phase_pick": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
|
| 193 |
+
"event_center" : datasets.Array2D(shape=(None, self.feature_nt), dtype='float32'),
|
| 194 |
+
"event_location": datasets.Array3D(shape=(None, 4, self.feature_nt), dtype='float32'),
|
| 195 |
+
"event_location_mask": datasets.Array2D(shape=(None, self.feature_nt), dtype='float32'),
|
| 196 |
+
"station_location": datasets.Array2D(shape=(None, 3), dtype="float32"),
|
| 197 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
)
|
| 199 |
+
|
|
|
|
|
|
|
| 200 |
return datasets.DatasetInfo(
|
| 201 |
# This is the description that will appear on the datasets page.
|
| 202 |
description=_DESCRIPTION,
|
|
|
|
| 222 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
| 223 |
urls = _URLS[self.config.name]
|
| 224 |
# files = dl_manager.download(urls)
|
| 225 |
+
files = dl_manager.download_and_extract(urls)
|
| 226 |
+
print(files)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 227 |
|
| 228 |
if self.config.name == "station" or self.config.name == "event":
|
| 229 |
return [
|
| 230 |
datasets.SplitGenerator(
|
| 231 |
name=datasets.Split.TRAIN,
|
| 232 |
# These kwargs will be passed to _generate_examples
|
| 233 |
+
gen_kwargs={
|
| 234 |
+
"filepath": files[:-1],
|
| 235 |
+
"split": "train",
|
| 236 |
+
},
|
| 237 |
),
|
| 238 |
datasets.SplitGenerator(
|
| 239 |
name=datasets.Split.TEST,
|
|
|
|
| 244 |
return [
|
| 245 |
datasets.SplitGenerator(
|
| 246 |
name=datasets.Split.TRAIN,
|
| 247 |
+
gen_kwargs={
|
| 248 |
+
"filepath": files,
|
| 249 |
+
"split": "train",
|
| 250 |
+
},
|
| 251 |
),
|
| 252 |
]
|
| 253 |
elif self.config.name == "station_test" or self.config.name == "event_test":
|
|
|
|
| 266 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
| 267 |
|
| 268 |
for file in filepath:
|
|
|
|
| 269 |
with fsspec.open(file, "rb") as fs:
|
| 270 |
with h5py.File(fs, "r") as fp:
|
| 271 |
+
# for event_id in sorted(list(fp.keys())):
|
| 272 |
event_ids = list(fp.keys())
|
| 273 |
for event_id in event_ids:
|
| 274 |
event = fp[event_id]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
station_ids = list(event.keys())
|
|
|
|
|
|
|
| 276 |
if (
|
| 277 |
(self.config.name == "station")
|
| 278 |
or (self.config.name == "station_train")
|
| 279 |
or (self.config.name == "station_test")
|
| 280 |
):
|
| 281 |
+
waveforms = np.zeros([3, self.nt], dtype="float32")
|
| 282 |
+
phase_pick = np.zeros_like(waveforms)
|
| 283 |
+
attrs = event.attrs
|
| 284 |
+
event_location = [
|
| 285 |
+
attrs["longitude"],
|
| 286 |
+
attrs["latitude"],
|
| 287 |
+
attrs["depth_km"],
|
| 288 |
+
attrs["event_time_index"],
|
| 289 |
+
]
|
| 290 |
+
|
| 291 |
+
for i, sta_id in enumerate(station_ids):
|
| 292 |
+
waveforms[:, : self.nt] = event[sta_id][:, :self.nt]
|
| 293 |
+
# waveforms[:, : self.nt] = event[sta_id][: self.nt, :].T
|
| 294 |
+
attrs = event[sta_id].attrs
|
| 295 |
+
p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
|
| 296 |
+
s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
|
| 297 |
+
phase_pick[:, :self.nt] = generate_label([p_picks, s_picks], nt=self.nt)
|
| 298 |
station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
|
| 299 |
|
| 300 |
+
yield f"{event_id}/{sta_id}", {
|
| 301 |
+
"data": torch.from_numpy(waveforms).float(),
|
| 302 |
+
"phase_pick": torch.from_numpy(phase_pick).float(),
|
| 303 |
+
"event_location": torch.from_numpy(np.array(event_location)).float(),
|
| 304 |
+
"station_location": torch.from_numpy(np.array(station_location)).float(),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 305 |
}
|
| 306 |
|
| 307 |
+
|
| 308 |
elif (
|
| 309 |
(self.config.name == "event")
|
| 310 |
or (self.config.name == "event_train")
|
| 311 |
or (self.config.name == "event_test")
|
| 312 |
):
|
| 313 |
+
event_attrs = event.attrs
|
| 314 |
+
|
| 315 |
+
# avoid stations with P arrival equals S arrival
|
| 316 |
+
is_sick = False
|
| 317 |
+
for sta_id in station_ids:
|
| 318 |
+
attrs = event[sta_id].attrs
|
| 319 |
+
if not np.intersect1d(attrs["phase_index"][attrs["phase_type"] == "P"], attrs["phase_index"][attrs["phase_type"] == "S"]):
|
| 320 |
+
is_sick = True
|
| 321 |
+
break
|
| 322 |
+
if is_sick:
|
| 323 |
+
continue
|
| 324 |
+
|
| 325 |
+
waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
|
| 326 |
+
phase_pick = np.zeros_like(waveforms)
|
| 327 |
+
event_center = np.zeros([len(station_ids), self.nt])
|
| 328 |
+
event_location = np.zeros([len(station_ids), 4, self.nt])
|
| 329 |
+
event_location_mask = np.zeros([len(station_ids), self.nt])
|
| 330 |
+
station_location = np.zeros([len(station_ids), 3])
|
| 331 |
+
|
| 332 |
+
for i, sta_id in enumerate(station_ids):
|
| 333 |
+
# trace_id = event_id + "/" + sta_id
|
| 334 |
+
waveforms[i, :, :] = event[sta_id][:, :self.nt]
|
| 335 |
+
attrs = event[sta_id].attrs
|
| 336 |
+
p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
|
| 337 |
+
s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
|
| 338 |
+
phase_pick[i, :, :] = generate_label([p_picks, s_picks], nt=self.nt)
|
| 339 |
+
|
| 340 |
+
## TODO: how to deal with multiple phases
|
| 341 |
+
# center = (attrs["phase_index"][::2] + attrs["phase_index"][1::2])/2.0
|
| 342 |
+
## assuming only one event with both P and S picks
|
| 343 |
+
c0 = ((p_picks) + (s_picks)) / 2.0 # phase center
|
| 344 |
+
c0_width = ((s_picks - p_picks) * self.sampling_rate / 200.0).max() if p_picks!=s_picks else 50
|
| 345 |
+
dx = round(
|
| 346 |
+
(event_attrs["longitude"] - attrs["longitude"])
|
| 347 |
+
* np.cos(np.radians(event_attrs["latitude"]))
|
| 348 |
+
* self.degree2km,
|
| 349 |
+
2,
|
| 350 |
+
)
|
| 351 |
+
dy = round(
|
| 352 |
+
(event_attrs["latitude"] - attrs["latitude"])
|
| 353 |
+
* self.degree2km,
|
| 354 |
+
2,
|
| 355 |
+
)
|
| 356 |
+
dz = round(
|
| 357 |
+
event_attrs["depth_km"] + attrs["elevation_m"] / 1e3,
|
| 358 |
+
2,
|
| 359 |
+
)
|
| 360 |
|
| 361 |
+
event_center[i, :] = generate_label(
|
| 362 |
+
[
|
| 363 |
+
# [c0 / self.feature_scale],
|
| 364 |
+
c0,
|
| 365 |
+
],
|
| 366 |
+
label_width=[
|
| 367 |
+
c0_width,
|
| 368 |
+
],
|
| 369 |
+
# label_width=[
|
| 370 |
+
# 10,
|
| 371 |
+
# ],
|
| 372 |
+
# nt=self.feature_nt,
|
| 373 |
+
nt=self.nt,
|
| 374 |
+
)[1, :]
|
| 375 |
+
mask = event_center[i, :] >= 0.5
|
| 376 |
+
event_location[i, 0, :] = (
|
| 377 |
+
np.arange(self.nt) - event_attrs["event_time_index"]
|
| 378 |
+
) / self.sampling_rate
|
| 379 |
+
# event_location[0, :, i] = (np.arange(self.feature_nt) - 3000 / self.feature_scale) / self.sampling_rate
|
| 380 |
+
# print(event_location[i, 1:, mask].shape, event_location.shape, event_location[i][1:, mask].shape)
|
| 381 |
+
event_location[i][1:, mask] = np.array([dx, dy, dz])[:, np.newaxis]
|
| 382 |
+
event_location_mask[i, :] = mask
|
| 383 |
+
|
| 384 |
+
## station location
|
| 385 |
+
station_location[i, 0] = round(
|
| 386 |
+
attrs["longitude"]
|
| 387 |
+
* np.cos(np.radians(attrs["latitude"]))
|
| 388 |
+
* self.degree2km,
|
| 389 |
+
2,
|
| 390 |
)
|
| 391 |
+
station_location[i, 1] = round(attrs["latitude"] * self.degree2km, 2)
|
| 392 |
+
station_location[i, 2] = round(-attrs["elevation_m"]/1e3, 2)
|
| 393 |
+
|
| 394 |
+
std = np.std(waveforms, axis=1, keepdims=True)
|
| 395 |
+
std[std == 0] = 1.0
|
| 396 |
+
waveforms = (waveforms - np.mean(waveforms, axis=1, keepdims=True)) / std
|
| 397 |
+
waveforms = waveforms.astype(np.float32)
|
| 398 |
+
|
| 399 |
yield event_id, {
|
| 400 |
+
"data": torch.from_numpy(waveforms).float(),
|
| 401 |
+
"phase_pick": torch.from_numpy(phase_pick).float(),
|
| 402 |
+
"event_center": torch.from_numpy(event_center[:, ::self.feature_scale]).float(),
|
| 403 |
+
"event_location": torch.from_numpy(event_location[:, :, ::self.feature_scale]).float(),
|
| 404 |
+
"event_location_mask": torch.from_numpy(event_location_mask[:, ::self.feature_scale]).float(),
|
| 405 |
+
"station_location": torch.from_numpy(station_location).float(),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 406 |
}
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def generate_label(phase_list, label_width=(150, 150), nt=8192):
    """Build per-phase Gaussian pick labels plus a background channel.

    Args:
        phase_list: sequence of pick lists, one per phase type (e.g. [P_picks,
            S_picks]); each pick is a sample index into the nt-long trace.
        label_width: per-phase label width in samples; the Gaussian sigma is
            width / 6. (Tuple default avoids the mutable-default pitfall;
            callers passing a list still work.)
        nt: number of time samples in the output trace.

    Returns:
        float32 array of shape (len(phase_list) + 1, nt). Row 0 is the
        background channel (1 minus the summed phase channels, clipped at 0);
        rows 1.. are the per-phase Gaussian label channels.
    """
    target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)

    # Hoist loop invariants: the sample index vector and per-phase sigma
    # do not depend on the individual pick.
    sample_idx = np.arange(nt)
    for i, (picks, w) in enumerate(zip(phase_list, label_width)):
        two_sigma_sq = 2 * (w / 6) ** 2
        for phase_time in picks:
            t = sample_idx - phase_time
            gaussian = np.exp(-(t**2) / two_sigma_sq)
            # Truncate the tails so the label is exactly zero far from the pick.
            gaussian[gaussian < 0.1] = 0.0
            target[i + 1, :] += gaussian

    # Background channel: probability mass not claimed by any phase channel.
    target[0:1, :] = np.maximum(0, 1 - np.sum(target[1:, :], axis=0, keepdims=True))

    return target
|
waveform.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:77fb8b0bb040e1412a183a217dcbc1aa03ceb86b42db39ac62afe922a1673889
|
| 3 |
-
size 20016390
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1987.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:8afb94aafbf79db2848ae9c2006385c782493a97e6c71c1b8abf97c5d53bfc9d
|
| 3 |
-
size 7744528
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1988.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:c1398baca3f539e52744f83625b1dbb6f117a32b8d7e97f6af02a1f452f0dedd
|
| 3 |
-
size 46126800
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1989.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:533cd50fe365de8c050f0ffd4a90b697dc6b90cb86c8199ec0172316eab2ddaa
|
| 3 |
-
size 48255208
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1990.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:f5a282a9a8c47cf65d144368085470940660faeb0e77cea59fff16af68020d26
|
| 3 |
-
size 60092656
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1991.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:5ba897d96eb92e8684b52a206e94a500abfe0192930f971ce7b1319c0638d452
|
| 3 |
-
size 62332336
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1992.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d00021f46956bf43192f8c59405e203f823f1f4202c720efa52c5029e8e880b8
|
| 3 |
-
size 67360896
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1993.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:eec41dd0aa7b88c81fa9f9b5dbcaab80e1c7bc8f6c144bd81761941278c57b4f
|
| 3 |
-
size 706087936
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1994.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:b1cd002f20573636eaf101a30c5bac477edda201aba3af68be358756543ed48a
|
| 3 |
-
size 609524864
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1995.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:948f19d71520a0dd25574be300f70e62c383e319b07a7d7182fca1dcfa9d61ee
|
| 3 |
-
size 1728452872
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1996.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:23654b6f9c3a4c5a0aa56ed13ba04e943a94b458a51ac80ec1d418e9aa132840
|
| 3 |
-
size 1752242680
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1997.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d1c0f4c8146fc8ff27c8a47a942b967a97bd2835346203e6de74ca55dd522616
|
| 3 |
-
size 2661543208
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1998.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:1afac9c1a33424b739d26261ac2e9a4520be9c86c57bae4c8fe1a7a422356e45
|
| 3 |
-
size 2070489120
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/1999.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:2f2595a1919a5435148cdcf2cfa1501ce5edb53878d471500b13936f0f6f558c
|
| 3 |
-
size 2300297608
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2000.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:250fd52d9f8dd17a8bfb58a3ecfef25d62b0a1adf67f6fe6f2b446e9f72caf7a
|
| 3 |
-
size 434865160
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2001.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d70dea6156b32057760f91742f7a05a336e4f63b1f793408b5e7aad6a15551e5
|
| 3 |
-
size 919203704
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2002.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:f88c4c5960741a8d354db4a7324d56ef8750ab93aa1d9b11fc80d0c497d8d6ae
|
| 3 |
-
size 2445812792
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2003.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:943d649f1a8a0e3989d2458be68fbf041058a581c4c73f8de39f1d50d3e7b35c
|
| 3 |
-
size 3618485352
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2004.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:ed1ba66e10ba5c165568ac13950a1728927ba49b33903a0df42c3d9965a16807
|
| 3 |
-
size 6158740712
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2005.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:c816d75b172148763b19e60c1469c106c1af1f906843c3d6d94e603e02c2b6cb
|
| 3 |
-
size 2994468240
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2006.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:521e6b0ce262461f87b4b0a78ac6403cfbb597d6ace36e17f92354c456a30447
|
| 3 |
-
size 2189511664
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2007.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:ae6654c213fb4838d6a732b2c8d936bd799005b2a189d64f2d74e3767c0c503a
|
| 3 |
-
size 4393926088
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2008.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d8163aee689448c260032df9b0ab9132a5b46f0fee88a4c1ca8f4492ec5534d6
|
| 3 |
-
size 3964283536
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2009.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:6702c2d3951ddf1034f1886a79e8c5a00dfa47c88c84048edc528f047a2337b5
|
| 3 |
-
size 4162296168
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2010.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:2f2de7c07f088a32ea7ae71c2107dfd121780a47d3e3f23e5c98ddb482c6ce71
|
| 3 |
-
size 4547184704
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2011.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:520d62f3a94f1b4889f583196676fe2eccb6452807461afc93432dca930d6052
|
| 3 |
-
size 5633641952
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2012.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:98b90529df4cbff7f21cd233d482454eaeac77b81117720ca7fe6c2697819071
|
| 3 |
-
size 9520058832
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2013.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:e6f1030ff4ebe488ef9072ec984c91024a8be4ecdbe7e9af47c6e65de942c2fe
|
| 3 |
-
size 8380878704
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2014.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:a63f5e6d7d5bca552dcc99053753603dfa3109a6a080f8402f843ef688927d4c
|
| 3 |
-
size 12088815344
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2015.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:42be6994ad27eb8aee241f5edfb4ed0ee69aa3460397325cc858224ba9dd9721
|
| 3 |
-
size 8536767520
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2016.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:6e706aefd38170da41196974fc92e457d0dc56948a63640a37cea4a86a297843
|
| 3 |
-
size 9287201016
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2017.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:e20f8e5a3f5ec8927e5d44e722987461ef08c9ceb33ab982038528e9000d5323
|
| 3 |
-
size 8627205152
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2018.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:ad6e83734ff1e24ad91b17cb6656766861ae9fb30413948579d762acc092e66a
|
| 3 |
-
size 7158598240
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2019.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:6bda0b414a7a7726aebf89a51d3629ae350ffc4da797c548172a74dfbb723b05
|
| 3 |
-
size 8614182952
|
|
|
|
|
|
|
|
|
|
|
|
waveform_h5/2020.h5
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:1c064183f40d8081b3ab835871a69a58ed4584bb0bfe950cca8cacf77a312b8e
|
| 3 |
-
size 9519933120
|
|
|
|
|
|
|
|
|
|
|
|