sebdg committed on
Commit
9f82e6a
·
verified ·
1 Parent(s): e36043b

Update trading_data.py

Browse files
Files changed (1) hide show
  1. trading_data.py +77 -0
trading_data.py CHANGED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import DatasetBuilder, DownloadManager, DatasetInfo
2
+ import datasets
3
+ import os
4
+ import pandas as pd
5
+
6
class TradingDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for OHLCV trading data loaded from a local CSV file.

    Fix: the original subclassed ``DatasetBuilder`` directly, which is abstract
    and never calls ``_generate_examples``; ``GeneratorBasedBuilder`` is the
    base class that drives a ``_generate_examples`` generator.
    """

    # Named configurations. NOTE(review): _split_generators below does not yet
    # branch on self.config.name, so all three configs read the same file —
    # confirm whether per-config filtering is intended.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all", version=datasets.Version("1.0.0")),
        datasets.BuilderConfig(name="stocks", version=datasets.Version("1.0.0")),
        datasets.BuilderConfig(name="etfs", version=datasets.Version("1.0.0")),
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="This is my custom dataset.",
            features=datasets.Features({
                "File": datasets.Value("string"),
                # Fix: "datetime" is not a valid Arrow dtype for datasets.Value
                # (it raises at schema construction). CSV dates arrive as text,
                # so store them as strings; switch to "timestamp[s]" once the
                # column is parsed upstream.
                "Date": datasets.Value("string"),
                "Open": datasets.Value("float64"),
                "High": datasets.Value("float64"),
                "Low": datasets.Value("float64"),
                "Close": datasets.Value("float64"),
                "Adj Close": datasets.Value("float64"),
                "Volume": datasets.Value("float64"),
            }),
            # No (input, target) pair for as_supervised=True.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://huggingface.co/datasets/sebdg/trading_data/",
            citation="Your Citation Here",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Return the list of SplitGenerators (a single TRAIN split).

        For remote data the DownloadManager downloads/extracts; for local
        paths it simply passes them through, so the same call works for both.
        """
        print('Split generators')
        # NOTE(review): placeholder path — point this at the real CSV.
        urls_to_download = {"data_file": "path/to/your/local/file.csv"}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepath": downloaded_files["data_file"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs read row-by-row from the CSV.

        Args:
            filepath: path to the CSV file produced by _split_generators.
            split: split name ("train"); currently unused.
        """
        print('Yielding examples')
        data = pd.read_csv(filepath)
        # Fix: renamed loop variable `id` — it shadowed the builtin.
        for idx, row in data.iterrows():
            yield idx, {
                "File": row["File"],  # Adjust field names based on your CSV
                "Date": row["Date"],
                "Open": row["Open"],
                "High": row["High"],
                "Low": row["Low"],
                "Close": row["Close"],
                "Adj Close": row["Adj Close"],
                "Volume": row["Volume"],
            }