amylonidis committed on
Commit
8cf350d
·
verified ·
1 Parent(s): 9691a02

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +38 -34
README.md CHANGED
@@ -74,8 +74,10 @@ that were published from March until April in 1985.
74
 
75
  ```python
76
 
 
77
  import pandas as pd
78
  from datetime import datetime
 
79
 
80
  def load_csvs_from_huggingface(start_date, end_date):
81
  """
@@ -86,30 +88,30 @@ def load_csvs_from_huggingface(start_date, end_date):
86
 
87
  :return: pd.DataFrame, combined data from selected CSVs
88
  """
 
89
  huggingface_dataset_name = "amylonidis/PatClass2011"
90
 
91
  column_types = {
92
- "ucid": "string",
93
- "country": "category",
94
- "doc_number": "int64",
95
- "kind": "category",
96
- "lang": "category",
97
- "date": "int32",
98
- "application_date": "int32",
99
- "date_produced": "int32",
100
- "status": "category",
101
- "main_code": "string",
102
- "further_codes": "string",
103
- "ipcr_codes": "string",
104
- "ecla_codes": "string",
105
- "title": "string",
106
- "abstract": "string",
107
- "description": "string",
108
- "claims": "string",
109
- "applicants": "string",
110
- "inventors": "string",
111
- }
112
-
113
 
114
  dataset_years = ['1978', '1979', '1980', '1981', '1982', '1983', '1984', '1985', '1986',
115
  '1987', '1988', '1989', '1990', '1991', '1992', '1993', '1994', '1995',
@@ -123,27 +125,29 @@ def load_csvs_from_huggingface(start_date, end_date):
123
  matching_years = [f for f in dataset_years for year in given_years if f==year]
124
 
125
  if not matching_years:
126
- raise ValueError(f"No matching CSV files found in dataset for the given dates")
127
 
128
  df_list = []
129
  for year in matching_years:
130
  filepath = f"data/years/{year}/clefip2011_en_classification_{year}_validated.csv"
131
 
132
  try:
133
- dataset = load_dataset(huggingface_dataset_name, data_files=filepath)
134
- df = dataset["train"].to_pandas().astype(column_types)
135
- df_list.append(df)
136
- except Exception as e:
137
- print(f"Error loading {filepath}: {e}")
138
 
139
 
140
- if df_list:
141
- df = pd.concat(df_list, ignore_index=True)
142
- df["date"] = df["date"].astype(float).astype(int)
143
- df_filtered = df[(df["date"] >= start_date_int) & (df["date"] <= end_date_int)]
144
- return df_filtered
145
- else:
146
- return pd.DataFrame()
 
 
 
147
 
148
 
149
  ```
 
74
 
75
  ```python
76
 
77
+ from datasets import load_dataset
78
  import pandas as pd
79
  from datetime import datetime
80
+ import gc
81
 
82
  def load_csvs_from_huggingface(start_date, end_date):
83
  """
 
88
 
89
  :return: pd.DataFrame, combined data from selected CSVs
90
  """
91
+
92
  huggingface_dataset_name = "amylonidis/PatClass2011"
93
 
94
  column_types = {
95
+ "ucid": "string",
96
+ "country": "category",
97
+ "doc_number": "int64",
98
+ "kind": "category",
99
+ "lang": "category",
100
+ "date": "int32",
101
+ "application_date": "int32",
102
+ "date_produced": "int32",
103
+ "status": "category",
104
+ "main_code": "string",
105
+ "further_codes": "string",
106
+ "ipcr_codes": "string",
107
+ "ecla_codes": "string",
108
+ "title": "string",
109
+ "abstract": "string",
110
+ "description": "string",
111
+ "claims": "string",
112
+ "applicants": "string",
113
+ "inventors": "string",
114
+ }
 
115
 
116
  dataset_years = ['1978', '1979', '1980', '1981', '1982', '1983', '1984', '1985', '1986',
117
  '1987', '1988', '1989', '1990', '1991', '1992', '1993', '1994', '1995',
 
125
  matching_years = [f for f in dataset_years for year in given_years if f==year]
126
 
127
  if not matching_years:
128
+ raise ValueError(f"No matching CSV files found for {start_date} to {end_date}")
129
 
130
  df_list = []
131
  for year in matching_years:
132
  filepath = f"data/years/{year}/clefip2011_en_classification_{year}_validated.csv"
133
 
134
  try:
135
+ dataset = load_dataset(huggingface_dataset_name, data_files=filepath, split="train")
136
+ df = dataset.to_pandas().astype(column_types)
137
+ mask = (df["date"] >= start_date_int) & (df["date"] <= end_date_int)
138
+ df_filtered = df[mask].copy()
 
139
 
140
 
141
+ if not df_filtered.empty:
142
+ df_list.append(df_filtered)
143
+
144
+ del df, dataset, df_filtered, mask
145
+ gc.collect()
146
+
147
+ except Exception as e:
148
+ print(f"Error processing {filepath}: {e}")
149
+
150
+ return pd.concat(df_list, ignore_index=True) if df_list else pd.DataFrame()
151
 
152
 
153
  ```