add arxiv papers export to hub stats job script
hub-stats.py  (+41 -3)
@@ -100,6 +100,11 @@ ENDPOINT_CONFIGS = {
         "params": {},
         "base_url": "https://huggingface.co/api/daily_papers",
     },
+    "arxiv_papers": {
+        "limit": 100,
+        "params": {},
+        "base_url": "https://huggingface.co/api/papers",
+    },
 }
 
 
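For context, a rough sketch of how an entry like this is consumed; the fetch_page helper and the use of requests are illustrative (not from hub-stats.py), and it assumes /api/papers accepts a limit query parameter the way the script's paginated endpoints do:

import requests

ARXIV_PAPERS_CONFIG = {
    "limit": 100,
    "params": {},
    "base_url": "https://huggingface.co/api/papers",
}

def fetch_page(config):
    # Hypothetical helper: merge the static params with the page size
    # and GET one page of results as JSON.
    params = {**config["params"], "limit": config["limit"]}
    resp = requests.get(config["base_url"], params=params, timeout=30)
    resp.raise_for_status()
    return resp.json()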
@@ -121,6 +126,13 @@ def to_json_string(x):
     )
 
 
+def stringify_nested_columns(df):
+    for col in df.columns:
+        if df[col].map(lambda value: isinstance(value, (dict, list))).any():
+            df[col] = df[col].apply(to_json_string)
+    return df
+
+
 def process_dataframe(df, endpoint):
     if len(df) == 0:
         return df
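A minimal, self-contained sketch of what the new helper does; to_json_string is stubbed here as plain json.dumps, standing in for the script's own implementation defined earlier in the file:

import json
import pandas as pd

def to_json_string(x):
    # Stand-in for hub-stats.py's to_json_string helper.
    return json.dumps(x)

def stringify_nested_columns(df):
    # Serialize any column holding dicts or lists to JSON strings,
    # keeping the resulting parquet schema flat.
    for col in df.columns:
        if df[col].map(lambda value: isinstance(value, (dict, list))).any():
            df[col] = df[col].apply(to_json_string)
    return df

df = pd.DataFrame({
    "id": ["2401.00001"],
    "authors": [[{"name": "Ada"}, {"name": "Grace"}]],
})
df = stringify_nested_columns(df)
print(df["authors"].iloc[0])  # '[{"name": "Ada"}, {"name": "Grace"}]'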
@@ -151,6 +163,13 @@ def process_dataframe(df, endpoint):
                     None
                 )
 
+    elif endpoint == "arxiv_papers":
+        for ts_col in ["publishedAt", "submittedOnDailyAt"]:
+            if ts_col in df.columns:
+                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
+                    None
+                )
+
     else:
         for field in ["createdAt", "lastModified"]:
             if field in df.columns:
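Standalone, the new branch behaves like this: errors="coerce" turns unparseable values into NaT, and tz_localize(None) strips the UTC offset so the column ends up timezone-naive like the other endpoints' timestamps (this assumes the API returns offset-qualified timestamps; tz_localize(None) raises on already-naive values):

import pandas as pd

raw = pd.Series(["2024-05-01T12:30:00Z", "not-a-date"])
parsed = pd.to_datetime(raw, errors="coerce").dt.tz_localize(None)
print(parsed)
# 0   2024-05-01 12:30:00
# 1                   NaT
# dtype: datetime64[ns]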
@@ -165,6 +184,9 @@ def process_dataframe(df, endpoint):
         if col in df.columns:
             df[col] = df[col].apply(to_json_string)
 
+    if endpoint == "arxiv_papers":
+        df = stringify_nested_columns(df)
+
     return df
 
 
@@ -229,7 +251,14 @@ def jsonl_to_parquet(endpoint, jsonl_file, output_file):
 
 async def create_parquet_files(skip_upload=False):
     start_time = time.time()
-    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
+    endpoints = [
+        "daily_papers",
+        "arxiv_papers",
+        "models",
+        "spaces",
+        "datasets",
+        "posts",
+    ]
     created_files = []
     jsonl_files = {}
 
@@ -265,7 +294,9 @@ async def create_parquet_files(skip_upload=False):
             if endpoint == "posts":
                 total_items = data.get("numTotalItems", 0)
                 items_on_page = len(data.get("socialPosts", []))
-                if (page + 1) * params["limit"] >= total_items or items_on_page == 0:
+                if (page + 1) * params[
+                    "limit"
+                ] >= total_items or items_on_page == 0:
                     url = None
                 else:
                     url = base_url
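The reflowed stop condition is behavior-neutral (just a line-length split); a worked example of the cutoff with the default page size, using hypothetical numbers:

limit = 100
total_items = 250  # hypothetical numTotalItems from the API
for page in range(3):
    # Stop once the pages fetched so far cover the reported total.
    print(page, (page + 1) * limit >= total_items)
# 0 False  (items 0-99, keep paging)
# 1 False  (items 100-199, keep paging)
# 2 True   (items 200-249, url is set to None)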
@@ -307,7 +338,14 @@ def recreate_from_jsonl():
 
 
 def recreate_from_jsonl():
-    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
+    endpoints = [
+        "daily_papers",
+        "arxiv_papers",
+        "models",
+        "spaces",
+        "datasets",
+        "posts",
+    ]
 
     for endpoint in endpoints:
         jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")