Upload sa_download.py with huggingface_hub
Browse files- sa_download.py +57 -0
sa_download.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import wget
|
| 3 |
+
import time
|
| 4 |
+
import glob
|
| 5 |
+
|
| 6 |
+
# Pagination window for the OpenDataLab file-listing API: pages
# START_PAGE_NO..END_PAGE_NO (inclusive), PAGE_SIZE entries per page.
START_PAGE_NO = 75
END_PAGE_NO = 78
PAGE_SIZE = 10

# Headers sent with every API request.
# SECURITY NOTE(review): the cookie below embeds a personal session JWT
# (with an e-mail address) — it expires and should come from an
# environment variable or config file, never from committed source.
HEADERS = {
    "content-type": "application/json",
    "cookie": "odl_anonymous=d846642470f1536bc9dbbd02a845eb63; _ga=GA1.1.1958263203.1682393358; _bl_uid=kLl4XgLIvIRpewksafvhe26nX3UX; opendatalab_session=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJVc2VySUQiOjgxNjksIk5hbWUiOm51bGwsIkVtYWlsIjoiemhpeWFuZy5jaGVuQG5scHIuaWEuYWMuY24iLCJQaG9uZSI6IiIsIkdpdEh1YkFjY291bnQiOiIiLCJXZUNoYXRBY2NvdW50IjpudWxsLCJaaGlodUFjY291bnQiOm51bGwsIk9yZ2FuaXphdGlvbiI6bnVsbCwiRXhwaXJ5IjoiMjAyMy0wNy0yNFQxMTozNzowMS40OTAwNzM0NTUrMDg6MDAiLCJSb2xlIjoiIiwiSXNJbnRlcm5hbCI6ZmFsc2UsIlNzb1VpZCI6IjE1NjA4NyJ9.ey79fb4VJyylYlpRkSP4oIaorgSw26nVmGn3L6PuYcY; _ga_FXBQ38GZVP=GS1.1.1682411847.2.1.1682411860.0.0.0",
    # Optional tracing headers, left disabled in the original:
    # "eagleeye-pappname": "<your_pappname_here>",
    # "eagleeye-sessionid": "<your_sessionid_here>",
    # "eagleeye-traceid": "<your_traceid_here>",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.0.0"
}

# Archives already present in the working directory; files whose names
# appear here are skipped instead of re-downloaded.
EXCLUDE_FILENAME = set(glob.glob("sa_00*.tar"))
|
| 18 |
+
|
| 19 |
+
def download_files(dataset_id):
    """Download every ``raw/`` file of an OpenDataLab dataset.

    Pages ``START_PAGE_NO``..``END_PAGE_NO`` of the dataset's file
    listing are fetched first; each listed file is then registered with
    the download-tracking endpoint to obtain a signed URL, which is
    downloaded into the current directory with ``wget``. Files whose
    names are already in ``EXCLUDE_FILENAME`` are skipped.

    Args:
        dataset_id: Numeric OpenDataLab dataset identifier.

    Raises:
        requests.HTTPError: if any listing or tracking request fails
            (``raise_for_status`` on every response).
    """
    global EXCLUDE_FILENAME

    # Step 1: GET the file listing page by page (pageSize/pageNo pagination).
    files = []
    files_url = f"https://opendatalab.com/api/datasets/{dataset_id}/files"
    for page_no in range(START_PAGE_NO, END_PAGE_NO + 1):
        params = {
            "pageSize": PAGE_SIZE,
            "pageNo": page_no,
            "prefix": "raw",
        }
        response = requests.get(files_url, params=params, headers=HEADERS)
        response.raise_for_status()
        files += response.json()["data"]["list"]

    # Step 2: for each file, POST its size and name to obtain a signed
    # download URL, then fetch it.
    track_url = f"https://opendatalab.com/api/track/datasets/download/{dataset_id}"
    for file in files:
        filename = 'raw/' + file["path"]
        if file["path"] in EXCLUDE_FILENAME:
            print(f"{file['path']} already exists, skip ...")
            continue
        # Fixed: the original f-string had no placeholder and printed a
        # literal "(unknown)" instead of the file being downloaded.
        print(f"Downloading {file['path']} ...")
        # Fixed: record the bare path — the value the skip check above
        # tests — not the 'raw/'-prefixed name, so a file listed twice
        # within one run is actually deduplicated.
        EXCLUDE_FILENAME.add(file["path"])
        payload = {
            "size": file["size"],
            "name": filename,
        }
        response = requests.post(track_url, json=[payload], headers=HEADERS)
        response.raise_for_status()
        download_url = response.json()["data"][0]["url"]
        start_time = time.time()
        print("URL:", download_url)
        wget.download(download_url)
        end_time = time.time()
        print(f"Time taken to download {file['path']}: {end_time - start_time:.2f} seconds\n")
|
| 56 |
+
|
| 57 |
+
download_files(6248)
|