flzta committed (verified)
Commit 17a9d42 · Parent(s): afb63c7

Update sync_data.sh

Files changed (1):
  1. sync_data.sh  (+47 -7)
sync_data.sh CHANGED
@@ -21,27 +21,38 @@ upload_backup() {
     token="$HF_TOKEN"
     repo_id="$DATASET_ID"
 
+    echo "Preparing to upload backup file: $file_path as $file_name to Dataset: $repo_id"
+
     python3 -c "
 from huggingface_hub import HfApi
 import sys
 import os
 
+print(f'HF_TOKEN is set: {os.environ.get(\"HF_TOKEN\") is not None}')
+print(f'DATASET_ID is set: {os.environ.get(\"DATASET_ID\") is not None}')
+
 def manage_backups(api, repo_id, max_files=50):
+    print('Managing old backups...')
     files = api.list_repo_files(repo_id=repo_id, repo_type='dataset')
     backup_files = [f for f in files if f.startswith('$BACKUP_PREFIX') and f.endswith('.tar.gz')]
     backup_files.sort()
 
     if len(backup_files) >= max_files:
+        print(f'Found {len(backup_files)} backup files, maximum allowed is {max_files}.')
         files_to_delete = backup_files[:(len(backup_files) - max_files + 1)]
         for file_to_delete in files_to_delete:
             try:
+                print(f'Deleting old backup: {file_to_delete}')
                 api.delete_file(path_in_repo=file_to_delete, repo_id=repo_id, repo_type='dataset')
-                print(f'Deleted old backup: {file_to_delete}')
+                print(f'Successfully deleted: {file_to_delete}')
             except Exception as e:
                 print(f'Error deleting {file_to_delete}: {str(e)}')
+    else:
+        print('Number of backup files is within the limit.')
 
 api = HfApi(token='$token')
 try:
+    print(f'Uploading file: $file_path to {repo_id} as $file_name')
     api.upload_file(
         path_or_fileobj='$file_path',
         path_in_repo='$file_name',
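
For reference, the pruning rule in manage_backups above keeps at most max_files archives: once the count reaches the limit, the oldest len(backup_files) - max_files + 1 names are deleted, freeing exactly one slot for the archive about to be uploaded. Because the names end in a fixed-width %Y%m%d_%H%M%S timestamp, lexicographic order is chronological order, which is also what download_latest_backup relies on below. A minimal standalone sketch of that arithmetic (the prefix and file names are made up for illustration; the real prefix comes from $BACKUP_PREFIX):

# Standalone sketch of the retention rule used above; names are illustrative only.
def files_to_prune(backup_files, max_files=50):
    """Return the oldest backups to delete so one slot stays free for a new upload."""
    backup_files = sorted(backup_files)  # fixed-width timestamps sort chronologically
    if len(backup_files) < max_files:
        return []
    return backup_files[:len(backup_files) - max_files + 1]

# With max_files=3 and three existing backups, only the oldest one is pruned.
existing = [
    'backup_20240101_000000.tar.gz',
    'backup_20240102_000000.tar.gz',
    'backup_20240103_000000.tar.gz',
]
print(files_to_prune(existing, max_files=3))  # ['backup_20240101_000000.tar.gz']
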
@@ -61,22 +72,30 @@ download_latest_backup() {
     token="$HF_TOKEN"
     repo_id="$DATASET_ID"
 
+    echo "Preparing to download the latest backup from Dataset: $repo_id"
+
     python3 -c "
 from huggingface_hub import HfApi
 import sys
 import os
 import tarfile
 import tempfile
+
+print(f'HF_TOKEN is set: {os.environ.get(\"HF_TOKEN\") is not None}')
+print(f'DATASET_ID is set: {os.environ.get(\"DATASET_ID\") is not None}')
+
 api = HfApi(token='$token')
 try:
+    print(f'Listing files in Dataset: {repo_id}')
     files = api.list_repo_files(repo_id='$repo_id', repo_type='dataset')
     backup_files = [f for f in files if f.startswith('$BACKUP_PREFIX') and f.endswith('.tar.gz')]
 
     if not backup_files:
-        print('No backup files found')
+        print('No backup files found in the Dataset.')
         sys.exit()
 
     latest_backup = sorted(backup_files)[-1]
+    print(f'Latest backup file found: {latest_backup}')
 
     with tempfile.TemporaryDirectory() as temp_dir:
         filepath = api.hf_hub_download(
@@ -87,16 +106,28 @@ try:
         )
 
         if filepath and os.path.exists(filepath):
+            print(f'Successfully downloaded backup to temporary directory: {filepath}')
+            echo \"Before restoring backup:\"
+            ls -l \"$CLOUDREVE_DIR\"
+
             # Remove the existing Cloudreve directory and config files
             cd \"$CLOUDREVE_DIR\"
+            echo \"Deleting existing Cloudreve files...\"
             rm -rf cloudreve
             rm -rf cloudreve.db
             rm -rf config.ini
+            echo \"Deletion complete.\"
 
+            echo \"Extracting backup archive: $filepath to $CLOUDREVE_DIR\"
             with tarfile.open(filepath, 'r:gz') as tar:
                 tar.extractall(\"$CLOUDREVE_DIR\")
             echo f'Successfully restored backup from {latest_backup}'
 
+            echo \"After restoring backup:\"
+            ls -l \"$CLOUDREVE_DIR\"
+        else:
+            print('Error during file download.')
+
 except Exception as e:
     print(f'Error downloading backup: {str(e)}')
 "
@@ -112,18 +143,26 @@ sync_data() {
     echo "Starting sync process at $(date)"
 
     if [ -d "$CLOUDREVE_DIR" ]; then
+        echo "Before compression:"
+        ls -l \"$CLOUDREVE_DIR\"
+
         timestamp=$(date +%Y%m%d_%H%M%S)
         backup_file="${BACKUP_PREFIX}_${timestamp}.tar.gz"
+        backup_path="/tmp/${backup_file}"
+
+        echo "Compressing Cloudreve directory (including database and config) to: $backup_path"
+        tar -czf "$backup_path" -C "$CLOUDREVE_DIR" cloudreve cloudreve.db config.ini
+        echo "Compression complete."
 
-        # Compress the data directory and config files
-        tar -czf "/tmp/${backup_file}" -C "$CLOUDREVE_DIR" cloudreve cloudreve.db config.ini
+        echo "After compression:"
+        ls -l "$backup_path"
 
         echo "Uploading backup to HuggingFace..."
-        upload_backup "/tmp/${backup_file}" "${backup_file}"
+        upload_backup "$backup_path" "${backup_file}"
 
-        rm -f "/tmp/${backup_file}"
+        rm -f "$backup_path"
     else
-        echo "Data directory does not exist yet, waiting for next sync..."
+        echo "Cloudreve directory does not exist yet, waiting for next sync..."
     fi
 
     SYNC_INTERVAL=${SYNC_INTERVAL:-7200}
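
The tar -czf "$backup_path" -C "$CLOUDREVE_DIR" cloudreve cloudreve.db config.ini line stores the three entries at the root of the archive, which is why the restore path can simply extract into $CLOUDREVE_DIR to recreate the same layout. A rough Python equivalent of that packing step, for illustration only (create_backup is a hypothetical helper, not part of the script):

import os
import tarfile

def create_backup(cloudreve_dir, backup_path):
    # Pack the Cloudreve binary, database and config at the archive root,
    # matching what the tar -C invocation above produces.
    with tarfile.open(backup_path, 'w:gz') as tar:
        for name in ('cloudreve', 'cloudreve.db', 'config.ini'):
            tar.add(os.path.join(cloudreve_dir, name), arcname=name)
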
@@ -136,4 +175,5 @@ sync_data() {
 sync_data &
 
 # Start Halo (here we need to start Cloudreve)
+echo "Starting Cloudreve..."
 exec /opt/cloudreve/cloudreve -c /opt/cloudreve/config.ini
 