diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,4 +1,4 @@
-# --- START OF FILE app (24).py ---
+
import os
import hmac
@@ -10,35 +10,37 @@ from flask_caching import Cache
import logging
import threading
import time
-import shutil
from datetime import datetime
from huggingface_hub import HfApi, hf_hub_download, utils as hf_utils
from werkzeug.utils import secure_filename
import requests
from io import BytesIO
import uuid
-from typing import Union, Optional
+from typing import Union, Optional, Tuple, Any, Dict, List # Enhanced typing
+# --- Configuration ---
app = Flask(__name__)
-app.secret_key = os.getenv("FLASK_SECRET_KEY", "supersecretkey_mini_app_unique")
-BOT_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN', '6750208873:AAE2hvPlJ99dBdhGa_Brre0IIpUdOvXxHt4')
+app.secret_key = os.getenv("FLASK_SECRET_KEY", "supersecretkey_mini_app_unique_v2")
+BOT_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN', '6750208873:AAE2hvPlJ99dBdhGa_Brre0IIpUdOvXxHt4') # MUST be set
DATA_FILE = 'cloudeng_mini_app_data.json'
DATA_FILE_TMP = DATA_FILE + '.tmp'
-DATA_FILE_BAK = DATA_FILE + '.bak'
+DATA_FILE_DOWNLOAD_TMP = DATA_FILE + '.download'
+DATA_FILE_CORRUPT = DATA_FILE + '.corrupt'
REPO_ID = "Eluza133/Z1e1u"
HF_TOKEN_WRITE = os.getenv("HF_TOKEN")
HF_TOKEN_READ = os.getenv("HF_TOKEN_READ") or HF_TOKEN_WRITE
UPLOAD_FOLDER = 'uploads_mini_app'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+# --- Caching and Logging ---
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
-logging.basicConfig(level=logging.INFO)
-
-AUTH_DATA_LIFETIME = 3600
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-data_lock = threading.Lock()
+# --- Constants ---
+AUTH_DATA_LIFETIME = 3600 # 1 hour validity for initData
-def find_node_by_id(filesystem, node_id):
+# --- Filesystem Utilities ---
+def find_node_by_id(filesystem: Dict[str, Any], node_id: str) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
if not filesystem or not isinstance(filesystem, dict):
return None, None
if filesystem.get('id') == node_id:
@@ -51,70 +53,95 @@ def find_node_by_id(filesystem, node_id):
current_node, parent = queue.pop(0)
if current_node.get('type') == 'folder' and 'children' in current_node:
for child in current_node.get('children', []):
- child_id = child.get('id')
- if not child_id: continue
+ child_id = child.get('id')
+ if not child_id: continue
- if child_id == node_id:
+ if child_id == node_id:
return child, current_node
- if child_id not in visited and child.get('type') == 'folder':
+ if child_id not in visited and isinstance(child, dict) and child.get('type') == 'folder':
visited.add(child_id)
queue.append((child, current_node))
return None, None
-def add_node(filesystem, parent_id, node_data):
+def add_node(filesystem: Dict[str, Any], parent_id: str, node_data: Dict[str, Any]) -> bool:
parent_node, _ = find_node_by_id(filesystem, parent_id)
if parent_node and parent_node.get('type') == 'folder':
- if 'children' not in parent_node:
+ if 'children' not in parent_node or not isinstance(parent_node['children'], list):
parent_node['children'] = []
- existing_ids = {child.get('id') for child in parent_node['children']}
- if node_data.get('id') not in existing_ids:
+ existing_ids = {child.get('id') for child in parent_node['children'] if isinstance(child, dict)}
+ new_node_id = node_data.get('id')
+ if new_node_id and new_node_id not in existing_ids:
parent_node['children'].append(node_data)
return True
return False
-def remove_node(filesystem, node_id):
+def remove_node(filesystem: Dict[str, Any], node_id: str) -> bool:
node_to_remove, parent_node = find_node_by_id(filesystem, node_id)
- if node_to_remove and parent_node and 'children' in parent_node:
+ if node_to_remove and parent_node and 'children' in parent_node and isinstance(parent_node['children'], list):
original_length = len(parent_node['children'])
- parent_node['children'] = [child for child in parent_node['children'] if child.get('id') != node_id]
+ parent_node['children'] = [child for child in parent_node['children'] if not isinstance(child, dict) or child.get('id') != node_id]
return len(parent_node['children']) < original_length
if node_to_remove and node_id == filesystem.get('id'):
logging.warning("Attempted to remove root node directly.")
return False
return False
-def get_node_path_list(filesystem, node_id):
+def get_node_path_list(filesystem: Dict[str, Any], node_id: str) -> List[Dict[str, str]]:
path_list = []
current_id = node_id
processed_ids = set()
- while current_id and current_id not in processed_ids:
+ max_depth = 20 # Prevent infinite loops
+ depth = 0
+
+ while current_id and current_id not in processed_ids and depth < max_depth:
processed_ids.add(current_id)
+ depth += 1
node, parent = find_node_by_id(filesystem, current_id)
+
if not node:
+ logging.warning(f"Node ID {current_id} not found during path generation.")
break
+
path_list.append({
'id': node.get('id'),
'name': node.get('name', node.get('original_filename', 'Unknown'))
})
+
if not parent:
+ if node.get('id') != 'root':
+ logging.warning(f"Node {current_id} found but has no parent (and isn't root).")
break
+
parent_id = parent.get('id')
if parent_id == current_id:
logging.error(f"Filesystem loop detected at node {current_id}")
break
current_id = parent_id
- if not any(p['id'] == 'root' for p in path_list):
- path_list.append({'id': 'root', 'name': 'Root'})
+
+ if not path_list or path_list[-1].get('id') != 'root':
+ # Ensure root is always the first element conceptually (will be reversed)
+ if not any(p['id'] == 'root' for p in path_list):
+ path_list.append({'id': 'root', 'name': 'Root'})
+
+ # Reverse and deduplicate preserving order
final_path = []
seen_ids = set()
for item in reversed(path_list):
- if item['id'] not in seen_ids:
- final_path.append(item)
- seen_ids.add(item['id'])
+ item_id = item.get('id')
+ if item_id and item_id not in seen_ids:
+ final_path.append(item)
+ seen_ids.add(item_id)
+
+ if not final_path or final_path[0].get('id') != 'root':
+ logging.error(f"Path generation failed for {node_id}, missing root. Result: {final_path}")
+ # Fallback to just root if path is broken
+ return [{'id': 'root', 'name': 'Root'}]
+
return final_path
-def initialize_user_filesystem(user_data):
- if 'filesystem' not in user_data or not isinstance(user_data['filesystem'], dict) or user_data['filesystem'].get('id') != 'root':
+
+def initialize_user_filesystem(user_data: Dict[str, Any]):
+ if 'filesystem' not in user_data or not isinstance(user_data.get('filesystem'), dict) or not user_data['filesystem'].get('id') == 'root':
user_data['filesystem'] = {
"type": "folder",
"id": "root",
@@ -122,112 +149,92 @@ def initialize_user_filesystem(user_data):
"children": []
}
-@cache.memoize(timeout=120)
-def load_data():
- with data_lock:
- logging.info("Attempting to load data...")
- try:
- download_db_from_hf()
- except Exception as e:
- logging.error(f"Failed to download latest DB from HF, will use local version if available: {e}")
-
- loaded_data = None
- files_to_try = [DATA_FILE, DATA_FILE_BAK]
-
- for file_path in files_to_try:
- try:
- with open(file_path, 'r', encoding='utf-8') as file:
- data = json.load(file)
- if isinstance(data, dict) and 'users' in data:
- loaded_data = data
- logging.info(f"Successfully loaded data from {file_path}")
- if file_path == DATA_FILE_BAK:
- logging.warning("Loaded data from backup file. Original might be corrupt.")
- # Try to restore from backup immediately
- try:
- shutil.copy2(DATA_FILE_BAK, DATA_FILE)
- logging.info(f"Restored {DATA_FILE} from {DATA_FILE_BAK}")
- except Exception as copy_err:
- logging.error(f"Failed to restore {DATA_FILE} from backup: {copy_err}")
- break
- else:
- logging.warning(f"Data in {file_path} is not a valid dict or missing 'users' key. Trying next.")
- except FileNotFoundError:
- logging.warning(f"{file_path} not found.")
- continue
- except json.JSONDecodeError:
- logging.error(f"Error decoding JSON from {file_path}. Trying next.")
- continue
- except Exception as e:
- logging.error(f"Unexpected error loading data from {file_path}: {e}")
- continue
-
- if loaded_data is None:
- logging.critical(f"Failed to load data from both {DATA_FILE} and {DATA_FILE_BAK}. Initializing empty data structure.")
- loaded_data = {'users': {}}
-
- loaded_data.setdefault('users', {})
- for user_id, user_data in loaded_data['users'].items():
- initialize_user_filesystem(user_data)
-
- logging.info("Data loading process complete.")
- return loaded_data
-
-def save_data(data):
- with data_lock:
- logging.info("Attempting to save data...")
- if not isinstance(data, dict) or 'users' not in data:
- logging.error("Attempted to save invalid data structure. Aborting save.")
- raise ValueError("Invalid data structure for saving")
-
- # 1. Backup current file
- if os.path.exists(DATA_FILE):
- try:
- shutil.copy2(DATA_FILE, DATA_FILE_BAK)
- logging.info(f"Created backup: {DATA_FILE_BAK}")
- except Exception as e:
- logging.error(f"Failed to create backup file {DATA_FILE_BAK}: {e}")
- # Decide if we should proceed without backup? For now, let's proceed but log error.
-
- # 2. Write to temporary file
- try:
- with open(DATA_FILE_TMP, 'w', encoding='utf-8') as file:
- json.dump(data, file, ensure_ascii=False, indent=4)
- logging.info(f"Successfully wrote data to temporary file: {DATA_FILE_TMP}")
- except Exception as e:
- logging.error(f"Error writing data to temporary file {DATA_FILE_TMP}: {e}")
- # Clean up temp file if it exists and failed
- if os.path.exists(DATA_FILE_TMP):
- try: os.remove(DATA_FILE_TMP)
- except OSError: pass
- raise # Re-raise the exception to prevent inconsistent state
-
- # 3. Replace original file with temporary file (atomic operation on most systems)
- try:
- os.replace(DATA_FILE_TMP, DATA_FILE)
- logging.info(f"Successfully replaced {DATA_FILE} with {DATA_FILE_TMP}")
- except Exception as e:
- logging.error(f"Error replacing {DATA_FILE} with {DATA_FILE_TMP}: {e}")
- # Temp file still exists, original file (and backup) might be intact.
- raise # Re-raise the exception
-
- # 4. Clear cache and trigger HF upload (after successful local save)
+# --- Data Loading/Saving ---
+@cache.memoize(timeout=60) # Reduced timeout for faster reflection of changes
+def load_data() -> Dict[str, Any]:
+ try:
+ logging.info(f"Attempting to load data from {DATA_FILE}")
+ if not os.path.exists(DATA_FILE):
+ logging.warning(f"{DATA_FILE} not found locally. Attempting download/init.")
+ download_db_from_hf() # Try to get it from HF
+ if not os.path.exists(DATA_FILE):
+ logging.warning(f"Creating new empty local DB file: {DATA_FILE}")
+ with open(DATA_FILE, 'w', encoding='utf-8') as f:
+ json.dump({'users': {}}, f, ensure_ascii=False, indent=4)
+
+ with open(DATA_FILE, 'r', encoding='utf-8') as file:
+ data = json.load(file)
+ if not isinstance(data, dict):
+ logging.error(f"Data file {DATA_FILE} is not a dict. Possible corruption.")
+ raise json.JSONDecodeError("Root is not a dictionary", "", 0)
+
+ data.setdefault('users', {})
+ for user_id, user_data in data['users'].items():
+ if isinstance(user_data, dict):
+ initialize_user_filesystem(user_data)
+ else:
+ logging.warning(f"User data for {user_id} is not a dict, skipping filesystem init.")
+ logging.info("Data loaded and filesystems checked/initialized.")
+ return data
+ except FileNotFoundError:
+ logging.error(f"CRITICAL: {DATA_FILE} not found even after download/init attempt.")
+ return {'users': {}} # Return empty but log critical error
+ except json.JSONDecodeError as e:
+ logging.critical(f"CRITICAL: Error decoding JSON from {DATA_FILE}. Attempting to move to {DATA_FILE_CORRUPT}. Error: {e}")
try:
- cache.clear()
- logging.info("Cache cleared.")
- upload_db_to_hf()
- except Exception as e:
- logging.error(f"Error clearing cache or initiating HF upload after save: {e}")
- # Data is saved locally, but log this error.
+ if os.path.exists(DATA_FILE):
+ os.replace(DATA_FILE, DATA_FILE_CORRUPT)
+ logging.info(f"Moved corrupted file to {DATA_FILE_CORRUPT}")
+ except OSError as move_err:
+ logging.error(f"Failed to move corrupted file: {move_err}")
+ return {'users': {}} # Return empty after attempting to preserve corrupt file
+ except Exception as e:
+ logging.error(f"Unexpected error loading data: {e}", exc_info=True)
+ return {'users': {}}
- logging.info("Data saving process completed.")
+def save_data(data: Dict[str, Any]):
+ temp_file_path = DATA_FILE_TMP
+ try:
+ with open(temp_file_path, 'w', encoding='utf-8') as file:
+ json.dump(data, file, ensure_ascii=False, indent=4)
+
+ # Atomic replace
+ os.replace(temp_file_path, DATA_FILE)
+ logging.info(f"Data saved successfully to {DATA_FILE}")
+
+ # Clear cache immediately after successful save
+ cache.delete_memoized(load_data)
+ logging.info("Cache cleared after saving.")
+
+ # Upload to HF (can run in background)
+ upload_db_to_hf()
+
+ except json.JSONDecodeError as e:
+ logging.critical(f"CRITICAL ERROR during JSON serialization for save: {e}. Data NOT saved.", exc_info=True)
+ # Clean up temp file if it exists and might be corrupted
+ if os.path.exists(temp_file_path):
+ try: os.remove(temp_file_path)
+ except OSError: pass
+ except OSError as e:
+ logging.critical(f"CRITICAL OS ERROR during file write/replace: {e}. Data potentially NOT saved.", exc_info=True)
+ # Clean up temp file if it exists
+ if os.path.exists(temp_file_path):
+ try: os.remove(temp_file_path)
+ except OSError: pass
+ except Exception as e:
+ logging.critical(f"CRITICAL UNEXPECTED ERROR during save_data: {e}. Data potentially NOT saved.", exc_info=True)
+ # Clean up temp file if it exists
+ if os.path.exists(temp_file_path):
+ try: os.remove(temp_file_path)
+ except OSError: pass
+ # No finally block needed for temp_file_path removal if os.replace succeeded
def upload_db_to_hf():
if not HF_TOKEN_WRITE:
logging.warning("HF_TOKEN_WRITE not set, skipping database upload.")
return
if not os.path.exists(DATA_FILE):
- logging.warning(f"Data file {DATA_FILE} does not exist, skipping HF upload.")
+ logging.error(f"Cannot upload {DATA_FILE} to HF: File does not exist.")
return
try:
api = HfApi()
@@ -240,71 +247,90 @@ def upload_db_to_hf():
commit_message=f"Backup MiniApp {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
run_as_future=True
)
- logging.info("Database upload to Hugging Face scheduled.")
+ logging.info(f"Database upload to Hugging Face scheduled for {DATA_FILE}.")
except Exception as e:
- logging.error(f"Error scheduling database upload: {e}")
+ logging.error(f"Error scheduling database upload: {e}", exc_info=True)
def download_db_from_hf():
if not HF_TOKEN_READ:
logging.warning("HF_TOKEN_READ not set, skipping database download.")
- # Do not create an empty file here, load_data handles initialization
- return
+ return False # Indicate download was skipped
- logging.info(f"Attempting download of {DATA_FILE} from {REPO_ID}")
- local_path = "." # Download directly to current dir
+ download_path = DATA_FILE_DOWNLOAD_TMP
try:
- downloaded_path = hf_hub_download(
+ # Download to temp location first
+ hf_hub_download(
repo_id=REPO_ID,
filename=DATA_FILE,
repo_type="dataset",
token=HF_TOKEN_READ,
- local_dir=local_path,
- local_dir_use_symlinks=False, # Safer for overwriting
- force_download=True, # Always get the latest
- etag_timeout=10
+ local_dir=".",
+ local_dir_use_symlinks=False,
+ force_download=True, # Get the latest version
+ etag_timeout=10,
+ local_path_and_repo_id_exists=False, # Avoid potential symlink issues
+ cache_dir=None, # Don't use HF cache, manage directly
+ local_path=download_path # Specify exact download path
)
- logging.info(f"Database downloaded from Hugging Face to {downloaded_path}")
- # Ensure the downloaded file is named correctly if local_dir='.' causes issues
- expected_path = os.path.join(local_path, DATA_FILE)
- if downloaded_path != expected_path and os.path.exists(downloaded_path):
- logging.warning(f"Downloaded file path {downloaded_path} differs from expected {expected_path}. Renaming.")
- try:
- os.replace(downloaded_path, expected_path)
- logging.info(f"Renamed downloaded file to {expected_path}")
- except Exception as rename_err:
- logging.error(f"Failed to rename downloaded file: {rename_err}")
- # Raise the error so load_data knows download wasn't fully successful
- raise rename_err
- elif not os.path.exists(expected_path):
- logging.error(f"hf_hub_download reported success but expected file {expected_path} not found.")
- raise FileNotFoundError(f"Downloaded file {expected_path} missing after reported success.")
+ logging.info(f"Database downloaded from Hugging Face to {download_path}")
+
+ # Basic validation: Check if it's valid JSON before replacing
+ try:
+ with open(download_path, 'r', encoding='utf-8') as f:
+ json.load(f)
+ # If JSON is valid, replace the main file
+ os.replace(download_path, DATA_FILE)
+ logging.info(f"Successfully validated and replaced {DATA_FILE} with downloaded version.")
+ cache.delete_memoized(load_data) # Clear cache as data changed
+ return True
+ except (json.JSONDecodeError, UnicodeDecodeError) as e:
+ logging.error(f"Downloaded DB file {download_path} is corrupted or not valid JSON: {e}. Keeping existing local file.")
+ try: os.remove(download_path) # Clean up invalid download
+ except OSError: pass
+ return False
+ except OSError as e:
+ logging.error(f"OS Error replacing {DATA_FILE} with {download_path}: {e}. Keeping existing local file.")
+ try: os.remove(download_path) # Clean up download
+ except OSError: pass
+ return False
except hf_utils.RepositoryNotFoundError:
logging.error(f"Repository {REPO_ID} not found on Hugging Face.")
- raise # Re-raise to indicate download failure
+ return False
except hf_utils.EntryNotFoundError:
- logging.warning(f"{DATA_FILE} not found in repo {REPO_ID}. Will use local version if available.")
- # Don't raise, allow load_data to proceed with local/backup
+ logging.warning(f"{DATA_FILE} not found in repo {REPO_ID}. No file downloaded.")
+ # Do not create an empty file here, let load_data handle initial creation if needed
+ return False
except requests.exceptions.RequestException as e:
- logging.error(f"Connection error downloading DB from HF: {e}. Will use local version if available.")
- raise # Re-raise to indicate download failure
+ logging.error(f"Connection error downloading DB from HF: {e}. Using local version if available.")
+ return False
except Exception as e:
- logging.error(f"Generic error downloading database from HF: {e}")
- raise # Re-raise to indicate download failure
+ logging.error(f"Unexpected error downloading database: {e}", exc_info=True)
+ return False
+ finally:
+ # Ensure temp download file is removed if it still exists (e.g., download interrupted)
+ if os.path.exists(download_path):
+ try:
+ os.remove(download_path)
+ except OSError as e:
+ logging.warning(f"Could not remove temporary download file {download_path}: {e}")
-def get_file_type(filename):
+# --- File Type Helper ---
+def get_file_type(filename: str) -> str:
if not filename or '.' not in filename: return 'other'
ext = filename.lower().split('.')[-1]
- if ext in ['mp4', 'mov', 'avi', 'webm', 'mkv']: return 'video'
- if ext in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'webp', 'svg']: return 'image'
+ if ext in ['mp4', 'mov', 'avi', 'webm', 'mkv', 'wmv', 'flv', 'ogg', 'ogv']: return 'video'
+ if ext in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'webp', 'svg', 'ico', 'tif', 'tiff']: return 'image'
if ext == 'pdf': return 'pdf'
- if ext in ['txt', 'log', 'csv', 'json', 'xml', 'html', 'css', 'js', 'py', 'md']: return 'text'
- if ext in ['mp3', 'wav', 'ogg', 'aac', 'flac']: return 'audio'
- if ext in ['zip', 'rar', '7z', 'tar', 'gz']: return 'archive'
+ if ext in ['txt', 'log', 'md', 'py', 'js', 'css', 'html', 'json', 'xml', 'csv', 'tsv', 'yaml', 'yml']: return 'text'
+ if ext in ['mp3', 'wav', 'aac', 'flac', 'ogg', 'oga', 'm4a']: return 'audio'
+ if ext in ['zip', 'rar', '7z', 'tar', 'gz', 'bz2']: return 'archive'
+ if ext in ['doc', 'docx', 'ppt', 'pptx', 'xls', 'xlsx', 'odt', 'odp', 'ods']: return 'document'
return 'other'
-def check_telegram_authorization(auth_data: str, bot_token: str) -> Optional[dict]:
+# --- Telegram Validation ---
+def check_telegram_authorization(auth_data: str, bot_token: str) -> Optional[Dict[str, Any]]:
if not auth_data or not bot_token or bot_token == 'YOUR_BOT_TOKEN':
logging.warning("Validation skipped: Missing auth_data or valid BOT_TOKEN.")
return None
@@ -334,6 +360,8 @@ def check_telegram_authorization(auth_data: str, bot_token: str) -> Optional[dic
if 'id' not in user_info:
logging.error("Validated user data missing 'id'")
return None
+ # Ensure ID is string for consistency
+ user_info['id'] = str(user_info['id'])
return user_info
except json.JSONDecodeError:
logging.error("Failed to decode user JSON from auth data")
@@ -345,19 +373,20 @@ def check_telegram_authorization(auth_data: str, bot_token: str) -> Optional[dic
logging.warning("Hash mismatch during validation")
return None
except Exception as e:
- logging.error(f"Exception during validation: {e}")
+ logging.error(f"Exception during validation: {e}", exc_info=True)
return None
+# --- HTML, CSS, JS Template ---
HTML_TEMPLATE = """
-
+
Zeus Cloud
-
+
-
Loading...
+
Загрузка...
+
Zeus Cloud
-
+
Загрузка пути...
-
-
+
+
+
+
-
-
-
-
+
+
+
+
0%
-
Folder Contents
-
+
Содержимое папки
+
+
Загрузка содержимого...
-
This folder is empty.
-
+
- ×
+ ×
-
-
-
"""
+
+# --- Flask Routes ---
+
@app.route('/')
def index():
return Response(HTML_TEMPLATE, mimetype='text/html')
@@ -1169,7 +1245,7 @@ def validate_init_data():
user_info = check_telegram_authorization(init_data, BOT_TOKEN)
if user_info and 'id' in user_info:
- tg_user_id = str(user_info['id'])
+ tg_user_id = str(user_info['id']) # Ensure string ID
try:
db_data = load_data()
users = db_data.setdefault('users', {})
@@ -1184,40 +1260,40 @@ def validate_init_data():
initialize_user_filesystem(users[tg_user_id])
needs_save = True
else:
- # Ensure filesystem exists and update user info if necessary
- if 'filesystem' not in users[tg_user_id]:
- initialize_user_filesystem(users[tg_user_id])
- needs_save = True
- if users[tg_user_id].get('user_info') != user_info:
+ # Ensure filesystem exists for existing users
+ if 'filesystem' not in users[tg_user_id]:
+ logging.warning(f"Filesystem missing for existing user {tg_user_id}. Initializing.")
+ initialize_user_filesystem(users[tg_user_id])
+ needs_save = True
+ # Optionally update user_info if changed (e.g., username update)
+ if users[tg_user_id].get('user_info') != user_info:
users[tg_user_id]['user_info'] = user_info
needs_save = True
+
if needs_save:
- save_data(db_data) # save_data now handles potential errors internally
+ save_data(db_data) # save_data now handles its own exceptions and logging
return jsonify({"status": "ok", "user": user_info})
- except ValueError as ve: # Catch potential save_data validation error
- logging.error(f"Data validation error during user init/update for {tg_user_id}: {ve}")
- return jsonify({"status": "error", "message": "Internal data error during user initialization."}), 500
except Exception as e:
- logging.error(f"Failed to load/save data for user {tg_user_id} during validation: {e}")
- # Don't expose detailed error message to client
- return jsonify({"status": "error", "message": "Server error processing user data."}), 500
+ # This catches errors during load_data or save_data if they bubble up
+ logging.critical(f"Failed to load/save data for user {tg_user_id} during validation: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Ошибка сервера при обработке данных пользователя."}), 500
else:
- logging.warning(f"Validation failed for initData: {init_data[:100]}...")
- return jsonify({"status": "error", "message": "Invalid authorization data."}), 403
+ logging.warning(f"Validation failed for initData provided.")
+ return jsonify({"status": "error", "message": "Недействительные данные авторизации Telegram."}), 403
@app.route('/get_dashboard_data', methods=['POST'])
def get_dashboard_data():
data = request.get_json()
if not data or 'initData' not in data or 'folder_id' not in data:
- return jsonify({"status": "error", "message": "Incomplete request"}), 400
+ return jsonify({"status": "error", "message": "Неполный запрос (отсутствует initData или folder_id)"}), 400
user_info = check_telegram_authorization(data['initData'], BOT_TOKEN)
if not user_info or 'id' not in user_info:
- return jsonify({"status": "error", "message": "Not authorized"}), 403
+ return jsonify({"status": "error", "message": "Ошибка авторизации"}), 403
tg_user_id = str(user_info['id'])
folder_id = data['folder_id']
@@ -1228,39 +1304,32 @@ def get_dashboard_data():
if not user_data or 'filesystem' not in user_data:
logging.error(f"User data or filesystem missing for validated user {tg_user_id}")
- # Attempt re-initialization (might happen if DB load failed previously)
- db_data.setdefault('users', {})
- if tg_user_id not in db_data['users']: db_data['users'][tg_user_id] = {}
- initialize_user_filesystem(db_data['users'][tg_user_id])
- user_data = db_data['users'][tg_user_id]
- try:
- save_data(db_data) # Try saving the initialized structure
- except Exception as save_err:
- logging.error(f"Failed to save re-initialized filesystem for {tg_user_id}: {save_err}")
- # Continue with the (now initialized) empty filesystem if save failed
- # but log that it happened. Fallback to error if re-init also fails structurally.
- if not user_data or 'filesystem' not in user_data:
- return jsonify({"status": "error", "message": "User data error"}), 500
+ # Attempt to re-initialize if missing, might indicate data inconsistency
+ if tg_user_id in db_data.get('users', {}):
+ logging.warning(f"Attempting to re-initialize filesystem for {tg_user_id}")
+ initialize_user_filesystem(db_data['users'][tg_user_id])
+ save_data(db_data)
+ user_data = db_data['users'][tg_user_id] # Re-fetch user_data
+ if not user_data or 'filesystem' not in user_data: # Check again
+ raise ValueError("Failed to re-initialize filesystem.")
+ else:
+ raise ValueError("User entry completely missing after validation.")
current_folder, _ = find_node_by_id(user_data['filesystem'], folder_id)
if not current_folder or current_folder.get('type') != 'folder':
- logging.warning(f"Folder {folder_id} not found for user {tg_user_id}. Defaulting to root.")
- folder_id = 'root'
+ logging.warning(f"Folder '{folder_id}' not found or not a folder for user {tg_user_id}. Defaulting to root.")
+ folder_id = 'root' # Reset to root
current_folder, _ = find_node_by_id(user_data['filesystem'], folder_id)
if not current_folder:
- logging.critical(f"CRITICAL: Root folder not found for user {tg_user_id}")
- # Attempt to fix root
- initialize_user_filesystem(user_data)
- current_folder, _ = find_node_by_id(user_data['filesystem'], 'root')
- if not current_folder: # If still not found after re-init
- return jsonify({"status": "error", "message": "Critical error: Root folder missing"}), 500
- else:
- try: save_data(db_data) # Save the fixed structure
- except Exception: pass # Logged in save_data
+ logging.critical(f"CRITICAL: Root folder node not found for user {tg_user_id} even after defaulting.")
+ return jsonify({"status": "error", "message": "Критическая ошибка: Корневая папка отсутствует"}), 500
items_in_folder = current_folder.get('children', [])
+ # Ensure items are dicts before sending
+ items_in_folder = [item for item in items_in_folder if isinstance(item, dict)]
+
breadcrumbs = get_node_path_list(user_data['filesystem'], folder_id)
current_folder_info = {
@@ -1274,9 +1343,13 @@ def get_dashboard_data():
"breadcrumbs": breadcrumbs,
"current_folder": current_folder_info
})
+
+ except ValueError as ve:
+ logging.error(f"Data integrity issue for user {tg_user_id}: {ve}", exc_info=True)
+ return jsonify({"status": "error", "message": f"Ошибка данных пользователя: {ve}"}), 500
except Exception as e:
- logging.error(f"Error loading dashboard data for {tg_user_id}, folder {folder_id}: {e}")
- return jsonify({"status": "error", "message": "Server error loading folder data."}), 500
+ logging.error(f"Error in get_dashboard_data for user {tg_user_id}, folder {folder_id}: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Внутренняя ошибка сервера при получении данных."}), 500
@app.route('/upload', methods=['POST'])
@@ -1287,39 +1360,31 @@ def upload_files():
user_info = check_telegram_authorization(init_data, BOT_TOKEN)
if not user_info or 'id' not in user_info:
- return jsonify({"status": "error", "message": "Not authorized"}), 403
+ return jsonify({"status": "error", "message": "Ошибка авторизации"}), 403
tg_user_id = str(user_info['id'])
if not HF_TOKEN_WRITE:
- return jsonify({'status': 'error', 'message': 'Upload configuration error on server.'}), 500
+ logging.error("Upload attempt failed: HF_TOKEN_WRITE not configured.")
+ return jsonify({'status': 'error', 'message': 'Загрузка невозможна: Ошибка конфигурации сервера.'}), 503 # Service Unavailable
if not files or all(not f.filename for f in files):
- return jsonify({'status': 'error', 'message': 'No files selected for upload.'}), 400
+ return jsonify({'status': 'error', 'message': 'Файлы для загрузки не выбраны.'}), 400
if len(files) > 20:
- return jsonify({'status': 'error', 'message': 'Maximum 20 files per upload.'}), 400
+ return jsonify({'status': 'error', 'message': 'Превышен лимит файлов (максимум 20 за раз).'}), 413 # Payload Too Large
try:
db_data = load_data()
user_data = db_data.get('users', {}).get(tg_user_id)
if not user_data or 'filesystem' not in user_data:
- # Attempt re-initialization same as in get_dashboard_data
- db_data.setdefault('users', {})
- if tg_user_id not in db_data['users']: db_data['users'][tg_user_id] = {}
- initialize_user_filesystem(db_data['users'][tg_user_id])
- user_data = db_data['users'][tg_user_id]
- # No save here, let the successful upload save trigger it
- if not user_data or 'filesystem' not in user_data:
- return jsonify({"status": "error", "message": "User data error"}), 500
+ logging.error(f"User data or filesystem missing during upload for {tg_user_id}")
+ return jsonify({"status": "error", "message": "Ошибка данных пользователя на сервере"}), 500
target_folder_node, _ = find_node_by_id(user_data['filesystem'], current_folder_id)
if not target_folder_node or target_folder_node.get('type') != 'folder':
- # Default to root if target not found
- current_folder_id = 'root'
- target_folder_node, _ = find_node_by_id(user_data['filesystem'], current_folder_id)
- if not target_folder_node:
- return jsonify({'status': 'error', 'message': 'Target folder not found!'}), 404
+ logging.error(f"Target folder {current_folder_id} not found for upload by {tg_user_id}")
+ return jsonify({'status': 'error', 'message': 'Выбранная папка не найдена или недоступна.'}), 404
api = HfApi()
uploaded_count = 0
@@ -1329,486 +1394,536 @@ def upload_files():
for file in files:
if file and file.filename:
original_filename = secure_filename(file.filename)
+ if not original_filename:
+ logging.warning(f"Skipping file with potentially unsafe name before sanitization: {file.filename}")
+ errors.append(f"Пропущен файл с недопустимым именем: {file.filename}")
+ continue
+
name_part, ext_part = os.path.splitext(original_filename)
unique_suffix = uuid.uuid4().hex[:8]
- # Ensure unique_filename doesn't accidentally create hidden files if name starts with '.'
- safe_name_part = name_part.lstrip('.')
- unique_filename = f"{safe_name_part}_{unique_suffix}{ext_part}" if safe_name_part else f"file_{unique_suffix}{ext_part}"
-
+ # Ensure filename isn't excessively long after adding suffix
+ max_name_len = 200
+ unique_filename = f"{name_part[:max_name_len - len(ext_part) - 9]}_{unique_suffix}{ext_part}" # Truncate if needed
file_id = uuid.uuid4().hex
- # Use file_id in path for uniqueness, less reliance on potentially long/complex filenames
- hf_path = f"cloud_files/{tg_user_id}/{file_id}{ext_part}" # Simpler HF path
+
+ # Construct path using user ID and a unique file ID prefix for less chance of collision
+ hf_path = f"cloud_files/{tg_user_id}/{file_id[:2]}/{file_id}_{unique_filename}"
temp_path = os.path.join(UPLOAD_FOLDER, f"{file_id}_{unique_filename}")
try:
file.save(temp_path)
- logging.info(f"Uploading {original_filename} for {tg_user_id} to HF path {hf_path}")
+ logging.info(f"Uploading {original_filename} ({unique_filename}) to HF path: {hf_path} for user {tg_user_id}")
api.upload_file(
path_or_fileobj=temp_path, path_in_repo=hf_path,
repo_id=REPO_ID, repo_type="dataset", token=HF_TOKEN_WRITE,
- commit_message=f"User {tg_user_id} uploaded {original_filename}"
+ commit_message=f"User {tg_user_id} upload {original_filename}",
+ # run_as_future=True # Consider running sequentially for easier error handling? Let's keep sequential.
)
+ logging.info(f"Successfully uploaded to HF: {hf_path}")
file_info = {
'type': 'file', 'id': file_id,
- 'original_filename': original_filename,
- 'path': hf_path, # Store the simplified path
- 'file_type': get_file_type(original_filename),
+ 'original_filename': original_filename, 'unique_filename': unique_filename,
+ 'path': hf_path, 'file_type': get_file_type(original_filename),
'upload_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ # Consider adding file size: 'size': os.path.getsize(temp_path)
}
if add_node(user_data['filesystem'], current_folder_id, file_info):
uploaded_count += 1
needs_save = True
- logging.info(f"Successfully added node for {file_id} ({original_filename})")
+ logging.info(f"Added node {file_id} to folder {current_folder_id} in DB for user {tg_user_id}")
else:
- errors.append(f"Error adding metadata for {original_filename}.")
- logging.error(f"Failed add_node for {file_id} to {current_folder_id} for {tg_user_id}")
- # Attempt to delete the orphaned file from HF
+ # This should ideally not happen if target_folder_node was found
+ error_msg = f"Критическая ошибка: Не удалось добавить метаданные для {original_filename} в папку {current_folder_id}."
+ errors.append(error_msg)
+ logging.error(f"{error_msg} User: {tg_user_id}")
+ # Attempt to clean up orphaned HF file
try:
- logging.warning(f"Attempting delete of orphaned HF file: {hf_path}")
+ logging.warning(f"Attempting to delete orphaned HF file: {hf_path}")
api.delete_file(path_in_repo=hf_path, repo_id=REPO_ID, repo_type="dataset", token=HF_TOKEN_WRITE)
except Exception as del_err:
- logging.error(f"Failed deleting orphaned HF file {hf_path}: {del_err}")
+ logging.error(f"Failed deleting orphaned HF file {hf_path} after DB add failure: {del_err}")
except Exception as e:
- logging.exception(f"Upload error for {original_filename} ({tg_user_id}): {e}") # Use exception for traceback
- errors.append(f"Upload error for {original_filename}")
+ error_msg = f"Ошибка загрузки файла {original_filename}: {str(e)[:100]}" # Limit error msg length
+ logging.error(f"Upload error for {original_filename} (User: {tg_user_id}, Path: {hf_path}): {e}", exc_info=True)
+ errors.append(error_msg)
+ # If file exists on HF but failed DB add, or failed upload, try to clean temp
finally:
if os.path.exists(temp_path):
try: os.remove(temp_path)
except OSError as e: logging.error(f"Error removing temp file {temp_path}: {e}")
if needs_save:
- save_data(db_data) # Let save_data handle its internal errors
+ try:
+ save_data(db_data)
+ except Exception as e:
+ # save_data logs critical errors, just report failure
+ logging.error(f"Error saving DB after upload for {tg_user_id}: {e}", exc_info=True)
+ errors.append("Критическая ошибка сохранения метаданных после загрузки.")
+ # If save fails, the newly added nodes are lost on next load, but HF files remain. Manual cleanup might be needed.
- final_message = f"{uploaded_count} file(s) uploaded."
+ final_message = f"{uploaded_count} из {len(files)} файл(ов) загружено."
+ status = "ok"
if errors:
- final_message += f" Errors occurred with {len(errors)} file(s)."
+ final_message += " Ошибки: " + "; ".join(errors)
+ status = "error" if uploaded_count == 0 else "partial_success" # Custom status? Let's stick to ok/error for simplicity client side
return jsonify({
- "status": "ok" if uploaded_count > 0 and not errors else "error", # OK if at least one succeeded without error
+ "status": "ok" if not errors else "error", # Keep it simple for client
"message": final_message
})
except Exception as e:
- logging.exception(f"Unhandled error during file upload process for user {tg_user_id}: {e}")
- return jsonify({"status": "error", "message": "An unexpected server error occurred during upload."}), 500
+ logging.critical(f"Unexpected critical error during /upload for user {tg_user_id}: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Непредвиденная ошибка сервера во время загрузки."}), 500
@app.route('/create_folder', methods=['POST'])
def create_folder():
data = request.get_json()
if not data or 'initData' not in data or 'parent_folder_id' not in data or 'folder_name' not in data:
- return jsonify({"status": "error", "message": "Incomplete request"}), 400
+ return jsonify({"status": "error", "message": "Неполный запрос"}), 400
user_info = check_telegram_authorization(data['initData'], BOT_TOKEN)
if not user_info or 'id' not in user_info:
- return jsonify({"status": "error", "message": "Not authorized"}), 403
+ return jsonify({"status": "error", "message": "Ошибка авторизации"}), 403
tg_user_id = str(user_info['id'])
parent_folder_id = data['parent_folder_id']
folder_name = data['folder_name'].strip()
if not folder_name:
- return jsonify({'status': 'error', 'message': 'Folder name cannot be empty.'}), 400
- if '/' in folder_name or '\\' in folder_name:
- return jsonify({'status': 'error', 'message': 'Folder name contains invalid characters.'}), 400
- if len(folder_name) > 100: # Add a length limit
- return jsonify({'status': 'error', 'message': 'Folder name is too long (max 100 chars).'}), 400
+ return jsonify({'status': 'error', 'message': 'Имя папки не может быть пустым.'}), 400
+ # Basic validation against problematic chars
+ if any(c in folder_name for c in ['/', '\\', ':', '*', '?', '"', '<', '>', '|']):
+ return jsonify({'status': 'error', 'message': 'Имя папки содержит недопустимые символы.'}), 400
+ if folder_name == '.' or folder_name == '..':
+ return jsonify({'status': 'error', 'message': 'Недопустимое имя папки.'}), 400
+ if len(folder_name) > 100: # Limit folder name length
+ return jsonify({'status': 'error', 'message': 'Имя папки слишком длинное (макс. 100 символов).'}), 400
try:
db_data = load_data()
user_data = db_data.get('users', {}).get(tg_user_id)
if not user_data or 'filesystem' not in user_data:
- # Attempt re-initialization
- db_data.setdefault('users', {})
- if tg_user_id not in db_data['users']: db_data['users'][tg_user_id] = {}
- initialize_user_filesystem(db_data['users'][tg_user_id])
- user_data = db_data['users'][tg_user_id]
- if not user_data or 'filesystem' not in user_data:
- return jsonify({"status": "error", "message": "User data error"}), 500
+ logging.error(f"User data or filesystem missing during folder creation for {tg_user_id}")
+ return jsonify({"status": "error", "message": "Ошибка данных пользователя на сервере"}), 500
# Check if folder with the same name already exists in the parent
parent_node, _ = find_node_by_id(user_data['filesystem'], parent_folder_id)
- if parent_node and parent_node.get('type') == 'folder':
- existing_names = {child.get('name', '').lower() for child in parent_node.get('children', []) if child.get('type') == 'folder'}
- if folder_name.lower() in existing_names:
- return jsonify({'status': 'error', 'message': f'A folder named "{folder_name}" already exists here.'}), 400
- else:
- # If parent somehow isn't found or isn't a folder, default to root
- parent_folder_id = 'root'
- parent_node, _ = find_node_by_id(user_data['filesystem'], parent_folder_id)
- if not parent_node: # Should not happen if root init works
- return jsonify({'status': 'error', 'message': 'Cannot find target folder.'}), 404
+ if parent_node and parent_node.get('type') == 'folder' and 'children' in parent_node:
+ if any(isinstance(child, dict) and child.get('type') == 'folder' and child.get('name') == folder_name for child in parent_node['children']):
+ return jsonify({'status': 'error', 'message': f'Папка с именем "{folder_name}" уже существует здесь.'}), 409 # Conflict
folder_id = uuid.uuid4().hex
folder_data = {
'type': 'folder', 'id': folder_id,
'name': folder_name, 'children': []
+ # Add creation date? 'created_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if add_node(user_data['filesystem'], parent_folder_id, folder_data):
- save_data(db_data)
- return jsonify({'status': 'ok', 'message': f'Folder "{folder_name}" created.'})
+ try:
+ save_data(db_data)
+ logging.info(f"Folder '{folder_name}' ({folder_id}) created for user {tg_user_id} in parent {parent_folder_id}")
+ return jsonify({'status': 'ok', 'message': f'Папка "{folder_name}" создана.'})
+ except Exception as e:
+ # save_data logs critical errors
+ return jsonify({'status': 'error', 'message': 'Ошибка сохранения данных после создания папки.'}), 500
else:
- # This case might indicate an issue with find_node_by_id or data structure
- logging.error(f"add_node failed unexpectedly for folder creation user {tg_user_id}, parent {parent_folder_id}")
- return jsonify({'status': 'error', 'message': 'Failed to add folder to the directory structure.'}), 500
+ # This implies parent_folder_id wasn't found or wasn't a folder, which should have been caught earlier by find_node_by_id in add_node logic
+ logging.error(f"add_node failed for folder '{folder_name}' in parent {parent_folder_id} for user {tg_user_id}, despite parent supposedly existing.")
+ return jsonify({'status': 'error', 'message': 'Не удалось найти родительскую папку для создания новой.'}), 404 # Or 500? Parent existed check implies 404?
except Exception as e:
- logging.exception(f"Error creating folder for user {tg_user_id}: {e}")
- return jsonify({'status': 'error', 'message': 'An unexpected server error occurred.'}), 500
-
+ logging.critical(f"Unexpected critical error during /create_folder for user {tg_user_id}: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Непредвиденная ошибка сервера при создании папки."}), 500
+
+
+# --- Public Routes (No User Auth - Use with Caution) ---
+# These routes rely on finding the file ID across *all* users.
+# If privacy is paramount, these should require authentication matching the owner.
+
+def find_file_globally(file_id: str) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
+ """ Finds a file node by its ID across all users. """
+ db_data = load_data() # Use cached data
+ for user_id, user_data in db_data.get('users', {}).items():
+ if isinstance(user_data, dict) and 'filesystem' in user_data:
+ node, _ = find_node_by_id(user_data['filesystem'], file_id)
+ if node and node.get('type') == 'file':
+ return node, user_id
+ return None, None
@app.route('/download/<file_id>')
def download_file_route(file_id):
- try:
- db_data = load_data()
- file_node = None
- owner_user_id = None
+ file_node, owner_user_id = find_file_globally(file_id)
- for user_id, user_data in db_data.get('users', {}).items():
- if 'filesystem' in user_data:
- node, _ = find_node_by_id(user_data['filesystem'], file_id)
- if node and node.get('type') == 'file':
- file_node = node
- owner_user_id = user_id
- break
+ if not file_node:
+ return Response("Файл не найден", status=404)
- if not file_node:
- return Response("File not found", status=404)
+ hf_path = file_node.get('path')
+ original_filename = file_node.get('original_filename', f'{file_id}_download')
- hf_path = file_node.get('path')
- original_filename = file_node.get('original_filename', f'{file_id}_download')
-
- if not hf_path:
- logging.error(f"Missing HF path for file ID {file_id} (owner: {owner_user_id})")
- return Response("Error: File path missing", status=500)
+ if not hf_path:
+ logging.error(f"Missing HF path for file ID {file_id} (Owner: {owner_user_id})")
+ return Response("Ошибка: Путь к файлу не найден в метаданных", status=500)
- # Construct the URL without ?download=true initially for potential streaming/preview
- file_url_base = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}"
- # Add download=true for direct download trigger
- file_url_download = file_url_base + "?download=true"
+ file_url = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}?download=true"
+ logging.info(f"Serving download for file ID {file_id} (Owner: {owner_user_id}) from HF path: {hf_path}")
+ try:
headers = {}
if HF_TOKEN_READ:
headers["authorization"] = f"Bearer {HF_TOKEN_READ}"
- response = requests.get(file_url_download, headers=headers, stream=True, timeout=60) # Increased timeout
+ # Make a HEAD request first to get Content-Type and check existence/size? Optional optimization.
+ response = requests.get(file_url, headers=headers, stream=True, timeout=30)
response.raise_for_status()
- # Try to encode filename properly for Content-Disposition
+ content_type = response.headers.get('Content-Type', 'application/octet-stream')
+ # A more robust way to handle filename encoding for Content-Disposition
try:
- # Simplest ASCII-safe version
- encoded_filename_ascii = original_filename.encode('ascii', 'ignore').decode('ascii')
- disposition = f'attachment; filename="{encoded_filename_ascii}"'
- # Attempt UTF-8 version (might not be supported by all clients but better for non-ASCII)
- encoded_filename_utf8 = urlencode({'filename': original_filename}, encoding='utf-8')[9:]
- disposition += f"; filename*=UTF-8''{encoded_filename_utf8}"
- except Exception:
- # Fallback if encoding fails
- disposition = f'attachment; filename="downloaded_file"'
- logging.warning(f"Could not properly encode filename {original_filename} for download.")
+ # Try UTF-8 first
+ encoded_filename = original_filename.encode('utf-8').decode('latin-1')
+ disposition = f"attachment; filename=\"{encoded_filename}\"; filename*=UTF-8''{urlencode({'': original_filename})[1:]}"
+ except UnicodeEncodeError:
+ # Fallback for simpler names
+ ascii_filename = ''.join(c for c in original_filename if ord(c) < 128) or "download"
+ disposition = f"attachment; filename=\"{ascii_filename}\""
return Response(response.iter_content(chunk_size=65536), # Larger chunk size
- mimetype=response.headers.get('Content-Type', 'application/octet-stream'),
+ mimetype=content_type,
headers={"Content-Disposition": disposition})
except requests.exceptions.Timeout:
- logging.error(f"Timeout downloading file from HF ({hf_path}, owner: {owner_user_id})")
- return Response("Error downloading file (timeout)", status=504)
+ logging.error(f"Timeout downloading file from HF ({hf_path}, owner: {owner_user_id})")
+ return Response(f"Ошибка: Время ожидания от сервера хранения истекло", status=504) # Gateway Timeout
+ except requests.exceptions.HTTPError as e:
+ status_code = e.response.status_code
+ logging.error(f"HTTP error {status_code} downloading file from HF ({hf_path}, owner: {owner_user_id}): {e}")
+ if status_code == 404:
+ return Response("Ошибка: Файл не найден на сервере хранения.", status=404)
+ else:
+ return Response(f"Ошибка скачивания файла с сервера хранения ({status_code})", status=status_code if status_code >= 500 else 502) # Treat client errors as Bad Gateway upstream
except requests.exceptions.RequestException as e:
- status_code = e.response.status_code if e.response is not None else 502
- logging.error(f"Error downloading file from HF ({hf_path}, owner: {owner_user_id}, status: {status_code}): {e}")
- return Response(f"Error downloading file ({status_code})", status=status_code)
+ logging.error(f"Network error downloading file from HF ({hf_path}, owner: {owner_user_id}): {e}", exc_info=True)
+ return Response(f"Ошибка сети при скачивании файла", status=502) # Bad Gateway
except Exception as e:
- logging.exception(f"Unexpected error during download route ({file_id}): {e}")
- return Response("Internal server error during download", status=500)
+ logging.error(f"Unexpected error during download ({hf_path}, owner: {owner_user_id}): {e}", exc_info=True)
+ return Response("Внутренняя ошибка сервера при обработке скачивания", status=500)
@app.route('/delete_file/<file_id>', methods=['POST'])
def delete_file_route(file_id):
data = request.get_json()
- if not data or 'initData' not in data: # current_folder_id not strictly needed for delete itself
- return jsonify({"status": "error", "message": "Incomplete request"}), 400
+ if not data or 'initData' not in data:
+ # Allow omitting current_folder_id from request as it's not strictly needed for deletion itself
+ return jsonify({"status": "error", "message": "Неполный запрос (отсутствует initData)"}), 400
user_info = check_telegram_authorization(data['initData'], BOT_TOKEN)
if not user_info or 'id' not in user_info:
- return jsonify({"status": "error", "message": "Not authorized"}), 403
+ return jsonify({"status": "error", "message": "Ошибка авторизации"}), 403
tg_user_id = str(user_info['id'])
if not HF_TOKEN_WRITE:
- return jsonify({'status': 'error', 'message': 'Deletion configuration error on server.'}), 500
+ logging.error("Delete attempt failed: HF_TOKEN_WRITE not configured.")
+ return jsonify({'status': 'error', 'message': 'Удаление невозможно: Ошибка конфигурации сервера.'}), 503
try:
db_data = load_data()
user_data = db_data.get('users', {}).get(tg_user_id)
if not user_data or 'filesystem' not in user_data:
- return jsonify({"status": "error", "message": "User data error"}), 500
+ logging.error(f"User data or filesystem missing during file delete for {tg_user_id}")
+ return jsonify({"status": "error", "message": "Ошибка данных пользователя на сервере"}), 500
file_node, parent_node = find_node_by_id(user_data['filesystem'], file_id)
- if not file_node or file_node.get('type') != 'file': # Parent isn't strictly needed for deletion logic
- return jsonify({'status': 'error', 'message': 'File not found.'}), 404
+ if not file_node or file_node.get('type') != 'file':
+ # It might exist for another user, but this user doesn't own it here.
+ logging.warning(f"User {tg_user_id} attempted to delete file {file_id}, but it was not found in their filesystem.")
+ return jsonify({'status': 'error', 'message': 'Файл не найден в вашем хранилище.'}), 404
+
+ if not parent_node:
+ # This case should be rare (maybe root files if allowed?), but handle it.
+ logging.error(f"File node {file_id} found for user {tg_user_id}, but has no parent node. Cannot remove from DB correctly.")
+ # Proceed with HF deletion but report DB issue? Or block? Let's block for safety.
+ return jsonify({'status': 'error', 'message': 'Ошибка структуры данных: не найден родитель файла.'}), 500
+
hf_path = file_node.get('path')
- original_filename = file_node.get('original_filename', 'file')
+ original_filename = file_node.get('original_filename', 'файл')
needs_save = False
hf_delete_error = None
if hf_path:
try:
api = HfApi()
- logging.info(f"Attempting HF delete for user {tg_user_id}, path {hf_path}")
+ logging.info(f"User {tg_user_id} deleting file {file_id} (Orig: {original_filename}) from HF path: {hf_path}")
api.delete_file(
path_in_repo=hf_path, repo_id=REPO_ID, repo_type="dataset", token=HF_TOKEN_WRITE,
- commit_message=f"User {tg_user_id} deleted {original_filename}"
+ commit_message=f"User {tg_user_id} delete {original_filename}"
)
logging.info(f"Deleted file {hf_path} from HF Hub for user {tg_user_id}")
except hf_utils.EntryNotFoundError:
- logging.warning(f"File {hf_path} not found on HF Hub for delete attempt ({tg_user_id}). Proceeding with DB removal.")
+ logging.warning(f"File {hf_path} not found on HF Hub for delete attempt by user {tg_user_id}. Assuming already deleted or path mismatch.")
+ # Continue to remove from DB
except Exception as e:
- logging.exception(f"Error deleting file from HF Hub ({hf_path}, {tg_user_id}): {e}")
- hf_delete_error = e # Store error but continue to DB removal
+ logging.error(f"Error deleting file from HF Hub ({hf_path}, User: {tg_user_id}): {e}", exc_info=True)
+ hf_delete_error = str(e)
+ # Decide whether to proceed with DB removal despite HF error.
+ # Let's proceed, but report the error. The file might be stuck on HF.
else:
- logging.warning(f"File node {file_id} for user {tg_user_id} has no hf_path. Skipping HF delete.")
+ logging.warning(f"File node {file_id} for user {tg_user_id} has no HF path. Removing from DB only.")
+
if remove_node(user_data['filesystem'], file_id):
needs_save = True
logging.info(f"Removed file node {file_id} from DB for user {tg_user_id}")
else:
- # This is concerning if the node was found initially but couldn't be removed
- logging.error(f"Failed to remove file node {file_id} from DB structure for {tg_user_id} after finding it.")
- # If HF delete failed AND DB remove failed, it's a bigger issue
- if hf_delete_error:
- return jsonify({'status': 'error', 'message': f'Error deleting file from storage and database.'}), 500
- else:
- # If HF delete succeeded (or skipped) but DB failed, still report error
- return jsonify({'status': 'error', 'message': f'File deleted from storage, but failed to update database.'}), 500
-
+ # This shouldn't happen if file_node and parent_node were found
+ logging.error(f"Failed to remove file node {file_id} from DB structure for {tg_user_id} even after finding node and parent.")
+ # Return error, as DB state is inconsistent
+ return jsonify({'status': 'error', 'message': 'Ошибка удаления файла из базы данных после подтверждения.'}), 500
if needs_save:
- save_data(db_data) # Let save_data handle its errors
- if hf_delete_error:
- # Report success overall but mention the storage delete issue
- return jsonify({'status': 'ok', 'message': f'File "{original_filename}" removed from list, but encountered an error during storage cleanup.'})
- else:
- return jsonify({'status': 'ok', 'message': f'File "{original_filename}" deleted.'})
+ try:
+ save_data(db_data)
+ if hf_delete_error:
+ return jsonify({'status': 'ok', 'message': f'Файл "{original_filename}" удален из списка, но произошла ошибка при удалении с сервера хранения: {hf_delete_error}'})
+ else:
+ return jsonify({'status': 'ok', 'message': f'Файл "{original_filename}" удален.'})
+ except Exception as e:
+ # save_data logs critical errors
+ return jsonify({'status': 'error', 'message': 'Файл удален (или была ошибка на сервере хранения), но произошла критическая ошибка сохранения базы данных.'}), 500
else:
- # Should not be reached if remove_node logic above is correct
- return jsonify({'status': 'error', 'message': 'Failed to remove file from database.'}), 500
+ # Should have been caught by the remove_node check above
+ return jsonify({'status': 'error', 'message': 'Не удалось удалить узел файла из структуры данных.'}), 500
except Exception as e:
- logging.exception(f"Unhandled error during file deletion for user {tg_user_id}, file {file_id}: {e}")
- return jsonify({'status': 'error', 'message': 'An unexpected server error occurred during deletion.'}), 500
+ logging.critical(f"Unexpected critical error during /delete_file/{file_id} for user {tg_user_id}: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Непредвиденная ошибка сервера при удалении файла."}), 500
@app.route('/delete_folder/<folder_id>', methods=['POST'])
def delete_folder_route(folder_id):
if folder_id == 'root':
- return jsonify({'status': 'error', 'message': 'Cannot delete the root folder.'}), 400
+ return jsonify({'status': 'error', 'message': 'Нельзя удалить корневую папку!'}), 400
data = request.get_json()
if not data or 'initData' not in data:
- return jsonify({"status": "error", "message": "Incomplete request"}), 400
+ return jsonify({"status": "error", "message": "Неполный запрос (отсутствует initData)"}), 400
user_info = check_telegram_authorization(data['initData'], BOT_TOKEN)
if not user_info or 'id' not in user_info:
- return jsonify({"status": "error", "message": "Not authorized"}), 403
+ return jsonify({"status": "error", "message": "Ошибка авторизации"}), 403
tg_user_id = str(user_info['id'])
- # No HF token needed for folder delete as folders don't exist on HF directly
-
try:
db_data = load_data()
user_data = db_data.get('users', {}).get(tg_user_id)
if not user_data or 'filesystem' not in user_data:
- return jsonify({"status": "error", "message": "User data error"}), 500
+ logging.error(f"User data or filesystem missing during folder delete for {tg_user_id}")
+ return jsonify({"status": "error", "message": "Ошибка данных пользователя на сервере"}), 500
folder_node, parent_node = find_node_by_id(user_data['filesystem'], folder_id)
if not folder_node or folder_node.get('type') != 'folder':
- return jsonify({'status': 'error', 'message': 'Folder not found.'}), 404
+ logging.warning(f"User {tg_user_id} attempted to delete folder {folder_id}, but it was not found in their filesystem.")
+ return jsonify({'status': 'error', 'message': 'Папка не найдена в вашем хранилище.'}), 404
+
+ if not parent_node:
+ # Root cannot be deleted (checked earlier), so any folder must have a parent.
+ logging.error(f"Folder node {folder_id} found for user {tg_user_id}, but has no parent node. Data inconsistency.")
+ return jsonify({'status': 'error', 'message': 'Ошибка структуры данных: не найден родитель папки.'}), 500
- folder_name = folder_node.get('name', 'folder')
+
+ folder_name = folder_node.get('name', 'папка')
# Check if folder is empty
if folder_node.get('children'):
- return jsonify({'status': 'error', 'message': f'Folder "{folder_name}" is not empty. Cannot delete.'}), 400
+ logging.warning(f"User {tg_user_id} attempted to delete non-empty folder {folder_id} ('{folder_name}')")
+ return jsonify({'status': 'error', 'message': f'Папку "{folder_name}" можно удалить только если она пуста.'}), 400
+ # Proceed with deletion from DB
if remove_node(user_data['filesystem'], folder_id):
- save_data(db_data)
- return jsonify({'status': 'ok', 'message': f'Folder "{folder_name}" deleted.'})
+ try:
+ save_data(db_data)
+ logging.info(f"Folder '{folder_name}' ({folder_id}) deleted by user {tg_user_id}")
+ return jsonify({'status': 'ok', 'message': f'Папка "{folder_name}" удалена.'})
+ except Exception as e:
+ # save_data logs critical errors
+ return jsonify({'status': 'error', 'message': 'Ошибка сохранения базы данных после удаления папки.'}), 500
else:
- logging.error(f"Failed to remove empty folder node {folder_id} from DB for {tg_user_id}")
- return jsonify({'status': 'error', 'message': 'Failed to remove folder from database.'}), 500
+ # Should not happen if node and parent were found
+ logging.error(f"Failed to remove empty folder node {folder_id} from DB for {tg_user_id} even after checks.")
+ return jsonify({'status': 'error', 'message': 'Ошибка удаления папки из базы данных.'}), 500
except Exception as e:
- logging.exception(f"Error deleting folder for user {tg_user_id}, folder {folder_id}: {e}")
- return jsonify({'status': 'error', 'message': 'An unexpected server error occurred.'}), 500
+ logging.critical(f"Unexpected critical error during /delete_folder/{folder_id} for user {tg_user_id}: {e}", exc_info=True)
+ return jsonify({"status": "error", "message": "Непредвиденная ошибка сервера при удалении папки."}), 500
@app.route('/get_text_content/<file_id>')
def get_text_content_route(file_id):
- try:
- db_data = load_data()
- file_node = None
- owner_user_id = None
+ file_node, owner_user_id = find_file_globally(file_id)
- for user_id, user_data in db_data.get('users', {}).items():
- if 'filesystem' in user_data:
- node, _ = find_node_by_id(user_data['filesystem'], file_id)
- if node and node.get('type') == 'file' and node.get('file_type') == 'text':
- file_node = node
- owner_user_id = user_id
- break
+ if not file_node:
+ return Response("Файл не найден", status=404)
- if not file_node:
- return Response("Text file not found", status=404)
+ if file_node.get('file_type') != 'text':
+ return Response("Файл не является текстовым", status=415) # Unsupported Media Type
- hf_path = file_node.get('path')
- if not hf_path:
- return Response("Error: file path missing", status=500)
+ hf_path = file_node.get('path')
+ if not hf_path:
+ return Response("Ошибка: путь к файлу отсутствует в метаданных", status=500)
- file_url = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}?download=true"
+ file_url = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}?download=true"
+ logging.info(f"Serving text content for file ID {file_id} (Owner: {owner_user_id}) from HF path: {hf_path}")
+ try:
headers = {}
if HF_TOKEN_READ:
headers["authorization"] = f"Bearer {HF_TOKEN_READ}"
- response = requests.get(file_url, headers=headers, timeout=20) # Shorter timeout for text files
+ response = requests.get(file_url, headers=headers, timeout=15)
response.raise_for_status()
- max_preview_size = 1 * 1024 * 1024 # 1 MB limit for text preview
- if len(response.content) > max_preview_size:
- # Don't return 413, maybe return truncated? Or just a message.
- # Return a message for simplicity
- return Response("File is too large for preview (>1MB). Please download.", status=413, mimetype='text/plain')
+ # Limit preview size
+ max_preview_size = 2 * 1024 * 1024 # 2 MB limit for text preview
+ if 'Content-Length' in response.headers and int(response.headers['Content-Length']) > max_preview_size:
+ logging.warning(f"Text file {file_id} too large for preview ({response.headers['Content-Length']} bytes)")
+ return Response(f"Файл слишком большой для предпросмотра (>{max_preview_size//1024//1024}MB).", status=413) # Payload Too Large
+
+ content_bytes = response.content
+ if len(content_bytes) > max_preview_size:
+ logging.warning(f"Text file {file_id} fetched content is too large ({len(content_bytes)} bytes)")
+ # Check even if Content-Length was missing or wrong
+ return Response(f"Файл слишком большой для предпросмотра (>{max_preview_size//1024//1024}MB).", status=413)
text_content = None
- detected_encoding = None
# Try common encodings, starting with UTF-8
- encodings_to_try = ['utf-8', 'cp1251', 'latin-1', 'windows-1252']
+ encodings_to_try = ['utf-8', 'cp1251', 'cp1252', 'latin-1']
for enc in encodings_to_try:
try:
- text_content = response.content.decode(enc)
- detected_encoding = enc
+ text_content = content_bytes.decode(enc)
logging.info(f"Decoded text file {file_id} using {enc}")
break
except UnicodeDecodeError:
continue
- except Exception as decode_err: # Catch other potential errors
- logging.warning(f"Error trying to decode {file_id} with {enc}: {decode_err}")
- continue
-
if text_content is None:
- # Try decoding with replacement if all else fails
+ logging.error(f"Could not decode text file {file_id} with common encodings.")
+ # Try decoding with replacement of errors as a last resort
try:
- text_content = response.content.decode('utf-8', errors='replace')
- detected_encoding = 'utf-8 (with replacement)'
- logging.warning(f"Could not definitively decode {file_id}, used utf-8 with replacement.")
- except Exception as final_decode_err:
- logging.error(f"Failed all decoding attempts for {file_id}: {final_decode_err}")
- return Response("Could not determine file encoding.", status=500)
-
- # Always return as UTF-8
+ text_content = content_bytes.decode('utf-8', errors='replace')
+ logging.warning(f"Decoded text file {file_id} using utf-8 with replacement.")
+ except Exception: # Should not happen with 'replace'
+ return Response("Не удалось определить кодировку файла или файл поврежден.", status=500)
+
+
return Response(text_content, mimetype='text/plain; charset=utf-8')
except requests.exceptions.Timeout:
- logging.error(f"Timeout fetching text content from HF ({hf_path}, owner {owner_user_id})")
- return Response("Error loading content (timeout)", status=504)
+ logging.error(f"Timeout fetching text content from HF ({hf_path}, owner {owner_user_id})")
+ return Response("Ошибка: Время ожидания от сервера хранения истекло", status=504)
+ except requests.exceptions.HTTPError as e:
+ status_code = e.response.status_code
+ logging.error(f"HTTP error {status_code} fetching text content from HF ({hf_path}, owner {owner_user_id}): {e}")
+ return Response(f"Ошибка загрузки содержимого с сервера хранения ({status_code})", status=status_code if status_code >= 500 else 502)
except requests.exceptions.RequestException as e:
- status_code = e.response.status_code if e.response is not None else 502
- logging.error(f"Error fetching text content from HF ({hf_path}, owner {owner_user_id}, status: {status_code}): {e}")
- return Response(f"Error loading content ({status_code})", status=status_code)
+ logging.error(f"Network error fetching text content from HF ({hf_path}, owner {owner_user_id}): {e}", exc_info=True)
+ return Response("Ошибка сети при загрузке содержимого", status=502)
except Exception as e:
- logging.exception(f"Unexpected error fetching text content ({file_id}): {e}")
- return Response("Internal server error", status=500)
+ logging.error(f"Unexpected error fetching text content ({hf_path}, owner {owner_user_id}): {e}", exc_info=True)
+ return Response("Внутренняя ошибка сервера при получении текста", status=500)
 @app.route('/preview_thumb/<file_id>')
def preview_thumb_route(file_id):
- # No data loading needed if we assume the URL is constructed correctly client-side
- # Find the file node just to get the hf_path
- try:
- db_data = load_data()
- file_node = None
- owner_user_id = None
-
- for user_id, user_data in db_data.get('users', {}).items():
- if 'filesystem' in user_data:
- node, _ = find_node_by_id(user_data['filesystem'], file_id)
- if node and node.get('type') == 'file' and node.get('file_type') == 'image':
- file_node = node
- owner_user_id = user_id
- break
-
- if not file_node: return Response("Image not found in metadata", status=404)
- hf_path = file_node.get('path')
- if not hf_path: return Response("File path missing in metadata", status=500)
+ file_node, owner_user_id = find_file_globally(file_id)
+
+ if not file_node: return Response("Изображение не найдено", status=404)
+ if file_node.get('file_type') != 'image': return Response("Файл не является изображением", status=415)
+
+ hf_path = file_node.get('path')
+ if not hf_path: return Response("Путь к файлу не найден в метаданных", status=500)
- # Construct URL without download=true for potential browser caching / direct embedding
- file_url = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}"
+ # Use the direct file URL for previews (not download=true)
+ file_url = f"https://huggingface.co/datasets/{REPO_ID}/resolve/main/{hf_path}"
+ logging.info(f"Serving image preview for file ID {file_id} (Owner: {owner_user_id}) from HF path: {hf_path}")
+
+ try:
headers = {}
if HF_TOKEN_READ: headers["authorization"] = f"Bearer {HF_TOKEN_READ}"
- response = requests.get(file_url, headers=headers, stream=True, timeout=30) # Generous timeout for images
+ # Use stream=True to avoid loading large images entirely into memory server-side
+ response = requests.get(file_url, headers=headers, stream=True, timeout=20)
response.raise_for_status()
- # Stream the image content directly
- return Response(response.iter_content(chunk_size=8192), mimetype=response.headers.get('Content-Type', 'image/jpeg'))
+ content_type = response.headers.get('Content-Type', 'application/octet-stream')
+ # Basic check if it looks like an image type
+ if not content_type.startswith('image/'):
+ logging.warning(f"Content-Type for image preview {file_id} is '{content_type}', expected 'image/'.")
+ # Return potentially incorrect type, or maybe a placeholder? Let's proceed for now.
+
+ return Response(response.iter_content(chunk_size=65536), mimetype=content_type)
except requests.exceptions.Timeout:
- logging.error(f"Timeout fetching preview from HF ({file_id})")
- # Return a 1x1 pixel transparent gif as fallback? Or just error.
- return Response("Timeout loading preview", status=504)
+ logging.error(f"Timeout fetching preview from HF ({hf_path}, owner: {owner_user_id})")
+ return Response("Ошибка: Время ожидания от сервера хранения истекло", status=504)
+ except requests.exceptions.HTTPError as e:
+ status_code = e.response.status_code
+ logging.error(f"HTTP error {status_code} fetching preview from HF ({hf_path}, owner: {owner_user_id}): {e}")
+ # Return appropriate status code, maybe a placeholder image for client errors?
+ return Response(f"Ошибка загрузки превью ({status_code})", status=status_code)
except requests.exceptions.RequestException as e:
- status_code = e.response.status_code if e.response is not None else 502
- logging.error(f"Error fetching preview from HF ({file_id}, status: {status_code}): {e}")
- return Response(f"Error loading preview ({status_code})", status=status_code)
+ logging.error(f"Network error fetching preview from HF ({hf_path}, owner: {owner_user_id}): {e}", exc_info=True)
+        return Response("Ошибка сети при загрузке превью", status=502)
except Exception as e:
- logging.exception(f"Unexpected error during preview route ({file_id}): {e}")
- return Response("Internal server error loading preview", status=500)
+ logging.error(f"Unexpected error during preview ({hf_path}, owner: {owner_user_id}): {e}", exc_info=True)
+ return Response("Внутренняя ошибка сервера при загрузке превью", status=500)
+# --- Main Execution ---
if __name__ == '__main__':
print("Starting Flask Server...")
if not BOT_TOKEN or BOT_TOKEN == 'YOUR_BOT_TOKEN' or ':' not in BOT_TOKEN:
- print("\n" + "*"*60 +
- "\n CRITICAL: TELEGRAM_BOT_TOKEN is not set correctly." +
- "\n Telegram authentication WILL FAIL. Set the environment variable." +
- "\n" + "*"*60)
+ logging.critical("\n" + "*"*60 +
+ "\n CRITICAL: TELEGRAM_BOT_TOKEN is not set correctly. " +
+ "\n Telegram authentication WILL FAIL. Set the environment variable." +
+ "\n Format should be like '123456:ABC-DEF1234ghij567890'." +
+ "\n" + "*"*60)
if not HF_TOKEN_WRITE:
- print("\n" + "*"*60 +
- "\n WARNING: HF_TOKEN (write access) is not set." +
- "\n File uploads, deletions will FAIL." +
- "\n" + "*"*60)
+ logging.warning("HF_TOKEN (write access) is not set. File uploads and deletions will fail.")
if not HF_TOKEN_READ:
- print("\n" + "*"*60 +
- "\n WARNING: HF_TOKEN_READ is not set (or inherited from HF_TOKEN)." +
- "\n File downloads/previews might fail for private repos if HF_TOKEN is missing." +
- "\n" + "*"*60)
-
+ logging.warning("HF_TOKEN_READ is not set (or same as write token). File downloads/previews might fail for private repos if write token lacks read.")
- # Perform initial load to check/create files if needed, but don't block startup for long
- # load_data() is memoized, so the first request will trigger the real load/download
- print("Server ready. First request will trigger full data load.")
-
- # Use waitress for a more production-ready server than Flask's default
- try:
- from waitress import serve
- print("Running with Waitress server on http://0.0.0.0:7860")
- serve(app, host='0.0.0.0', port=7860, threads=8)
- except ImportError:
- print("Waitress not found, running with Flask development server (not recommended for production).")
- app.run(debug=False, host='0.0.0.0', port=7860)
-
-# --- END OF FILE app (24).py ---
\ No newline at end of file
+ logging.info("Performing initial database sync check with Hugging Face...")
+ download_successful = download_db_from_hf()
+ if download_successful:
+ logging.info("Initial DB sync/download from HF completed successfully.")
+ else:
+ logging.warning("Initial DB download/sync from HF failed or file not found. Will use/create local file.")
+ # Ensure local file exists if download failed and it wasn't there before
+ if not os.path.exists(DATA_FILE):
+ try:
+ with open(DATA_FILE, 'w', encoding='utf-8') as f:
+ json.dump({'users': {}}, f, ensure_ascii=False, indent=4)
+ logging.info(f"Created empty local database file: {DATA_FILE}")
+ except Exception as e:
+ logging.critical(f"CRITICAL: Failed to create initial empty DB file {DATA_FILE}: {e}")
+
+ logging.info("Starting Flask application server on 0.0.0.0:7860...")
+ # Use waitress or gunicorn in production instead of Flask's development server
+ # For development:
+ app.run(debug=False, host='0.0.0.0', port=7860)
+
+# --- END OF FILE ---