do or do not, there is no try
resources/app/no_server.py  CHANGED  +31 -52

@@ -57,77 +57,56 @@ except:
 # CPU_ONLY = not torch.cuda.is_available()
 CPU_ONLY = True
 
-try:
-    logger = logging.getLogger('serverLog')
-    logger.setLevel(logging.DEBUG)
-    server_log_path = f'{os.path.dirname(os.path.realpath(__file__))}/{"../../../" if PROD else ""}/server.log'
-    fh = RotatingFileHandler(server_log_path, maxBytes=2*1024*1024, backupCount=5)
-    fh.setLevel(logging.DEBUG)
-    ch = logging.StreamHandler()
-    ch.setLevel(logging.ERROR)
-    formatter = logging.Formatter('%(asctime)s - %(message)s')
-    fh.setFormatter(formatter)
-    ch.setFormatter(formatter)
-    logger.addHandler(fh)
-    logger.addHandler(ch)
-    logger.info(f'New session. Version: {APP_VERSION}. Installation: {"CPU" if CPU_ONLY else "CPU+GPU"} | Prod: {PROD} | Log path: {server_log_path}')
-
-    logger.orig_info = logger.info
-
-    def prefixed_log (msg):
-        logger.info(f'{logger.logging_prefix}{msg}')
-
-
-    def set_logger_prefix (prefix=""):
-        if len(prefix):
-            logger.logging_prefix = f'[{prefix}]: '
-            logger.log = prefixed_log
-        else:
-            logger.log = logger.orig_info
-
-    logger.set_logger_prefix = set_logger_prefix
-    logger.set_logger_prefix("")
-
-except:
-    with open("./DEBUG_err_logger.txt", "w+") as f:
-        f.write(traceback.format_exc())
-    try:
-        logger.info(traceback.format_exc())
-    except:
-        pass
+logger = logging.getLogger('serverLog')
+logger.setLevel(logging.DEBUG)
+server_log_path = f'{os.path.dirname(os.path.realpath(__file__))}/{"../../../" if PROD else ""}/server.log'
+fh = RotatingFileHandler(server_log_path, maxBytes=2*1024*1024, backupCount=5)
+fh.setLevel(logging.DEBUG)
+ch = logging.StreamHandler()
+ch.setLevel(logging.ERROR)
+formatter = logging.Formatter('%(asctime)s - %(message)s')
+fh.setFormatter(formatter)
+ch.setFormatter(formatter)
+logger.addHandler(fh)
+logger.addHandler(ch)
+logger.info(f'New session. Version: {APP_VERSION}. Installation: {"CPU" if CPU_ONLY else "CPU+GPU"} | Prod: {PROD} | Log path: {server_log_path}')
+
+logger.orig_info = logger.info
+
+def prefixed_log (msg):
+    logger.info(f'{logger.logging_prefix}{msg}')
+
+
+def set_logger_prefix (prefix=""):
+    if len(prefix):
+        logger.logging_prefix = f'[{prefix}]: '
+        logger.log = prefixed_log
+    else:
+        logger.log = logger.orig_info
+
+logger.set_logger_prefix = set_logger_prefix
+logger.set_logger_prefix("")
 
 if CPU_ONLY:
     torch_dml_device = torch.device("cpu")
 
-
-# try:
 from python.plugins_manager import PluginManager
 plugin_manager = PluginManager(APP_VERSION, PROD, CPU_ONLY, logger)
 active_plugins = plugin_manager.get_active_plugins_count()
 logger.info(f'Plugin manager loaded. {active_plugins} active plugins.')
-# except:
-# logger.info("Plugin manager FAILED.")
-# logger.info(traceback.format_exc())
 
 plugin_manager.run_plugins(plist=plugin_manager.plugins["start"]["pre"], event="pre start", data=None)
 
 
 # ======================== Models manager
 modelsPaths = {}
-
-try:
-    models_manager = ModelsManager(logger, PROD, device="cpu")
-except:
-    logger.info("Models manager failed to initialize")
-    logger.info(traceback.format_exc())
+from python.models_manager import ModelsManager
+models_manager = ModelsManager(logger, PROD, device="cpu")
 # ========================
 
-
-
 print("Models ready")
 logger.info("Models ready")
 
-
 post_data = ""
 def loadModel(post_data):
     req_response = {}
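The logger wiring this commit un-wraps is worth seeing in isolation. The sketch below is not from the repository (the demoLog name and demo.log path are invented for illustration); it reproduces the same technique the diff adds: a size-capped RotatingFileHandler plus a monkey-patched logger.log that swaps between a prefixed wrapper and the original info method.

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('demoLog')
logger.setLevel(logging.DEBUG)

# Hypothetical log path; the real code derives its path from __file__ and PROD.
# Rotate at 2 MB, keeping up to 5 old files (demo.log.1 .. demo.log.5).
fh = RotatingFileHandler('demo.log', maxBytes=2*1024*1024, backupCount=5)
fh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logger.addHandler(fh)

# Same pattern as the diff: remember the original .info, then point .log at
# either a prefixed wrapper or the plain method.
logger.orig_info = logger.info

def prefixed_log(msg):
    logger.info(f'{logger.logging_prefix}{msg}')

def set_logger_prefix(prefix=""):
    if len(prefix):
        logger.logging_prefix = f'[{prefix}]: '
        logger.log = prefixed_log
    else:
        logger.log = logger.orig_info

logger.set_logger_prefix = set_logger_prefix

logger.set_logger_prefix("plugins")
logger.log("loading...")            # written as "[plugins]: loading..."
logger.set_logger_prefix("")
logger.log("back to plain output")  # written without a prefix

Patching logger.log rather than logger.info keeps prefixed_log from recursing, since the wrapper still calls the untouched info method; the standard-library route to the same effect would be logging.LoggerAdapter.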